/*
 * MMX optimized DSP utils
 * Copyright (c) 2000, 2001 Fabrice Bellard.
 * Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 *
 * MMX optimization by Nick Kurshev <nickols_k@mail.ru>
 */

#include "../dsputil.h"
#include "../simple_idct.h"
#include "../mpegvideo.h"
#include "mmx.h"

//#undef NDEBUG
//#include <assert.h>

extern const uint8_t ff_h263_loop_filter_strength[32];
extern void ff_idct_xvid_mmx(short *block);
extern void ff_idct_xvid_mmx2(short *block);

int mm_flags; /* multimedia extension flags */

/* pixel operations */
static const uint64_t mm_bone attribute_used __attribute__ ((aligned(8))) = 0x0101010101010101ULL;
static const uint64_t mm_wone attribute_used __attribute__ ((aligned(8))) = 0x0001000100010001ULL;
static const uint64_t mm_wtwo attribute_used __attribute__ ((aligned(8))) = 0x0002000200020002ULL;

static const uint64_t ff_pw_20 attribute_used __attribute__ ((aligned(8))) = 0x0014001400140014ULL;
static const uint64_t ff_pw_3  attribute_used __attribute__ ((aligned(8))) = 0x0003000300030003ULL;
static const uint64_t ff_pw_4  attribute_used __attribute__ ((aligned(8))) = 0x0004000400040004ULL;
static const uint64_t ff_pw_5  attribute_used __attribute__ ((aligned(8))) = 0x0005000500050005ULL;
static const uint64_t ff_pw_8  attribute_used __attribute__ ((aligned(8))) = 0x0008000800080008ULL;
static const uint64_t ff_pw_16 attribute_used __attribute__ ((aligned(8))) = 0x0010001000100010ULL;
static const uint64_t ff_pw_32 attribute_used __attribute__ ((aligned(8))) = 0x0020002000200020ULL;
static const uint64_t ff_pw_64 attribute_used __attribute__ ((aligned(8))) = 0x0040004000400040ULL;
static const uint64_t ff_pw_15 attribute_used __attribute__ ((aligned(8))) = 0x000F000F000F000FULL;

static const uint64_t ff_pb_3F attribute_used __attribute__ ((aligned(8))) = 0x3F3F3F3F3F3F3F3FULL;
static const uint64_t ff_pb_FC attribute_used __attribute__ ((aligned(8))) = 0xFCFCFCFCFCFCFCFCULL;

#define JUMPALIGN() __asm __volatile (".balign 8"::)
#define MOVQ_ZERO(regd)  __asm __volatile ("pxor %%" #regd ", %%" #regd ::)

#define MOVQ_WONE(regd) \
    __asm __volatile ( \
    "pcmpeqd %%" #regd ", %%" #regd " \n\t" \
    "psrlw $15, %%" #regd ::)

#define MOVQ_BFE(regd) \
    __asm __volatile ( \
    "pcmpeqd %%" #regd ", %%" #regd " \n\t"\
    "paddb %%" #regd ", %%" #regd " \n\t" ::)

#ifndef PIC
#define MOVQ_BONE(regd)  __asm __volatile ("movq %0, %%" #regd " \n\t" ::"m"(mm_bone))
#define MOVQ_WTWO(regd)  __asm __volatile ("movq %0, %%" #regd " \n\t" ::"m"(mm_wtwo))
#else
// for shared libraries it is better to access the constants this way
// pcmpeqd -> -1
#define MOVQ_BONE(regd) \
    __asm __volatile ( \
    "pcmpeqd %%" #regd ", %%" #regd " \n\t" \
    "psrlw $15, %%" #regd " \n\t" \
    "packuswb %%" #regd ", %%" #regd " \n\t" ::)

#define MOVQ_WTWO(regd) \
    __asm __volatile ( \
    "pcmpeqd %%" #regd ", %%" #regd " \n\t" \
    "psrlw $15, %%" #regd " \n\t" \
    "psllw $1, %%" #regd " \n\t"::)

#endif

// using regr as temporary and for the output result
// first argument is unmodified and second is trashed
// regfe is supposed to contain 0xfefefefefefefefe
#define PAVGB_MMX_NO_RND(rega, regb, regr, regfe) \
    "movq " #rega ", " #regr "  \n\t"\
    "pand " #regb ", " #regr "  \n\t"\
    "pxor " #rega ", " #regb "  \n\t"\
    "pand " #regfe "," #regb "  \n\t"\
    "psrlq $1, " #regb "        \n\t"\
    "paddb " #regb ", " #regr " \n\t"

#define PAVGB_MMX(rega, regb, regr, regfe) \
    "movq " #rega ", " #regr "  \n\t"\
    "por  " #regb ", " #regr "  \n\t"\
    "pxor " #rega ", " #regb "  \n\t"\
    "pand " #regfe "," #regb "  \n\t"\
    "psrlq $1, " #regb "        \n\t"\
    "psubb " #regb ", " #regr " \n\t"

// mm6 is supposed to contain 0xfefefefefefefefe
#define PAVGBP_MMX_NO_RND(rega, regb, regr,  regc, regd, regp) \
    "movq " #rega ", " #regr "  \n\t"\
    "movq " #regc ", " #regp "  \n\t"\
    "pand " #regb ", " #regr "  \n\t"\
    "pand " #regd ", " #regp "  \n\t"\
    "pxor " #rega ", " #regb "  \n\t"\
    "pxor " #regc ", " #regd "  \n\t"\
    "pand %%mm6, " #regb "      \n\t"\
    "pand %%mm6, " #regd "      \n\t"\
    "psrlq $1, " #regb "        \n\t"\
    "psrlq $1, " #regd "        \n\t"\
    "paddb " #regb ", " #regr " \n\t"\
    "paddb " #regd ", " #regp " \n\t"

#define PAVGBP_MMX(rega, regb, regr, regc, regd, regp) \
    "movq " #rega ", " #regr "  \n\t"\
    "movq " #regc ", " #regp "  \n\t"\
    "por  " #regb ", " #regr "  \n\t"\
    "por  " #regd ", " #regp "  \n\t"\
    "pxor " #rega ", " #regb "  \n\t"\
    "pxor " #regc ", " #regd "  \n\t"\
    "pand %%mm6, " #regb "      \n\t"\
    "pand %%mm6, " #regd "      \n\t"\
    "psrlq $1, " #regd "        \n\t"\
    "psrlq $1, " #regb "        \n\t"\
    "psubb " #regb ", " #regr " \n\t"\
    "psubb " #regd ", " #regp " \n\t"

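/*
 * Illustrative scalar sketch of the identities used by the PAVGB macros above
 * (added commentary, not part of the original file; compiled out via #if 0):
 * the byte-wise averages are computed without 9-bit overflow as
 *     (a+b)>>1   == (a & b) + (((a ^ b) & 0xFE) >> 1)   // no rounding
 *     (a+b+1)>>1 == (a | b) - (((a ^ b) & 0xFE) >> 1)   // rounding
 */
#if 0
static inline int avg_no_rnd_sketch(int a, int b){
    return (a & b) + (((a ^ b) & 0xFE) >> 1); /* == (a + b) >> 1 for 0..255 */
}
static inline int avg_rnd_sketch(int a, int b){
    return (a | b) - (((a ^ b) & 0xFE) >> 1); /* == (a + b + 1) >> 1 for 0..255 */
}
#endif
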
/***********************************/
/* MMX no rounding */
#define DEF(x, y) x ## _no_rnd_ ## y ##_mmx
#define SET_RND  MOVQ_WONE
#define PAVGBP(a, b, c, d, e, f)        PAVGBP_MMX_NO_RND(a, b, c, d, e, f)
#define PAVGB(a, b, c, e)               PAVGB_MMX_NO_RND(a, b, c, e)

#include "dsputil_mmx_rnd.h"

#undef DEF
#undef SET_RND
#undef PAVGBP
#undef PAVGB
/***********************************/
/* MMX rounding */

#define DEF(x, y) x ## _ ## y ##_mmx
#define SET_RND  MOVQ_WTWO
#define PAVGBP(a, b, c, d, e, f)        PAVGBP_MMX(a, b, c, d, e, f)
#define PAVGB(a, b, c, e)               PAVGB_MMX(a, b, c, e)

#include "dsputil_mmx_rnd.h"

#undef DEF
#undef SET_RND
#undef PAVGBP
#undef PAVGB

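/*
 * Added note (not in the original file): dsputil_mmx_rnd.h is a template that
 * is included twice with different DEF/SET_RND/PAVGB definitions. The DEF macro
 * only controls the generated names, e.g. DEF(put, pixels8) expanded to
 * put_no_rnd_pixels8_mmx in the first (no-rounding) pass above and to
 * put_pixels8_mmx in the second (rounding) pass.
 */
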
/***********************************/
/* 3Dnow specific */

#define DEF(x) x ## _3dnow
/* for Athlons PAVGUSB is preferred */
#define PAVGB "pavgusb"

#include "dsputil_mmx_avg.h"

#undef DEF
#undef PAVGB

/***********************************/
/* MMX2 specific */

#define DEF(x) x ## _mmx2

/* Introduced only in MMX2 set */
#define PAVGB "pavgb"

#include "dsputil_mmx_avg.h"

#undef DEF
#undef PAVGB

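/*
 * Added note (not in the original file): both "pavgusb" (3DNow!) and "pavgb"
 * (MMX2) compute the rounded byte average (a + b + 1) >> 1, the same result as
 * the PAVGB_MMX macro above, so dsputil_mmx_avg.h can be instantiated once per
 * instruction set with only DEF and PAVGB redefined.
 */
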
/***********************************/
/* standard MMX */

#ifdef CONFIG_ENCODERS
static void get_pixels_mmx(DCTELEM *block, const uint8_t *pixels, int line_size)
{
    asm volatile(
        "mov $-128, %%"REG_a"           \n\t"
        "pxor %%mm7, %%mm7              \n\t"
        ".balign 16                     \n\t"
        "1:                             \n\t"
        "movq (%0), %%mm0               \n\t"
        "movq (%0, %2), %%mm2           \n\t"
        "movq %%mm0, %%mm1              \n\t"
        "movq %%mm2, %%mm3              \n\t"
        "punpcklbw %%mm7, %%mm0         \n\t"
        "punpckhbw %%mm7, %%mm1         \n\t"
        "punpcklbw %%mm7, %%mm2         \n\t"
        "punpckhbw %%mm7, %%mm3         \n\t"
        "movq %%mm0, (%1, %%"REG_a")    \n\t"
        "movq %%mm1, 8(%1, %%"REG_a")   \n\t"
        "movq %%mm2, 16(%1, %%"REG_a")  \n\t"
        "movq %%mm3, 24(%1, %%"REG_a")  \n\t"
        "add %3, %0                     \n\t"
        "add $32, %%"REG_a"             \n\t"
        "js 1b                          \n\t"
        : "+r" (pixels)
        : "r" (block+64), "r" ((long)line_size), "r" ((long)line_size*2)
        : "%"REG_a
    );
}

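/*
 * Illustrative scalar equivalent of get_pixels_mmx() above (added commentary,
 * not part of the original file; compiled out via #if 0).
 */
#if 0
static void get_pixels_sketch(DCTELEM *block, const uint8_t *pixels, int line_size)
{
    int i, j;
    for (i = 0; i < 8; i++) {
        for (j = 0; j < 8; j++)
            block[i*8 + j] = pixels[j];
        pixels += line_size;
    }
}
#endif
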
static inline void diff_pixels_mmx(DCTELEM *block, const uint8_t *s1, const uint8_t *s2, int stride)
{
    asm volatile(
        "pxor %%mm7, %%mm7              \n\t"
        "mov $-128, %%"REG_a"           \n\t"
        ".balign 16                     \n\t"
        "1:                             \n\t"
        "movq (%0), %%mm0               \n\t"
        "movq (%1), %%mm2               \n\t"
        "movq %%mm0, %%mm1              \n\t"
        "movq %%mm2, %%mm3              \n\t"
        "punpcklbw %%mm7, %%mm0         \n\t"
        "punpckhbw %%mm7, %%mm1         \n\t"
        "punpcklbw %%mm7, %%mm2         \n\t"
        "punpckhbw %%mm7, %%mm3         \n\t"
        "psubw %%mm2, %%mm0             \n\t"
        "psubw %%mm3, %%mm1             \n\t"
        "movq %%mm0, (%2, %%"REG_a")    \n\t"
        "movq %%mm1, 8(%2, %%"REG_a")   \n\t"
        "add %3, %0                     \n\t"
        "add %3, %1                     \n\t"
        "add $16, %%"REG_a"             \n\t"
        "jnz 1b                         \n\t"
        : "+r" (s1), "+r" (s2)
        : "r" (block+64), "r" ((long)stride)
        : "%"REG_a
    );
}
#endif //CONFIG_ENCODERS

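/*
 * Illustrative scalar equivalent of diff_pixels_mmx() above (added commentary,
 * not part of the original file; compiled out via #if 0).
 */
#if 0
static void diff_pixels_sketch(DCTELEM *block, const uint8_t *s1, const uint8_t *s2, int stride)
{
    int i, j;
    for (i = 0; i < 8; i++) {
        for (j = 0; j < 8; j++)
            block[i*8 + j] = s1[j] - s2[j];
        s1 += stride;
        s2 += stride;
    }
}
#endif
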
void put_pixels_clamped_mmx(const DCTELEM *block, uint8_t *pixels, int line_size)
{
    const DCTELEM *p;
    uint8_t *pix;

    /* read the pixels */
    p = block;
    pix = pixels;
    /* unrolled loop */
        __asm __volatile(
                "movq   %3, %%mm0               \n\t"
                "movq   8%3, %%mm1              \n\t"
                "movq   16%3, %%mm2             \n\t"
                "movq   24%3, %%mm3             \n\t"
                "movq   32%3, %%mm4             \n\t"
                "movq   40%3, %%mm5             \n\t"
                "movq   48%3, %%mm6             \n\t"
                "movq   56%3, %%mm7             \n\t"
                "packuswb %%mm1, %%mm0          \n\t"
                "packuswb %%mm3, %%mm2          \n\t"
                "packuswb %%mm5, %%mm4          \n\t"
                "packuswb %%mm7, %%mm6          \n\t"
                "movq   %%mm0, (%0)             \n\t"
                "movq   %%mm2, (%0, %1)         \n\t"
                "movq   %%mm4, (%0, %1, 2)      \n\t"
                "movq   %%mm6, (%0, %2)         \n\t"
                ::"r" (pix), "r" ((long)line_size), "r" ((long)line_size*3), "m"(*p)
                :"memory");
        pix += line_size*4;
        p += 32;

    // if this block were an exact copy of the one above, the compiler would
    // generate some very strange code, thus we use an "r" constraint for the
    // block pointer here
    __asm __volatile(
            "movq       (%3), %%mm0             \n\t"
            "movq       8(%3), %%mm1            \n\t"
            "movq       16(%3), %%mm2           \n\t"
            "movq       24(%3), %%mm3           \n\t"
            "movq       32(%3), %%mm4           \n\t"
            "movq       40(%3), %%mm5           \n\t"
            "movq       48(%3), %%mm6           \n\t"
            "movq       56(%3), %%mm7           \n\t"
            "packuswb %%mm1, %%mm0              \n\t"
            "packuswb %%mm3, %%mm2              \n\t"
            "packuswb %%mm5, %%mm4              \n\t"
            "packuswb %%mm7, %%mm6              \n\t"
            "movq       %%mm0, (%0)             \n\t"
            "movq       %%mm2, (%0, %1)         \n\t"
            "movq       %%mm4, (%0, %1, 2)      \n\t"
            "movq       %%mm6, (%0, %2)         \n\t"
            ::"r" (pix), "r" ((long)line_size), "r" ((long)line_size*3), "r"(p)
            :"memory");
}

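/*
 * Illustrative scalar equivalent of put_pixels_clamped_mmx() above (added
 * commentary, not part of the original file): packuswb clamps each signed
 * 16-bit coefficient to the 0..255 range before it is stored.
 */
#if 0
static void put_pixels_clamped_sketch(const DCTELEM *block, uint8_t *pixels, int line_size)
{
    int i, j;
    for (i = 0; i < 8; i++) {
        for (j = 0; j < 8; j++) {
            int v = block[i*8 + j];
            pixels[j] = v < 0 ? 0 : v > 255 ? 255 : v;
        }
        pixels += line_size;
    }
}
#endif
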
static DECLARE_ALIGNED_8(const unsigned char, vector128[8]) =
  { 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80 };

void put_signed_pixels_clamped_mmx(const DCTELEM *block, uint8_t *pixels, int line_size)
{
    int i;

    movq_m2r(*vector128, mm1);
    for (i = 0; i < 8; i++) {
        movq_m2r(*(block), mm0);
        packsswb_m2r(*(block + 4), mm0);
        block += 8;
        paddb_r2r(mm1, mm0);
        movq_r2m(mm0, *pixels);
        pixels += line_size;
    }
}

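/*
 * Illustrative scalar equivalent of put_signed_pixels_clamped_mmx() above
 * (added commentary, not part of the original file): packsswb saturates to
 * -128..127 and the vector128 addition then biases the result into 0..255.
 */
#if 0
static void put_signed_pixels_clamped_sketch(const DCTELEM *block, uint8_t *pixels, int line_size)
{
    int i, j;
    for (i = 0; i < 8; i++) {
        for (j = 0; j < 8; j++) {
            int v = block[i*8 + j];
            if (v < -128) v = -128; else if (v > 127) v = 127;
            pixels[j] = v + 128;
        }
        pixels += line_size;
    }
}
#endif
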
void add_pixels_clamped_mmx(const DCTELEM *block, uint8_t *pixels, int line_size)
{
    const DCTELEM *p;
    uint8_t *pix;
    int i;

    /* read the pixels */
    p = block;
    pix = pixels;
    MOVQ_ZERO(mm7);
    i = 4;
    do {
        __asm __volatile(
                "movq   (%2), %%mm0     \n\t"
                "movq   8(%2), %%mm1    \n\t"
                "movq   16(%2), %%mm2   \n\t"
                "movq   24(%2), %%mm3   \n\t"
                "movq   %0, %%mm4       \n\t"
                "movq   %1, %%mm6       \n\t"
                "movq   %%mm4, %%mm5    \n\t"
                "punpcklbw %%mm7, %%mm4 \n\t"
                "punpckhbw %%mm7, %%mm5 \n\t"
                "paddsw %%mm4, %%mm0    \n\t"
                "paddsw %%mm5, %%mm1    \n\t"
                "movq   %%mm6, %%mm5    \n\t"
                "punpcklbw %%mm7, %%mm6 \n\t"
                "punpckhbw %%mm7, %%mm5 \n\t"
                "paddsw %%mm6, %%mm2    \n\t"
                "paddsw %%mm5, %%mm3    \n\t"
                "packuswb %%mm1, %%mm0  \n\t"
                "packuswb %%mm3, %%mm2  \n\t"
                "movq   %%mm0, %0       \n\t"
                "movq   %%mm2, %1       \n\t"
                :"+m"(*pix), "+m"(*(pix+line_size))
                :"r"(p)
                :"memory");
        pix += line_size*2;
        p += 16;
    } while (--i);
}

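/*
 * Illustrative scalar equivalent of the put_pixels{4,8,16}_mmx() functions
 * below (added commentary, not part of the original file): they are plain
 * block copies of w bytes per row over h rows.
 */
#if 0
static void put_pixels_sketch(uint8_t *block, const uint8_t *pixels, int line_size, int w, int h)
{
    int i, j;
    for (i = 0; i < h; i++) {
        for (j = 0; j < w; j++)
            block[j] = pixels[j];
        block  += line_size;
        pixels += line_size;
    }
}
#endif
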
365
static void put_pixels4_mmx(uint8_t *block, const uint8_t *pixels, int line_size, int h)
366
{
367
    __asm __volatile(
368
         "lea (%3, %3), %%"REG_a"       \n\t"
369
         ".balign 8                     \n\t"
370
         "1:                            \n\t"
371
         "movd (%1), %%mm0              \n\t"
372
         "movd (%1, %3), %%mm1          \n\t"
373
         "movd %%mm0, (%2)              \n\t"
374
         "movd %%mm1, (%2, %3)          \n\t"
375
         "add %%"REG_a", %1             \n\t"
376
         "add %%"REG_a", %2             \n\t"
377
         "movd (%1), %%mm0              \n\t"
378
         "movd (%1, %3), %%mm1          \n\t"
379
         "movd %%mm0, (%2)              \n\t"
380
         "movd %%mm1, (%2, %3)          \n\t"
381
         "add %%"REG_a", %1             \n\t"
382
         "add %%"REG_a", %2             \n\t"
383
         "subl $4, %0                   \n\t"
384
         "jnz 1b                        \n\t"
385
         : "+g"(h), "+r" (pixels),  "+r" (block)
386
         : "r"((long)line_size)
387
         : "%"REG_a, "memory"
388
        );
389
}
390

    
391
static void put_pixels8_mmx(uint8_t *block, const uint8_t *pixels, int line_size, int h)
392
{
393
    __asm __volatile(
394
         "lea (%3, %3), %%"REG_a"       \n\t"
395
         ".balign 8                     \n\t"
396
         "1:                            \n\t"
397
         "movq (%1), %%mm0              \n\t"
398
         "movq (%1, %3), %%mm1          \n\t"
399
         "movq %%mm0, (%2)              \n\t"
400
         "movq %%mm1, (%2, %3)          \n\t"
401
         "add %%"REG_a", %1             \n\t"
402
         "add %%"REG_a", %2             \n\t"
403
         "movq (%1), %%mm0              \n\t"
404
         "movq (%1, %3), %%mm1          \n\t"
405
         "movq %%mm0, (%2)              \n\t"
406
         "movq %%mm1, (%2, %3)          \n\t"
407
         "add %%"REG_a", %1             \n\t"
408
         "add %%"REG_a", %2             \n\t"
409
         "subl $4, %0                   \n\t"
410
         "jnz 1b                        \n\t"
411
         : "+g"(h), "+r" (pixels),  "+r" (block)
412
         : "r"((long)line_size)
413
         : "%"REG_a, "memory"
414
        );
415
}
416

    
417
static void put_pixels16_mmx(uint8_t *block, const uint8_t *pixels, int line_size, int h)
418
{
419
    __asm __volatile(
420
         "lea (%3, %3), %%"REG_a"       \n\t"
421
         ".balign 8                     \n\t"
422
         "1:                            \n\t"
423
         "movq (%1), %%mm0              \n\t"
424
         "movq 8(%1), %%mm4             \n\t"
425
         "movq (%1, %3), %%mm1          \n\t"
426
         "movq 8(%1, %3), %%mm5         \n\t"
427
         "movq %%mm0, (%2)              \n\t"
428
         "movq %%mm4, 8(%2)             \n\t"
429
         "movq %%mm1, (%2, %3)          \n\t"
430
         "movq %%mm5, 8(%2, %3)         \n\t"
431
         "add %%"REG_a", %1             \n\t"
432
         "add %%"REG_a", %2             \n\t"
433
         "movq (%1), %%mm0              \n\t"
434
         "movq 8(%1), %%mm4             \n\t"
435
         "movq (%1, %3), %%mm1          \n\t"
436
         "movq 8(%1, %3), %%mm5         \n\t"
437
         "movq %%mm0, (%2)              \n\t"
438
         "movq %%mm4, 8(%2)             \n\t"
439
         "movq %%mm1, (%2, %3)          \n\t"
440
         "movq %%mm5, 8(%2, %3)         \n\t"
441
         "add %%"REG_a", %1             \n\t"
442
         "add %%"REG_a", %2             \n\t"
443
         "subl $4, %0                   \n\t"
444
         "jnz 1b                        \n\t"
445
         : "+g"(h), "+r" (pixels),  "+r" (block)
446
         : "r"((long)line_size)
447
         : "%"REG_a, "memory"
448
        );
449
}
450

    
451
static void clear_blocks_mmx(DCTELEM *blocks)
452
{
453
    __asm __volatile(
454
                "pxor %%mm7, %%mm7              \n\t"
455
                "mov $-128*6, %%"REG_a"         \n\t"
456
                "1:                             \n\t"
457
                "movq %%mm7, (%0, %%"REG_a")    \n\t"
458
                "movq %%mm7, 8(%0, %%"REG_a")   \n\t"
459
                "movq %%mm7, 16(%0, %%"REG_a")  \n\t"
460
                "movq %%mm7, 24(%0, %%"REG_a")  \n\t"
461
                "add $32, %%"REG_a"             \n\t"
462
                " js 1b                         \n\t"
463
                : : "r" (((uint8_t *)blocks)+128*6)
464
                : "%"REG_a
465
        );
466
}
467

    
468
#ifdef CONFIG_ENCODERS
469
static int pix_sum16_mmx(uint8_t * pix, int line_size){
470
    const int h=16;
471
    int sum;
472
    long index= -line_size*h;
473

    
474
    __asm __volatile(
475
                "pxor %%mm7, %%mm7              \n\t"
476
                "pxor %%mm6, %%mm6              \n\t"
477
                "1:                             \n\t"
478
                "movq (%2, %1), %%mm0           \n\t"
479
                "movq (%2, %1), %%mm1           \n\t"
480
                "movq 8(%2, %1), %%mm2          \n\t"
481
                "movq 8(%2, %1), %%mm3          \n\t"
482
                "punpcklbw %%mm7, %%mm0         \n\t"
483
                "punpckhbw %%mm7, %%mm1         \n\t"
484
                "punpcklbw %%mm7, %%mm2         \n\t"
485
                "punpckhbw %%mm7, %%mm3         \n\t"
486
                "paddw %%mm0, %%mm1             \n\t"
487
                "paddw %%mm2, %%mm3             \n\t"
488
                "paddw %%mm1, %%mm3             \n\t"
489
                "paddw %%mm3, %%mm6             \n\t"
490
                "add %3, %1                     \n\t"
491
                " js 1b                         \n\t"
492
                "movq %%mm6, %%mm5              \n\t"
493
                "psrlq $32, %%mm6               \n\t"
494
                "paddw %%mm5, %%mm6             \n\t"
495
                "movq %%mm6, %%mm5              \n\t"
496
                "psrlq $16, %%mm6               \n\t"
497
                "paddw %%mm5, %%mm6             \n\t"
498
                "movd %%mm6, %0                 \n\t"
499
                "andl $0xFFFF, %0               \n\t"
500
                : "=&r" (sum), "+r" (index)
501
                : "r" (pix - index), "r" ((long)line_size)
502
        );
503

    
504
        return sum;
505
}
506
#endif //CONFIG_ENCODERS
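/*
 * Illustrative scalar equivalent of pix_sum16_mmx() above (added commentary,
 * not part of the original file); the MMX version accumulates in 16-bit lanes
 * and masks the final result to 16 bits.
 */
#if 0
static int pix_sum16_sketch(uint8_t *pix, int line_size)
{
    int x, y, sum = 0;
    for (y = 0; y < 16; y++) {
        for (x = 0; x < 16; x++)
            sum += pix[x];
        pix += line_size;
    }
    return sum & 0xFFFF;
}
#endif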
507

    
508
static void add_bytes_mmx(uint8_t *dst, uint8_t *src, int w){
509
    long i=0;
510
    asm volatile(
511
        "1:                             \n\t"
512
        "movq  (%1, %0), %%mm0          \n\t"
513
        "movq  (%2, %0), %%mm1          \n\t"
514
        "paddb %%mm0, %%mm1             \n\t"
515
        "movq %%mm1, (%2, %0)           \n\t"
516
        "movq 8(%1, %0), %%mm0          \n\t"
517
        "movq 8(%2, %0), %%mm1          \n\t"
518
        "paddb %%mm0, %%mm1             \n\t"
519
        "movq %%mm1, 8(%2, %0)          \n\t"
520
        "add $16, %0                    \n\t"
521
        "cmp %3, %0                     \n\t"
522
        " jb 1b                         \n\t"
523
        : "+r" (i)
524
        : "r"(src), "r"(dst), "r"((long)w-15)
525
    );
526
    for(; i<w; i++)
527
        dst[i+0] += src[i+0];
528
}
529

    
530
#define H263_LOOP_FILTER \
531
        "pxor %%mm7, %%mm7              \n\t"\
532
        "movq  %0, %%mm0                \n\t"\
533
        "movq  %0, %%mm1                \n\t"\
534
        "movq  %3, %%mm2                \n\t"\
535
        "movq  %3, %%mm3                \n\t"\
536
        "punpcklbw %%mm7, %%mm0         \n\t"\
537
        "punpckhbw %%mm7, %%mm1         \n\t"\
538
        "punpcklbw %%mm7, %%mm2         \n\t"\
539
        "punpckhbw %%mm7, %%mm3         \n\t"\
540
        "psubw %%mm2, %%mm0             \n\t"\
541
        "psubw %%mm3, %%mm1             \n\t"\
542
        "movq  %1, %%mm2                \n\t"\
543
        "movq  %1, %%mm3                \n\t"\
544
        "movq  %2, %%mm4                \n\t"\
545
        "movq  %2, %%mm5                \n\t"\
546
        "punpcklbw %%mm7, %%mm2         \n\t"\
547
        "punpckhbw %%mm7, %%mm3         \n\t"\
548
        "punpcklbw %%mm7, %%mm4         \n\t"\
549
        "punpckhbw %%mm7, %%mm5         \n\t"\
550
        "psubw %%mm2, %%mm4             \n\t"\
551
        "psubw %%mm3, %%mm5             \n\t"\
552
        "psllw $2, %%mm4                \n\t"\
553
        "psllw $2, %%mm5                \n\t"\
554
        "paddw %%mm0, %%mm4             \n\t"\
555
        "paddw %%mm1, %%mm5             \n\t"\
556
        "pxor %%mm6, %%mm6              \n\t"\
557
        "pcmpgtw %%mm4, %%mm6           \n\t"\
558
        "pcmpgtw %%mm5, %%mm7           \n\t"\
559
        "pxor %%mm6, %%mm4              \n\t"\
560
        "pxor %%mm7, %%mm5              \n\t"\
561
        "psubw %%mm6, %%mm4             \n\t"\
562
        "psubw %%mm7, %%mm5             \n\t"\
563
        "psrlw $3, %%mm4                \n\t"\
564
        "psrlw $3, %%mm5                \n\t"\
565
        "packuswb %%mm5, %%mm4          \n\t"\
566
        "packsswb %%mm7, %%mm6          \n\t"\
567
        "pxor %%mm7, %%mm7              \n\t"\
568
        "movd %4, %%mm2                 \n\t"\
569
        "punpcklbw %%mm2, %%mm2         \n\t"\
570
        "punpcklbw %%mm2, %%mm2         \n\t"\
571
        "punpcklbw %%mm2, %%mm2         \n\t"\
572
        "psubusb %%mm4, %%mm2           \n\t"\
573
        "movq %%mm2, %%mm3              \n\t"\
574
        "psubusb %%mm4, %%mm3           \n\t"\
575
        "psubb %%mm3, %%mm2             \n\t"\
576
        "movq %1, %%mm3                 \n\t"\
577
        "movq %2, %%mm4                 \n\t"\
578
        "pxor %%mm6, %%mm3              \n\t"\
579
        "pxor %%mm6, %%mm4              \n\t"\
580
        "paddusb %%mm2, %%mm3           \n\t"\
581
        "psubusb %%mm2, %%mm4           \n\t"\
582
        "pxor %%mm6, %%mm3              \n\t"\
583
        "pxor %%mm6, %%mm4              \n\t"\
584
        "paddusb %%mm2, %%mm2           \n\t"\
585
        "packsswb %%mm1, %%mm0          \n\t"\
586
        "pcmpgtb %%mm0, %%mm7           \n\t"\
587
        "pxor %%mm7, %%mm0              \n\t"\
588
        "psubb %%mm7, %%mm0             \n\t"\
589
        "movq %%mm0, %%mm1              \n\t"\
590
        "psubusb %%mm2, %%mm0           \n\t"\
591
        "psubb %%mm0, %%mm1             \n\t"\
592
        "pand %5, %%mm1                 \n\t"\
593
        "psrlw $2, %%mm1                \n\t"\
594
        "pxor %%mm7, %%mm1              \n\t"\
595
        "psubb %%mm7, %%mm1             \n\t"\
596
        "movq %0, %%mm5                 \n\t"\
597
        "movq %3, %%mm6                 \n\t"\
598
        "psubb %%mm1, %%mm5             \n\t"\
599
        "paddb %%mm1, %%mm6             \n\t"
600

    
601
static void h263_v_loop_filter_mmx(uint8_t *src, int stride, int qscale){
602
    const int strength= ff_h263_loop_filter_strength[qscale];
603

    
604
    asm volatile(
605

    
606
        H263_LOOP_FILTER
607

    
608
        "movq %%mm3, %1                 \n\t"
609
        "movq %%mm4, %2                 \n\t"
610
        "movq %%mm5, %0                 \n\t"
611
        "movq %%mm6, %3                 \n\t"
612
        : "+m" (*(uint64_t*)(src - 2*stride)),
613
          "+m" (*(uint64_t*)(src - 1*stride)),
614
          "+m" (*(uint64_t*)(src + 0*stride)),
615
          "+m" (*(uint64_t*)(src + 1*stride))
616
        : "g" (2*strength), "m"(ff_pb_FC)
617
    );
618
}
619

    
620
static inline void transpose4x4(uint8_t *dst, uint8_t *src, int dst_stride, int src_stride){
621
    asm volatile( //FIXME could save 1 instruction if done as 8x4 ...
622
        "movd  %4, %%mm0                \n\t"
623
        "movd  %5, %%mm1                \n\t"
624
        "movd  %6, %%mm2                \n\t"
625
        "movd  %7, %%mm3                \n\t"
626
        "punpcklbw %%mm1, %%mm0         \n\t"
627
        "punpcklbw %%mm3, %%mm2         \n\t"
628
        "movq %%mm0, %%mm1              \n\t"
629
        "punpcklwd %%mm2, %%mm0         \n\t"
630
        "punpckhwd %%mm2, %%mm1         \n\t"
631
        "movd  %%mm0, %0                \n\t"
632
        "punpckhdq %%mm0, %%mm0         \n\t"
633
        "movd  %%mm0, %1                \n\t"
634
        "movd  %%mm1, %2                \n\t"
635
        "punpckhdq %%mm1, %%mm1         \n\t"
636
        "movd  %%mm1, %3                \n\t"
637

    
638
        : "=m" (*(uint32_t*)(dst + 0*dst_stride)),
639
          "=m" (*(uint32_t*)(dst + 1*dst_stride)),
640
          "=m" (*(uint32_t*)(dst + 2*dst_stride)),
641
          "=m" (*(uint32_t*)(dst + 3*dst_stride))
642
        :  "m" (*(uint32_t*)(src + 0*src_stride)),
643
           "m" (*(uint32_t*)(src + 1*src_stride)),
644
           "m" (*(uint32_t*)(src + 2*src_stride)),
645
           "m" (*(uint32_t*)(src + 3*src_stride))
646
    );
647
}
648

    
649
static void h263_h_loop_filter_mmx(uint8_t *src, int stride, int qscale){
650
    const int strength= ff_h263_loop_filter_strength[qscale];
651
    uint64_t temp[4] __attribute__ ((aligned(8)));
652
    uint8_t *btemp= (uint8_t*)temp;
653

    
654
    src -= 2;
655

    
656
    transpose4x4(btemp  , src           , 8, stride);
657
    transpose4x4(btemp+4, src + 4*stride, 8, stride);
658
    asm volatile(
659
        H263_LOOP_FILTER // 5 3 4 6
660

    
661
        : "+m" (temp[0]),
662
          "+m" (temp[1]),
663
          "+m" (temp[2]),
664
          "+m" (temp[3])
665
        : "g" (2*strength), "m"(ff_pb_FC)
666
    );
667

    
668
    asm volatile(
669
        "movq %%mm5, %%mm1              \n\t"
670
        "movq %%mm4, %%mm0              \n\t"
671
        "punpcklbw %%mm3, %%mm5         \n\t"
672
        "punpcklbw %%mm6, %%mm4         \n\t"
673
        "punpckhbw %%mm3, %%mm1         \n\t"
674
        "punpckhbw %%mm6, %%mm0         \n\t"
675
        "movq %%mm5, %%mm3              \n\t"
676
        "movq %%mm1, %%mm6              \n\t"
677
        "punpcklwd %%mm4, %%mm5         \n\t"
678
        "punpcklwd %%mm0, %%mm1         \n\t"
679
        "punpckhwd %%mm4, %%mm3         \n\t"
680
        "punpckhwd %%mm0, %%mm6         \n\t"
681
        "movd %%mm5, (%0)               \n\t"
682
        "punpckhdq %%mm5, %%mm5         \n\t"
683
        "movd %%mm5, (%0,%2)            \n\t"
684
        "movd %%mm3, (%0,%2,2)          \n\t"
685
        "punpckhdq %%mm3, %%mm3         \n\t"
686
        "movd %%mm3, (%0,%3)            \n\t"
687
        "movd %%mm1, (%1)               \n\t"
688
        "punpckhdq %%mm1, %%mm1         \n\t"
689
        "movd %%mm1, (%1,%2)            \n\t"
690
        "movd %%mm6, (%1,%2,2)          \n\t"
691
        "punpckhdq %%mm6, %%mm6         \n\t"
692
        "movd %%mm6, (%1,%3)            \n\t"
693
        :: "r" (src),
694
           "r" (src + 4*stride),
695
           "r" ((long)   stride ),
696
           "r" ((long)(3*stride))
697
    );
698
}
699

    
700
#ifdef CONFIG_ENCODERS
701
static int pix_norm1_mmx(uint8_t *pix, int line_size) {
702
    int tmp;
703
  asm volatile (
704
      "movl $16,%%ecx\n"
705
      "pxor %%mm0,%%mm0\n"
706
      "pxor %%mm7,%%mm7\n"
707
      "1:\n"
708
      "movq (%0),%%mm2\n"       /* mm2 = pix[0-7] */
709
      "movq 8(%0),%%mm3\n"      /* mm3 = pix[8-15] */
710

    
711
      "movq %%mm2,%%mm1\n"      /* mm1 = mm2 = pix[0-7] */
712

    
713
      "punpckhbw %%mm0,%%mm1\n" /* mm1 = [pix4-7] */
714
      "punpcklbw %%mm0,%%mm2\n" /* mm2 = [pix0-3] */
715

    
716
      "movq %%mm3,%%mm4\n"      /* mm4 = mm3 = pix[8-15] */
717
      "punpckhbw %%mm0,%%mm3\n" /* mm3 = [pix12-15] */
718
      "punpcklbw %%mm0,%%mm4\n" /* mm4 = [pix8-11] */
719

    
720
      "pmaddwd %%mm1,%%mm1\n"   /* mm1 = (pix0^2+pix1^2,pix2^2+pix3^2) */
721
      "pmaddwd %%mm2,%%mm2\n"   /* mm2 = (pix4^2+pix5^2,pix6^2+pix7^2) */
722

    
723
      "pmaddwd %%mm3,%%mm3\n"
724
      "pmaddwd %%mm4,%%mm4\n"
725

    
726
      "paddd %%mm1,%%mm2\n"     /* mm2 = (pix0^2+pix1^2+pix4^2+pix5^2,
727
                                          pix2^2+pix3^2+pix6^2+pix7^2) */
728
      "paddd %%mm3,%%mm4\n"
729
      "paddd %%mm2,%%mm7\n"
730

    
731
      "add %2, %0\n"
732
      "paddd %%mm4,%%mm7\n"
733
      "dec %%ecx\n"
734
      "jnz 1b\n"
735

    
736
      "movq %%mm7,%%mm1\n"
737
      "psrlq $32, %%mm7\n"      /* shift hi dword to lo */
738
      "paddd %%mm7,%%mm1\n"
739
      "movd %%mm1,%1\n"
740
      : "+r" (pix), "=r"(tmp) : "r" ((long)line_size) : "%ecx" );
741
    return tmp;
742
}
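/*
 * Illustrative scalar equivalent of pix_norm1_mmx() above (added commentary,
 * not part of the original file): the sum of squared pixel values of a
 * 16x16 block.
 */
#if 0
static int pix_norm1_sketch(uint8_t *pix, int line_size)
{
    int x, y, sum = 0;
    for (y = 0; y < 16; y++) {
        for (x = 0; x < 16; x++)
            sum += pix[x] * pix[x];
        pix += line_size;
    }
    return sum;
}
#endif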
743

    
744
static int sse8_mmx(void *v, uint8_t * pix1, uint8_t * pix2, int line_size, int h) {
745
    int tmp;
746
  asm volatile (
747
      "movl %4,%%ecx\n"
748
      "shr $1,%%ecx\n"
749
      "pxor %%mm0,%%mm0\n"      /* mm0 = 0 */
750
      "pxor %%mm7,%%mm7\n"      /* mm7 holds the sum */
751
      "1:\n"
752
      "movq (%0),%%mm1\n"       /* mm1 = pix1[0][0-7] */
753
      "movq (%1),%%mm2\n"       /* mm2 = pix2[0][0-7] */
754
      "movq (%0,%3),%%mm3\n"    /* mm3 = pix1[1][0-7] */
755
      "movq (%1,%3),%%mm4\n"    /* mm4 = pix2[1][0-7] */
756

    
757
      /* todo: mm1-mm2, mm3-mm4 */
758
      /* algo: subtract mm1 from mm2 with saturation and vice versa */
759
      /*       OR the results to get absolute difference */
760
      "movq %%mm1,%%mm5\n"
761
      "movq %%mm3,%%mm6\n"
762
      "psubusb %%mm2,%%mm1\n"
763
      "psubusb %%mm4,%%mm3\n"
764
      "psubusb %%mm5,%%mm2\n"
765
      "psubusb %%mm6,%%mm4\n"
766

    
767
      "por %%mm1,%%mm2\n"
768
      "por %%mm3,%%mm4\n"
769

    
770
      /* now convert to 16-bit vectors so we can square them */
771
      "movq %%mm2,%%mm1\n"
772
      "movq %%mm4,%%mm3\n"
773

    
774
      "punpckhbw %%mm0,%%mm2\n"
775
      "punpckhbw %%mm0,%%mm4\n"
776
      "punpcklbw %%mm0,%%mm1\n" /* mm1 now spread over (mm1,mm2) */
777
      "punpcklbw %%mm0,%%mm3\n" /* mm4 now spread over (mm3,mm4) */
778

    
779
      "pmaddwd %%mm2,%%mm2\n"
780
      "pmaddwd %%mm4,%%mm4\n"
781
      "pmaddwd %%mm1,%%mm1\n"
782
      "pmaddwd %%mm3,%%mm3\n"
783

    
784
      "lea (%0,%3,2), %0\n"     /* pix1 += 2*line_size */
785
      "lea (%1,%3,2), %1\n"     /* pix2 += 2*line_size */
786

    
787
      "paddd %%mm2,%%mm1\n"
788
      "paddd %%mm4,%%mm3\n"
789
      "paddd %%mm1,%%mm7\n"
790
      "paddd %%mm3,%%mm7\n"
791

    
792
      "decl %%ecx\n"
793
      "jnz 1b\n"
794

    
795
      "movq %%mm7,%%mm1\n"
796
      "psrlq $32, %%mm7\n"      /* shift hi dword to lo */
797
      "paddd %%mm7,%%mm1\n"
798
      "movd %%mm1,%2\n"
799
      : "+r" (pix1), "+r" (pix2), "=r"(tmp)
800
      : "r" ((long)line_size) , "m" (h)
801
      : "%ecx");
802
    return tmp;
803
}
804

    
805
static int sse16_mmx(void *v, uint8_t * pix1, uint8_t * pix2, int line_size, int h) {
806
    int tmp;
807
  asm volatile (
808
      "movl %4,%%ecx\n"
809
      "pxor %%mm0,%%mm0\n"      /* mm0 = 0 */
810
      "pxor %%mm7,%%mm7\n"      /* mm7 holds the sum */
811
      "1:\n"
812
      "movq (%0),%%mm1\n"       /* mm1 = pix1[0-7] */
813
      "movq (%1),%%mm2\n"       /* mm2 = pix2[0-7] */
814
      "movq 8(%0),%%mm3\n"      /* mm3 = pix1[8-15] */
815
      "movq 8(%1),%%mm4\n"      /* mm4 = pix2[8-15] */
816

    
817
      /* todo: mm1-mm2, mm3-mm4 */
818
      /* algo: subtract mm1 from mm2 with saturation and vice versa */
819
      /*       OR the results to get absolute difference */
820
      "movq %%mm1,%%mm5\n"
821
      "movq %%mm3,%%mm6\n"
822
      "psubusb %%mm2,%%mm1\n"
823
      "psubusb %%mm4,%%mm3\n"
824
      "psubusb %%mm5,%%mm2\n"
825
      "psubusb %%mm6,%%mm4\n"
826

    
827
      "por %%mm1,%%mm2\n"
828
      "por %%mm3,%%mm4\n"
829

    
830
      /* now convert to 16-bit vectors so we can square them */
831
      "movq %%mm2,%%mm1\n"
832
      "movq %%mm4,%%mm3\n"
833

    
834
      "punpckhbw %%mm0,%%mm2\n"
835
      "punpckhbw %%mm0,%%mm4\n"
836
      "punpcklbw %%mm0,%%mm1\n" /* mm1 now spread over (mm1,mm2) */
837
      "punpcklbw %%mm0,%%mm3\n" /* mm4 now spread over (mm3,mm4) */
838

    
839
      "pmaddwd %%mm2,%%mm2\n"
840
      "pmaddwd %%mm4,%%mm4\n"
841
      "pmaddwd %%mm1,%%mm1\n"
842
      "pmaddwd %%mm3,%%mm3\n"
843

    
844
      "add %3,%0\n"
845
      "add %3,%1\n"
846

    
847
      "paddd %%mm2,%%mm1\n"
848
      "paddd %%mm4,%%mm3\n"
849
      "paddd %%mm1,%%mm7\n"
850
      "paddd %%mm3,%%mm7\n"
851

    
852
      "decl %%ecx\n"
853
      "jnz 1b\n"
854

    
855
      "movq %%mm7,%%mm1\n"
856
      "psrlq $32, %%mm7\n"      /* shift hi dword to lo */
857
      "paddd %%mm7,%%mm1\n"
858
      "movd %%mm1,%2\n"
859
      : "+r" (pix1), "+r" (pix2), "=r"(tmp)
860
      : "r" ((long)line_size) , "m" (h)
861
      : "%ecx");
862
    return tmp;
863
}
864

    
865
static int sse16_sse2(void *v, uint8_t * pix1, uint8_t * pix2, int line_size, int h) {
866
    int tmp;
867
  asm volatile (
868
      "shr $1,%2\n"
869
      "pxor %%xmm0,%%xmm0\n"    /* mm0 = 0 */
870
      "pxor %%xmm7,%%xmm7\n"    /* mm7 holds the sum */
871
      "1:\n"
872
      "movdqu (%0),%%xmm1\n"    /* mm1 = pix1[0][0-15] */
873
      "movdqu (%1),%%xmm2\n"    /* mm2 = pix2[0][0-15] */
874
      "movdqu (%0,%4),%%xmm3\n" /* mm3 = pix1[1][0-15] */
875
      "movdqu (%1,%4),%%xmm4\n" /* mm4 = pix2[1][0-15] */
876

    
877
      /* todo: mm1-mm2, mm3-mm4 */
878
      /* algo: subtract mm1 from mm2 with saturation and vice versa */
879
      /*       OR the results to get absolute difference */
880
      "movdqa %%xmm1,%%xmm5\n"
881
      "movdqa %%xmm3,%%xmm6\n"
882
      "psubusb %%xmm2,%%xmm1\n"
883
      "psubusb %%xmm4,%%xmm3\n"
884
      "psubusb %%xmm5,%%xmm2\n"
885
      "psubusb %%xmm6,%%xmm4\n"
886

    
887
      "por %%xmm1,%%xmm2\n"
888
      "por %%xmm3,%%xmm4\n"
889

    
890
      /* now convert to 16-bit vectors so we can square them */
891
      "movdqa %%xmm2,%%xmm1\n"
892
      "movdqa %%xmm4,%%xmm3\n"
893

    
894
      "punpckhbw %%xmm0,%%xmm2\n"
895
      "punpckhbw %%xmm0,%%xmm4\n"
896
      "punpcklbw %%xmm0,%%xmm1\n"  /* mm1 now spread over (mm1,mm2) */
897
      "punpcklbw %%xmm0,%%xmm3\n"  /* mm4 now spread over (mm3,mm4) */
898

    
899
      "pmaddwd %%xmm2,%%xmm2\n"
900
      "pmaddwd %%xmm4,%%xmm4\n"
901
      "pmaddwd %%xmm1,%%xmm1\n"
902
      "pmaddwd %%xmm3,%%xmm3\n"
903

    
904
      "lea (%0,%4,2), %0\n"        /* pix1 += 2*line_size */
905
      "lea (%1,%4,2), %1\n"        /* pix2 += 2*line_size */
906

    
907
      "paddd %%xmm2,%%xmm1\n"
908
      "paddd %%xmm4,%%xmm3\n"
909
      "paddd %%xmm1,%%xmm7\n"
910
      "paddd %%xmm3,%%xmm7\n"
911

    
912
      "decl %2\n"
913
      "jnz 1b\n"
914

    
915
      "movdqa %%xmm7,%%xmm1\n"
916
      "psrldq $8, %%xmm7\n"        /* shift hi qword to lo */
917
      "paddd %%xmm1,%%xmm7\n"
918
      "movdqa %%xmm7,%%xmm1\n"
919
      "psrldq $4, %%xmm7\n"        /* shift hi dword to lo */
920
      "paddd %%xmm1,%%xmm7\n"
921
      "movd %%xmm7,%3\n"
922
      : "+r" (pix1), "+r" (pix2), "+r"(h), "=r"(tmp)
923
      : "r" ((long)line_size));
924
    return tmp;
925
}
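/*
 * Illustrative scalar equivalent of the sse8/sse16 functions above (added
 * commentary, not part of the original file): the sum of squared differences
 * over a w x h block; the asm obtains |a-b| per byte as the OR of the two
 * saturated subtractions before squaring.
 */
#if 0
static int sse_sketch(uint8_t *pix1, uint8_t *pix2, int line_size, int w, int h)
{
    int x, y, sum = 0;
    for (y = 0; y < h; y++) {
        for (x = 0; x < w; x++) {
            int d = pix1[x] - pix2[x];
            sum += d * d;
        }
        pix1 += line_size;
        pix2 += line_size;
    }
    return sum;
}
#endif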
926

    
927
static int hf_noise8_mmx(uint8_t * pix1, int line_size, int h) {
928
    int tmp;
929
  asm volatile (
930
      "movl %3,%%ecx\n"
931
      "pxor %%mm7,%%mm7\n"
932
      "pxor %%mm6,%%mm6\n"
933

    
934
      "movq (%0),%%mm0\n"
935
      "movq %%mm0, %%mm1\n"
936
      "psllq $8, %%mm0\n"
937
      "psrlq $8, %%mm1\n"
938
      "psrlq $8, %%mm0\n"
939
      "movq %%mm0, %%mm2\n"
940
      "movq %%mm1, %%mm3\n"
941
      "punpcklbw %%mm7,%%mm0\n"
942
      "punpcklbw %%mm7,%%mm1\n"
943
      "punpckhbw %%mm7,%%mm2\n"
944
      "punpckhbw %%mm7,%%mm3\n"
945
      "psubw %%mm1, %%mm0\n"
946
      "psubw %%mm3, %%mm2\n"
947

    
948
      "add %2,%0\n"
949

    
950
      "movq (%0),%%mm4\n"
951
      "movq %%mm4, %%mm1\n"
952
      "psllq $8, %%mm4\n"
953
      "psrlq $8, %%mm1\n"
954
      "psrlq $8, %%mm4\n"
955
      "movq %%mm4, %%mm5\n"
956
      "movq %%mm1, %%mm3\n"
957
      "punpcklbw %%mm7,%%mm4\n"
958
      "punpcklbw %%mm7,%%mm1\n"
959
      "punpckhbw %%mm7,%%mm5\n"
960
      "punpckhbw %%mm7,%%mm3\n"
961
      "psubw %%mm1, %%mm4\n"
962
      "psubw %%mm3, %%mm5\n"
963
      "psubw %%mm4, %%mm0\n"
964
      "psubw %%mm5, %%mm2\n"
965
      "pxor %%mm3, %%mm3\n"
966
      "pxor %%mm1, %%mm1\n"
967
      "pcmpgtw %%mm0, %%mm3\n\t"
968
      "pcmpgtw %%mm2, %%mm1\n\t"
969
      "pxor %%mm3, %%mm0\n"
970
      "pxor %%mm1, %%mm2\n"
971
      "psubw %%mm3, %%mm0\n"
972
      "psubw %%mm1, %%mm2\n"
973
      "paddw %%mm0, %%mm2\n"
974
      "paddw %%mm2, %%mm6\n"
975

    
976
      "add %2,%0\n"
977
      "1:\n"
978

    
979
      "movq (%0),%%mm0\n"
980
      "movq %%mm0, %%mm1\n"
981
      "psllq $8, %%mm0\n"
982
      "psrlq $8, %%mm1\n"
983
      "psrlq $8, %%mm0\n"
984
      "movq %%mm0, %%mm2\n"
985
      "movq %%mm1, %%mm3\n"
986
      "punpcklbw %%mm7,%%mm0\n"
987
      "punpcklbw %%mm7,%%mm1\n"
988
      "punpckhbw %%mm7,%%mm2\n"
989
      "punpckhbw %%mm7,%%mm3\n"
990
      "psubw %%mm1, %%mm0\n"
991
      "psubw %%mm3, %%mm2\n"
992
      "psubw %%mm0, %%mm4\n"
993
      "psubw %%mm2, %%mm5\n"
994
      "pxor %%mm3, %%mm3\n"
995
      "pxor %%mm1, %%mm1\n"
996
      "pcmpgtw %%mm4, %%mm3\n\t"
997
      "pcmpgtw %%mm5, %%mm1\n\t"
998
      "pxor %%mm3, %%mm4\n"
999
      "pxor %%mm1, %%mm5\n"
1000
      "psubw %%mm3, %%mm4\n"
1001
      "psubw %%mm1, %%mm5\n"
1002
      "paddw %%mm4, %%mm5\n"
1003
      "paddw %%mm5, %%mm6\n"
1004

    
1005
      "add %2,%0\n"
1006

    
1007
      "movq (%0),%%mm4\n"
1008
      "movq %%mm4, %%mm1\n"
1009
      "psllq $8, %%mm4\n"
1010
      "psrlq $8, %%mm1\n"
1011
      "psrlq $8, %%mm4\n"
1012
      "movq %%mm4, %%mm5\n"
1013
      "movq %%mm1, %%mm3\n"
1014
      "punpcklbw %%mm7,%%mm4\n"
1015
      "punpcklbw %%mm7,%%mm1\n"
1016
      "punpckhbw %%mm7,%%mm5\n"
1017
      "punpckhbw %%mm7,%%mm3\n"
1018
      "psubw %%mm1, %%mm4\n"
1019
      "psubw %%mm3, %%mm5\n"
1020
      "psubw %%mm4, %%mm0\n"
1021
      "psubw %%mm5, %%mm2\n"
1022
      "pxor %%mm3, %%mm3\n"
1023
      "pxor %%mm1, %%mm1\n"
1024
      "pcmpgtw %%mm0, %%mm3\n\t"
1025
      "pcmpgtw %%mm2, %%mm1\n\t"
1026
      "pxor %%mm3, %%mm0\n"
1027
      "pxor %%mm1, %%mm2\n"
1028
      "psubw %%mm3, %%mm0\n"
1029
      "psubw %%mm1, %%mm2\n"
1030
      "paddw %%mm0, %%mm2\n"
1031
      "paddw %%mm2, %%mm6\n"
1032

    
1033
      "add %2,%0\n"
1034
      "subl $2, %%ecx\n"
1035
      " jnz 1b\n"
1036

    
1037
      "movq %%mm6, %%mm0\n"
1038
      "punpcklwd %%mm7,%%mm0\n"
1039
      "punpckhwd %%mm7,%%mm6\n"
1040
      "paddd %%mm0, %%mm6\n"
1041

    
1042
      "movq %%mm6,%%mm0\n"
1043
      "psrlq $32, %%mm6\n"
1044
      "paddd %%mm6,%%mm0\n"
1045
      "movd %%mm0,%1\n"
1046
      : "+r" (pix1), "=r"(tmp)
1047
      : "r" ((long)line_size) , "g" (h-2)
1048
      : "%ecx");
1049
      return tmp;
1050
}
1051

    
1052
static int hf_noise16_mmx(uint8_t * pix1, int line_size, int h) {
1053
    int tmp;
1054
    uint8_t * pix= pix1;
1055
  asm volatile (
1056
      "movl %3,%%ecx\n"
1057
      "pxor %%mm7,%%mm7\n"
1058
      "pxor %%mm6,%%mm6\n"
1059

    
1060
      "movq (%0),%%mm0\n"
1061
      "movq 1(%0),%%mm1\n"
1062
      "movq %%mm0, %%mm2\n"
1063
      "movq %%mm1, %%mm3\n"
1064
      "punpcklbw %%mm7,%%mm0\n"
1065
      "punpcklbw %%mm7,%%mm1\n"
1066
      "punpckhbw %%mm7,%%mm2\n"
1067
      "punpckhbw %%mm7,%%mm3\n"
1068
      "psubw %%mm1, %%mm0\n"
1069
      "psubw %%mm3, %%mm2\n"
1070

    
1071
      "add %2,%0\n"
1072

    
1073
      "movq (%0),%%mm4\n"
1074
      "movq 1(%0),%%mm1\n"
1075
      "movq %%mm4, %%mm5\n"
1076
      "movq %%mm1, %%mm3\n"
1077
      "punpcklbw %%mm7,%%mm4\n"
1078
      "punpcklbw %%mm7,%%mm1\n"
1079
      "punpckhbw %%mm7,%%mm5\n"
1080
      "punpckhbw %%mm7,%%mm3\n"
1081
      "psubw %%mm1, %%mm4\n"
1082
      "psubw %%mm3, %%mm5\n"
1083
      "psubw %%mm4, %%mm0\n"
1084
      "psubw %%mm5, %%mm2\n"
1085
      "pxor %%mm3, %%mm3\n"
1086
      "pxor %%mm1, %%mm1\n"
1087
      "pcmpgtw %%mm0, %%mm3\n\t"
1088
      "pcmpgtw %%mm2, %%mm1\n\t"
1089
      "pxor %%mm3, %%mm0\n"
1090
      "pxor %%mm1, %%mm2\n"
1091
      "psubw %%mm3, %%mm0\n"
1092
      "psubw %%mm1, %%mm2\n"
1093
      "paddw %%mm0, %%mm2\n"
1094
      "paddw %%mm2, %%mm6\n"
1095

    
1096
      "add %2,%0\n"
1097
      "1:\n"
1098

    
1099
      "movq (%0),%%mm0\n"
1100
      "movq 1(%0),%%mm1\n"
1101
      "movq %%mm0, %%mm2\n"
1102
      "movq %%mm1, %%mm3\n"
1103
      "punpcklbw %%mm7,%%mm0\n"
1104
      "punpcklbw %%mm7,%%mm1\n"
1105
      "punpckhbw %%mm7,%%mm2\n"
1106
      "punpckhbw %%mm7,%%mm3\n"
1107
      "psubw %%mm1, %%mm0\n"
1108
      "psubw %%mm3, %%mm2\n"
1109
      "psubw %%mm0, %%mm4\n"
1110
      "psubw %%mm2, %%mm5\n"
1111
      "pxor %%mm3, %%mm3\n"
1112
      "pxor %%mm1, %%mm1\n"
1113
      "pcmpgtw %%mm4, %%mm3\n\t"
1114
      "pcmpgtw %%mm5, %%mm1\n\t"
1115
      "pxor %%mm3, %%mm4\n"
1116
      "pxor %%mm1, %%mm5\n"
1117
      "psubw %%mm3, %%mm4\n"
1118
      "psubw %%mm1, %%mm5\n"
1119
      "paddw %%mm4, %%mm5\n"
1120
      "paddw %%mm5, %%mm6\n"
1121

    
1122
      "add %2,%0\n"
1123

    
1124
      "movq (%0),%%mm4\n"
1125
      "movq 1(%0),%%mm1\n"
1126
      "movq %%mm4, %%mm5\n"
1127
      "movq %%mm1, %%mm3\n"
1128
      "punpcklbw %%mm7,%%mm4\n"
1129
      "punpcklbw %%mm7,%%mm1\n"
1130
      "punpckhbw %%mm7,%%mm5\n"
1131
      "punpckhbw %%mm7,%%mm3\n"
1132
      "psubw %%mm1, %%mm4\n"
1133
      "psubw %%mm3, %%mm5\n"
1134
      "psubw %%mm4, %%mm0\n"
1135
      "psubw %%mm5, %%mm2\n"
1136
      "pxor %%mm3, %%mm3\n"
1137
      "pxor %%mm1, %%mm1\n"
1138
      "pcmpgtw %%mm0, %%mm3\n\t"
1139
      "pcmpgtw %%mm2, %%mm1\n\t"
1140
      "pxor %%mm3, %%mm0\n"
1141
      "pxor %%mm1, %%mm2\n"
1142
      "psubw %%mm3, %%mm0\n"
1143
      "psubw %%mm1, %%mm2\n"
1144
      "paddw %%mm0, %%mm2\n"
1145
      "paddw %%mm2, %%mm6\n"
1146

    
1147
      "add %2,%0\n"
1148
      "subl $2, %%ecx\n"
1149
      " jnz 1b\n"
1150

    
1151
      "movq %%mm6, %%mm0\n"
1152
      "punpcklwd %%mm7,%%mm0\n"
1153
      "punpckhwd %%mm7,%%mm6\n"
1154
      "paddd %%mm0, %%mm6\n"
1155

    
1156
      "movq %%mm6,%%mm0\n"
1157
      "psrlq $32, %%mm6\n"
1158
      "paddd %%mm6,%%mm0\n"
1159
      "movd %%mm0,%1\n"
1160
      : "+r" (pix1), "=r"(tmp)
1161
      : "r" ((long)line_size) , "g" (h-2)
1162
      : "%ecx");
1163
      return tmp + hf_noise8_mmx(pix+8, line_size, h);
1164
}
1165

    
1166
static int nsse16_mmx(void *p, uint8_t * pix1, uint8_t * pix2, int line_size, int h) {
1167
    MpegEncContext *c = p;
1168
    int score1, score2;
1169

    
1170
    if(c) score1 = c->dsp.sse[0](c, pix1, pix2, line_size, h);
1171
    else  score1 = sse16_mmx(c, pix1, pix2, line_size, h);
1172
    score2= hf_noise16_mmx(pix1, line_size, h) - hf_noise16_mmx(pix2, line_size, h);
1173

    
1174
    if(c) return score1 + ABS(score2)*c->avctx->nsse_weight;
1175
    else  return score1 + ABS(score2)*8;
1176
}
1177

    
1178
static int nsse8_mmx(void *p, uint8_t * pix1, uint8_t * pix2, int line_size, int h) {
1179
    MpegEncContext *c = p;
1180
    int score1= sse8_mmx(c, pix1, pix2, line_size, h);
1181
    int score2= hf_noise8_mmx(pix1, line_size, h) - hf_noise8_mmx(pix2, line_size, h);
1182

    
1183
    if(c) return score1 + ABS(score2)*c->avctx->nsse_weight;
1184
    else  return score1 + ABS(score2)*8;
1185
}
1186

    
1187
static int vsad_intra16_mmx(void *v, uint8_t * pix, uint8_t * dummy, int line_size, int h) {
1188
    int tmp;
1189

    
1190
    assert( (((int)pix) & 7) == 0);
1191
    assert((line_size &7) ==0);
1192

    
1193
#define SUM(in0, in1, out0, out1) \
1194
      "movq (%0), %%mm2\n"\
1195
      "movq 8(%0), %%mm3\n"\
1196
      "add %2,%0\n"\
1197
      "movq %%mm2, " #out0 "\n"\
1198
      "movq %%mm3, " #out1 "\n"\
1199
      "psubusb " #in0 ", %%mm2\n"\
1200
      "psubusb " #in1 ", %%mm3\n"\
1201
      "psubusb " #out0 ", " #in0 "\n"\
1202
      "psubusb " #out1 ", " #in1 "\n"\
1203
      "por %%mm2, " #in0 "\n"\
1204
      "por %%mm3, " #in1 "\n"\
1205
      "movq " #in0 ", %%mm2\n"\
1206
      "movq " #in1 ", %%mm3\n"\
1207
      "punpcklbw %%mm7, " #in0 "\n"\
1208
      "punpcklbw %%mm7, " #in1 "\n"\
1209
      "punpckhbw %%mm7, %%mm2\n"\
1210
      "punpckhbw %%mm7, %%mm3\n"\
1211
      "paddw " #in1 ", " #in0 "\n"\
1212
      "paddw %%mm3, %%mm2\n"\
1213
      "paddw %%mm2, " #in0 "\n"\
1214
      "paddw " #in0 ", %%mm6\n"
1215

    
1216

    
1217
  asm volatile (
1218
      "movl %3,%%ecx\n"
1219
      "pxor %%mm6,%%mm6\n"
1220
      "pxor %%mm7,%%mm7\n"
1221
      "movq (%0),%%mm0\n"
1222
      "movq 8(%0),%%mm1\n"
1223
      "add %2,%0\n"
1224
      "subl $2, %%ecx\n"
1225
      SUM(%%mm0, %%mm1, %%mm4, %%mm5)
1226
      "1:\n"
1227

    
1228
      SUM(%%mm4, %%mm5, %%mm0, %%mm1)
1229

    
1230
      SUM(%%mm0, %%mm1, %%mm4, %%mm5)
1231

    
1232
      "subl $2, %%ecx\n"
1233
      "jnz 1b\n"
1234

    
1235
      "movq %%mm6,%%mm0\n"
1236
      "psrlq $32, %%mm6\n"
1237
      "paddw %%mm6,%%mm0\n"
1238
      "movq %%mm0,%%mm6\n"
1239
      "psrlq $16, %%mm0\n"
1240
      "paddw %%mm6,%%mm0\n"
1241
      "movd %%mm0,%1\n"
1242
      : "+r" (pix), "=r"(tmp)
1243
      : "r" ((long)line_size) , "m" (h)
1244
      : "%ecx");
1245
    return tmp & 0xFFFF;
1246
}
1247
#undef SUM
1248

    
1249
static int vsad_intra16_mmx2(void *v, uint8_t * pix, uint8_t * dummy, int line_size, int h) {
1250
    int tmp;
1251

    
1252
    assert( (((int)pix) & 7) == 0);
1253
    assert((line_size &7) ==0);
1254

    
1255
#define SUM(in0, in1, out0, out1) \
1256
      "movq (%0), " #out0 "\n"\
1257
      "movq 8(%0), " #out1 "\n"\
1258
      "add %2,%0\n"\
1259
      "psadbw " #out0 ", " #in0 "\n"\
1260
      "psadbw " #out1 ", " #in1 "\n"\
1261
      "paddw " #in1 ", " #in0 "\n"\
1262
      "paddw " #in0 ", %%mm6\n"
1263

    
1264
  asm volatile (
1265
      "movl %3,%%ecx\n"
1266
      "pxor %%mm6,%%mm6\n"
1267
      "pxor %%mm7,%%mm7\n"
1268
      "movq (%0),%%mm0\n"
1269
      "movq 8(%0),%%mm1\n"
1270
      "add %2,%0\n"
1271
      "subl $2, %%ecx\n"
1272
      SUM(%%mm0, %%mm1, %%mm4, %%mm5)
1273
      "1:\n"
1274

    
1275
      SUM(%%mm4, %%mm5, %%mm0, %%mm1)
1276

    
1277
      SUM(%%mm0, %%mm1, %%mm4, %%mm5)
1278

    
1279
      "subl $2, %%ecx\n"
1280
      "jnz 1b\n"
1281

    
1282
      "movd %%mm6,%1\n"
1283
      : "+r" (pix), "=r"(tmp)
1284
      : "r" ((long)line_size) , "m" (h)
1285
      : "%ecx");
1286
    return tmp;
1287
}
1288
#undef SUM
1289

    
1290
static int vsad16_mmx(void *v, uint8_t * pix1, uint8_t * pix2, int line_size, int h) {
1291
    int tmp;
1292

    
1293
    assert( (((int)pix1) & 7) == 0);
1294
    assert( (((int)pix2) & 7) == 0);
1295
    assert((line_size &7) ==0);
1296

    
1297
#define SUM(in0, in1, out0, out1) \
1298
      "movq (%0),%%mm2\n"\
1299
      "movq (%1)," #out0 "\n"\
1300
      "movq 8(%0),%%mm3\n"\
1301
      "movq 8(%1)," #out1 "\n"\
1302
      "add %3,%0\n"\
1303
      "add %3,%1\n"\
1304
      "psubb " #out0 ", %%mm2\n"\
1305
      "psubb " #out1 ", %%mm3\n"\
1306
      "pxor %%mm7, %%mm2\n"\
1307
      "pxor %%mm7, %%mm3\n"\
1308
      "movq %%mm2, " #out0 "\n"\
1309
      "movq %%mm3, " #out1 "\n"\
1310
      "psubusb " #in0 ", %%mm2\n"\
1311
      "psubusb " #in1 ", %%mm3\n"\
1312
      "psubusb " #out0 ", " #in0 "\n"\
1313
      "psubusb " #out1 ", " #in1 "\n"\
1314
      "por %%mm2, " #in0 "\n"\
1315
      "por %%mm3, " #in1 "\n"\
1316
      "movq " #in0 ", %%mm2\n"\
1317
      "movq " #in1 ", %%mm3\n"\
1318
      "punpcklbw %%mm7, " #in0 "\n"\
1319
      "punpcklbw %%mm7, " #in1 "\n"\
1320
      "punpckhbw %%mm7, %%mm2\n"\
1321
      "punpckhbw %%mm7, %%mm3\n"\
1322
      "paddw " #in1 ", " #in0 "\n"\
1323
      "paddw %%mm3, %%mm2\n"\
1324
      "paddw %%mm2, " #in0 "\n"\
1325
      "paddw " #in0 ", %%mm6\n"
1326

    
1327

    
1328
  asm volatile (
1329
      "movl %4,%%ecx\n"
1330
      "pxor %%mm6,%%mm6\n"
1331
      "pcmpeqw %%mm7,%%mm7\n"
1332
      "psllw $15, %%mm7\n"
1333
      "packsswb %%mm7, %%mm7\n"
1334
      "movq (%0),%%mm0\n"
1335
      "movq (%1),%%mm2\n"
1336
      "movq 8(%0),%%mm1\n"
1337
      "movq 8(%1),%%mm3\n"
1338
      "add %3,%0\n"
1339
      "add %3,%1\n"
1340
      "subl $2, %%ecx\n"
1341
      "psubb %%mm2, %%mm0\n"
1342
      "psubb %%mm3, %%mm1\n"
1343
      "pxor %%mm7, %%mm0\n"
1344
      "pxor %%mm7, %%mm1\n"
1345
      SUM(%%mm0, %%mm1, %%mm4, %%mm5)
1346
      "1:\n"
1347

    
1348
      SUM(%%mm4, %%mm5, %%mm0, %%mm1)
1349

    
1350
      SUM(%%mm0, %%mm1, %%mm4, %%mm5)
1351

    
1352
      "subl $2, %%ecx\n"
1353
      "jnz 1b\n"
1354

    
1355
      "movq %%mm6,%%mm0\n"
1356
      "psrlq $32, %%mm6\n"
1357
      "paddw %%mm6,%%mm0\n"
1358
      "movq %%mm0,%%mm6\n"
1359
      "psrlq $16, %%mm0\n"
1360
      "paddw %%mm6,%%mm0\n"
1361
      "movd %%mm0,%2\n"
1362
      : "+r" (pix1), "+r" (pix2), "=r"(tmp)
1363
      : "r" ((long)line_size) , "m" (h)
1364
      : "%ecx");
1365
    return tmp & 0x7FFF;
1366
}
1367
#undef SUM
1368

    
1369
static int vsad16_mmx2(void *v, uint8_t * pix1, uint8_t * pix2, int line_size, int h) {
1370
    int tmp;
1371

    
1372
    assert( (((int)pix1) & 7) == 0);
1373
    assert( (((int)pix2) & 7) == 0);
1374
    assert((line_size &7) ==0);
1375

    
1376
#define SUM(in0, in1, out0, out1) \
1377
      "movq (%0)," #out0 "\n"\
1378
      "movq (%1),%%mm2\n"\
1379
      "movq 8(%0)," #out1 "\n"\
1380
      "movq 8(%1),%%mm3\n"\
1381
      "add %3,%0\n"\
1382
      "add %3,%1\n"\
1383
      "psubb %%mm2, " #out0 "\n"\
1384
      "psubb %%mm3, " #out1 "\n"\
1385
      "pxor %%mm7, " #out0 "\n"\
1386
      "pxor %%mm7, " #out1 "\n"\
1387
      "psadbw " #out0 ", " #in0 "\n"\
1388
      "psadbw " #out1 ", " #in1 "\n"\
1389
      "paddw " #in1 ", " #in0 "\n"\
1390
      "paddw " #in0 ", %%mm6\n"
1391

    
1392
  asm volatile (
1393
      "movl %4,%%ecx\n"
1394
      "pxor %%mm6,%%mm6\n"
1395
      "pcmpeqw %%mm7,%%mm7\n"
1396
      "psllw $15, %%mm7\n"
1397
      "packsswb %%mm7, %%mm7\n"
1398
      "movq (%0),%%mm0\n"
1399
      "movq (%1),%%mm2\n"
1400
      "movq 8(%0),%%mm1\n"
1401
      "movq 8(%1),%%mm3\n"
1402
      "add %3,%0\n"
1403
      "add %3,%1\n"
1404
      "subl $2, %%ecx\n"
1405
      "psubb %%mm2, %%mm0\n"
1406
      "psubb %%mm3, %%mm1\n"
1407
      "pxor %%mm7, %%mm0\n"
1408
      "pxor %%mm7, %%mm1\n"
1409
      SUM(%%mm0, %%mm1, %%mm4, %%mm5)
1410
      "1:\n"
1411

    
1412
      SUM(%%mm4, %%mm5, %%mm0, %%mm1)
1413

    
1414
      SUM(%%mm0, %%mm1, %%mm4, %%mm5)
1415

    
1416
      "subl $2, %%ecx\n"
1417
      "jnz 1b\n"
1418

    
1419
      "movd %%mm6,%2\n"
1420
      : "+r" (pix1), "+r" (pix2), "=r"(tmp)
1421
      : "r" ((long)line_size) , "m" (h)
1422
      : "%ecx");
1423
    return tmp;
1424
}
1425
#undef SUM
1426

    
1427
static void diff_bytes_mmx(uint8_t *dst, uint8_t *src1, uint8_t *src2, int w){
1428
    long i=0;
1429
    asm volatile(
1430
        "1:                             \n\t"
1431
        "movq  (%2, %0), %%mm0          \n\t"
1432
        "movq  (%1, %0), %%mm1          \n\t"
1433
        "psubb %%mm0, %%mm1             \n\t"
1434
        "movq %%mm1, (%3, %0)           \n\t"
1435
        "movq 8(%2, %0), %%mm0          \n\t"
1436
        "movq 8(%1, %0), %%mm1          \n\t"
1437
        "psubb %%mm0, %%mm1             \n\t"
1438
        "movq %%mm1, 8(%3, %0)          \n\t"
1439
        "add $16, %0                    \n\t"
1440
        "cmp %4, %0                     \n\t"
1441
        " jb 1b                         \n\t"
1442
        : "+r" (i)
1443
        : "r"(src1), "r"(src2), "r"(dst), "r"((long)w-15)
1444
    );
1445
    for(; i<w; i++)
1446
        dst[i+0] = src1[i+0]-src2[i+0];
1447
}
1448

    
1449
static void sub_hfyu_median_prediction_mmx2(uint8_t *dst, uint8_t *src1, uint8_t *src2, int w, int *left, int *left_top){
1450
    long i=0;
1451
    uint8_t l, lt;
1452

    
1453
    asm volatile(
1454
        "1:                             \n\t"
1455
        "movq  -1(%1, %0), %%mm0        \n\t" // LT
1456
        "movq  (%1, %0), %%mm1          \n\t" // T
1457
        "movq  -1(%2, %0), %%mm2        \n\t" // L
1458
        "movq  (%2, %0), %%mm3          \n\t" // X
1459
        "movq %%mm2, %%mm4              \n\t" // L
1460
        "psubb %%mm0, %%mm2             \n\t"
1461
        "paddb %%mm1, %%mm2             \n\t" // L + T - LT
1462
        "movq %%mm4, %%mm5              \n\t" // L
1463
        "pmaxub %%mm1, %%mm4            \n\t" // max(T, L)
1464
        "pminub %%mm5, %%mm1            \n\t" // min(T, L)
1465
        "pminub %%mm2, %%mm4            \n\t"
1466
        "pmaxub %%mm1, %%mm4            \n\t"
1467
        "psubb %%mm4, %%mm3             \n\t" // dst - pred
1468
        "movq %%mm3, (%3, %0)           \n\t"
1469
        "add $8, %0                     \n\t"
1470
        "cmp %4, %0                     \n\t"
1471
        " jb 1b                         \n\t"
1472
        : "+r" (i)
1473
        : "r"(src1), "r"(src2), "r"(dst), "r"((long)w)
1474
    );
1475

    
1476
    l= *left;
1477
    lt= *left_top;
1478

    
1479
    dst[0]= src2[0] - mid_pred(l, src1[0], (l + src1[0] - lt)&0xFF);
1480

    
1481
    *left_top= src1[w-1];
1482
    *left    = src2[w-1];
1483
}
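/*
 * Illustrative scalar equivalent of sub_hfyu_median_prediction_mmx2() above
 * (added commentary, not part of the original file): for i >= 1,
 * dst[i] = src2[i] - mid_pred(L, T, L + T - LT) with L = src2[i-1],
 * T = src1[i] and LT = src1[i-1]; dst[0] is handled separately in C above.
 */
#if 0
static void sub_hfyu_median_prediction_sketch(uint8_t *dst, uint8_t *src1, uint8_t *src2, int w)
{
    int i;
    for (i = 1; i < w; i++) {
        const int l  = src2[i-1];
        const int lt = src1[i-1];
        dst[i] = src2[i] - mid_pred(l, src1[i], (l + src1[i] - lt) & 0xFF);
    }
}
#endif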
1484

    
1485
#define LBUTTERFLY2(a1,b1,a2,b2)\
1486
    "paddw " #b1 ", " #a1 "           \n\t"\
1487
    "paddw " #b2 ", " #a2 "           \n\t"\
1488
    "paddw " #b1 ", " #b1 "           \n\t"\
1489
    "paddw " #b2 ", " #b2 "           \n\t"\
1490
    "psubw " #a1 ", " #b1 "           \n\t"\
1491
    "psubw " #a2 ", " #b2 "           \n\t"
1492
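/*
 * Added note (not in the original file): LBUTTERFLY2(a1,b1,a2,b2) performs one
 * radix-2 butterfly on two register pairs, leaving a = a + b and b = b - a;
 * the difference is obtained as 2*b - (a + b), which needs no extra move.
 */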

    
1493
#define HADAMARD48\
1494
        LBUTTERFLY2(%%mm0, %%mm1, %%mm2, %%mm3)\
1495
        LBUTTERFLY2(%%mm4, %%mm5, %%mm6, %%mm7)\
1496
        LBUTTERFLY2(%%mm0, %%mm2, %%mm1, %%mm3)\
1497
        LBUTTERFLY2(%%mm4, %%mm6, %%mm5, %%mm7)\
1498
        LBUTTERFLY2(%%mm0, %%mm4, %%mm1, %%mm5)\
1499
        LBUTTERFLY2(%%mm2, %%mm6, %%mm3, %%mm7)\
1500

    
1501
#define MMABS(a,z)\
1502
    "pxor " #z ", " #z "              \n\t"\
1503
    "pcmpgtw " #a ", " #z "           \n\t"\
1504
    "pxor " #z ", " #a "              \n\t"\
1505
    "psubw " #z ", " #a "             \n\t"
1506

    
1507
#define MMABS_SUM(a,z, sum)\
1508
    "pxor " #z ", " #z "              \n\t"\
1509
    "pcmpgtw " #a ", " #z "           \n\t"\
1510
    "pxor " #z ", " #a "              \n\t"\
1511
    "psubw " #z ", " #a "             \n\t"\
1512
    "paddusw " #a ", " #sum "         \n\t"
1513

    
1514
#define MMABS_MMX2(a,z)\
1515
    "pxor " #z ", " #z "              \n\t"\
1516
    "psubw " #a ", " #z "             \n\t"\
1517
    "pmaxsw " #z ", " #a "            \n\t"
1518

    
1519
#define MMABS_SUM_MMX2(a,z, sum)\
1520
    "pxor " #z ", " #z "              \n\t"\
1521
    "psubw " #a ", " #z "             \n\t"\
1522
    "pmaxsw " #z ", " #a "            \n\t"\
1523
    "paddusw " #a ", " #sum "         \n\t"
1524

    
1525
#define SBUTTERFLY(a,b,t,n)\
1526
    "movq " #a ", " #t "              \n\t" /* abcd */\
1527
    "punpckl" #n " " #b ", " #a "     \n\t" /* aebf */\
1528
    "punpckh" #n " " #b ", " #t "     \n\t" /* cgdh */\
1529

    
1530
#define TRANSPOSE4(a,b,c,d,t)\
1531
    SBUTTERFLY(a,b,t,wd) /* a=aebf t=cgdh */\
1532
    SBUTTERFLY(c,d,b,wd) /* c=imjn b=kolp */\
1533
    SBUTTERFLY(a,c,d,dq) /* a=aeim d=bfjn */\
1534
    SBUTTERFLY(t,b,c,dq) /* t=cgko c=dhlp */
1535

    
1536
#define LOAD4(o, a, b, c, d)\
1537
        "movq "#o"(%1), " #a "        \n\t"\
1538
        "movq "#o"+16(%1), " #b "     \n\t"\
1539
        "movq "#o"+32(%1), " #c "     \n\t"\
1540
        "movq "#o"+48(%1), " #d "     \n\t"
1541

    
1542
#define STORE4(o, a, b, c, d)\
1543
        "movq "#a", "#o"(%1)          \n\t"\
1544
        "movq "#b", "#o"+16(%1)       \n\t"\
1545
        "movq "#c", "#o"+32(%1)       \n\t"\
1546
        "movq "#d", "#o"+48(%1)       \n\t"\
1547

    
1548
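/* 8x8 SATD: diff_pixels_mmx() writes src1-src2 as 16-bit words into temp[],
   the block is then Hadamard-transformed along both axes (with TRANSPOSE4 in
   between, spilling through spare rows of temp[]), and the absolute values of
   the transform coefficients are accumulated with paddusw.  The 16x16 versions
   further down are built from this via WARPER8_16_SQ, which sums the 8x8
   result over the four quadrants of the 16x16 block. */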
static int hadamard8_diff_mmx(void *s, uint8_t *src1, uint8_t *src2, int stride, int h){
1549
    DECLARE_ALIGNED_8(uint64_t, temp[16]);
1550
    int sum=0;
1551

    
1552
    assert(h==8);
1553

    
1554
    diff_pixels_mmx((DCTELEM*)temp, src1, src2, stride);
1555

    
1556
    asm volatile(
1557
        LOAD4(0 , %%mm0, %%mm1, %%mm2, %%mm3)
1558
        LOAD4(64, %%mm4, %%mm5, %%mm6, %%mm7)
1559

    
1560
        HADAMARD48
1561

    
1562
        "movq %%mm7, 112(%1)            \n\t"
1563

    
1564
        TRANSPOSE4(%%mm0, %%mm1, %%mm2, %%mm3, %%mm7)
1565
        STORE4(0 , %%mm0, %%mm3, %%mm7, %%mm2)
1566

    
1567
        "movq 112(%1), %%mm7            \n\t"
1568
        TRANSPOSE4(%%mm4, %%mm5, %%mm6, %%mm7, %%mm0)
1569
        STORE4(64, %%mm4, %%mm7, %%mm0, %%mm6)
1570

    
1571
        LOAD4(8 , %%mm0, %%mm1, %%mm2, %%mm3)
1572
        LOAD4(72, %%mm4, %%mm5, %%mm6, %%mm7)
1573

    
1574
        HADAMARD48
1575

    
1576
        "movq %%mm7, 120(%1)            \n\t"
1577

    
1578
        TRANSPOSE4(%%mm0, %%mm1, %%mm2, %%mm3, %%mm7)
1579
        STORE4(8 , %%mm0, %%mm3, %%mm7, %%mm2)
1580

    
1581
        "movq 120(%1), %%mm7            \n\t"
1582
        TRANSPOSE4(%%mm4, %%mm5, %%mm6, %%mm7, %%mm0)
1583
        "movq %%mm7, %%mm5              \n\t"//FIXME remove
1584
        "movq %%mm6, %%mm7              \n\t"
1585
        "movq %%mm0, %%mm6              \n\t"
1586
//        STORE4(72, %%mm4, %%mm7, %%mm0, %%mm6) //FIXME remove
1587

    
1588
        LOAD4(64, %%mm0, %%mm1, %%mm2, %%mm3)
1589
//        LOAD4(72, %%mm4, %%mm5, %%mm6, %%mm7)
1590

    
1591
        HADAMARD48
1592
        "movq %%mm7, 64(%1)             \n\t"
1593
        MMABS(%%mm0, %%mm7)
1594
        MMABS_SUM(%%mm1, %%mm7, %%mm0)
1595
        MMABS_SUM(%%mm2, %%mm7, %%mm0)
1596
        MMABS_SUM(%%mm3, %%mm7, %%mm0)
1597
        MMABS_SUM(%%mm4, %%mm7, %%mm0)
1598
        MMABS_SUM(%%mm5, %%mm7, %%mm0)
1599
        MMABS_SUM(%%mm6, %%mm7, %%mm0)
1600
        "movq 64(%1), %%mm1             \n\t"
1601
        MMABS_SUM(%%mm1, %%mm7, %%mm0)
1602
        "movq %%mm0, 64(%1)             \n\t"
1603

    
1604
        LOAD4(0 , %%mm0, %%mm1, %%mm2, %%mm3)
1605
        LOAD4(8 , %%mm4, %%mm5, %%mm6, %%mm7)
1606

    
1607
        HADAMARD48
1608
        "movq %%mm7, (%1)               \n\t"
1609
        MMABS(%%mm0, %%mm7)
1610
        MMABS_SUM(%%mm1, %%mm7, %%mm0)
1611
        MMABS_SUM(%%mm2, %%mm7, %%mm0)
1612
        MMABS_SUM(%%mm3, %%mm7, %%mm0)
1613
        MMABS_SUM(%%mm4, %%mm7, %%mm0)
1614
        MMABS_SUM(%%mm5, %%mm7, %%mm0)
1615
        MMABS_SUM(%%mm6, %%mm7, %%mm0)
1616
        "movq (%1), %%mm1               \n\t"
1617
        MMABS_SUM(%%mm1, %%mm7, %%mm0)
1618
        "movq 64(%1), %%mm1             \n\t"
1619
        MMABS_SUM(%%mm1, %%mm7, %%mm0)
1620

    
1621
        "movq %%mm0, %%mm1              \n\t"
1622
        "psrlq $32, %%mm0               \n\t"
1623
        "paddusw %%mm1, %%mm0           \n\t"
1624
        "movq %%mm0, %%mm1              \n\t"
1625
        "psrlq $16, %%mm0               \n\t"
1626
        "paddusw %%mm1, %%mm0           \n\t"
1627
        "movd %%mm0, %0                 \n\t"
1628

    
1629
        : "=r" (sum)
1630
        : "r"(temp)
1631
    );
1632
    return sum&0xFFFF;
1633
}
1634

    
1635
static int hadamard8_diff_mmx2(void *s, uint8_t *src1, uint8_t *src2, int stride, int h){
1636
    DECLARE_ALIGNED_8(uint64_t, temp[16]);
1637
    int sum=0;
1638

    
1639
    assert(h==8);
1640

    
1641
    diff_pixels_mmx((DCTELEM*)temp, src1, src2, stride);
1642

    
1643
    asm volatile(
1644
        LOAD4(0 , %%mm0, %%mm1, %%mm2, %%mm3)
1645
        LOAD4(64, %%mm4, %%mm5, %%mm6, %%mm7)
1646

    
1647
        HADAMARD48
1648

    
1649
        "movq %%mm7, 112(%1)            \n\t"
1650

    
1651
        TRANSPOSE4(%%mm0, %%mm1, %%mm2, %%mm3, %%mm7)
1652
        STORE4(0 , %%mm0, %%mm3, %%mm7, %%mm2)
1653

    
1654
        "movq 112(%1), %%mm7            \n\t"
1655
        TRANSPOSE4(%%mm4, %%mm5, %%mm6, %%mm7, %%mm0)
1656
        STORE4(64, %%mm4, %%mm7, %%mm0, %%mm6)
1657

    
1658
        LOAD4(8 , %%mm0, %%mm1, %%mm2, %%mm3)
1659
        LOAD4(72, %%mm4, %%mm5, %%mm6, %%mm7)
1660

    
1661
        HADAMARD48
1662

    
1663
        "movq %%mm7, 120(%1)            \n\t"
1664

    
1665
        TRANSPOSE4(%%mm0, %%mm1, %%mm2, %%mm3, %%mm7)
1666
        STORE4(8 , %%mm0, %%mm3, %%mm7, %%mm2)
1667

    
1668
        "movq 120(%1), %%mm7            \n\t"
1669
        TRANSPOSE4(%%mm4, %%mm5, %%mm6, %%mm7, %%mm0)
1670
        "movq %%mm7, %%mm5              \n\t"//FIXME remove
1671
        "movq %%mm6, %%mm7              \n\t"
1672
        "movq %%mm0, %%mm6              \n\t"
1673
//        STORE4(72, %%mm4, %%mm7, %%mm0, %%mm6) //FIXME remove
1674

    
1675
        LOAD4(64, %%mm0, %%mm1, %%mm2, %%mm3)
1676
//        LOAD4(72, %%mm4, %%mm5, %%mm6, %%mm7)
1677

    
1678
        HADAMARD48
1679
        "movq %%mm7, 64(%1)             \n\t"
1680
        MMABS_MMX2(%%mm0, %%mm7)
1681
        MMABS_SUM_MMX2(%%mm1, %%mm7, %%mm0)
1682
        MMABS_SUM_MMX2(%%mm2, %%mm7, %%mm0)
1683
        MMABS_SUM_MMX2(%%mm3, %%mm7, %%mm0)
1684
        MMABS_SUM_MMX2(%%mm4, %%mm7, %%mm0)
1685
        MMABS_SUM_MMX2(%%mm5, %%mm7, %%mm0)
1686
        MMABS_SUM_MMX2(%%mm6, %%mm7, %%mm0)
1687
        "movq 64(%1), %%mm1             \n\t"
1688
        MMABS_SUM_MMX2(%%mm1, %%mm7, %%mm0)
1689
        "movq %%mm0, 64(%1)             \n\t"
1690

    
1691
        LOAD4(0 , %%mm0, %%mm1, %%mm2, %%mm3)
1692
        LOAD4(8 , %%mm4, %%mm5, %%mm6, %%mm7)
1693

    
1694
        HADAMARD48
1695
        "movq %%mm7, (%1)               \n\t"
1696
        MMABS_MMX2(%%mm0, %%mm7)
1697
        MMABS_SUM_MMX2(%%mm1, %%mm7, %%mm0)
1698
        MMABS_SUM_MMX2(%%mm2, %%mm7, %%mm0)
1699
        MMABS_SUM_MMX2(%%mm3, %%mm7, %%mm0)
1700
        MMABS_SUM_MMX2(%%mm4, %%mm7, %%mm0)
1701
        MMABS_SUM_MMX2(%%mm5, %%mm7, %%mm0)
1702
        MMABS_SUM_MMX2(%%mm6, %%mm7, %%mm0)
1703
        "movq (%1), %%mm1               \n\t"
1704
        MMABS_SUM_MMX2(%%mm1, %%mm7, %%mm0)
1705
        "movq 64(%1), %%mm1             \n\t"
1706
        MMABS_SUM_MMX2(%%mm1, %%mm7, %%mm0)
1707

    
1708
        "pshufw $0x0E, %%mm0, %%mm1     \n\t"
1709
        "paddusw %%mm1, %%mm0           \n\t"
1710
        "pshufw $0x01, %%mm0, %%mm1     \n\t"
1711
        "paddusw %%mm1, %%mm0           \n\t"
1712
        "movd %%mm0, %0                 \n\t"
1713

    
1714
        : "=r" (sum)
1715
        : "r"(temp)
1716
    );
1717
    return sum&0xFFFF;
1718
}
1719

    
1720

    
WARPER8_16_SQ(hadamard8_diff_mmx, hadamard8_diff16_mmx)
WARPER8_16_SQ(hadamard8_diff_mmx2, hadamard8_diff16_mmx2)
#endif //CONFIG_ENCODERS

#define put_no_rnd_pixels8_mmx(a,b,c,d) put_pixels8_mmx(a,b,c,d)
#define put_no_rnd_pixels16_mmx(a,b,c,d) put_pixels16_mmx(a,b,c,d)

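/* QPEL_V_LOW emits one output row of the MPEG-4 quarter-pel vertical lowpass
   filter: out = (20*x1 - 6*x2 + 3*x3 - x4 + rnd) >> 5, saturated to bytes by
   packuswb, where x1..x4 are the pairwise sums of the taps at distance 0..3
   from the interpolation point (the (-1, 3, -6, 20, 20, -6, 3, -1) filter). */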
#define QPEL_V_LOW(m3,m4,m5,m6, pw_20, pw_3, rnd, in0, in1, in2, in7, out, OP)\
        "paddw " #m4 ", " #m3 "           \n\t" /* x1 */\
        "movq "MANGLE(ff_pw_20)", %%mm4   \n\t" /* 20 */\
        "pmullw " #m3 ", %%mm4            \n\t" /* 20x1 */\
        "movq "#in7", " #m3 "             \n\t" /* d */\
        "movq "#in0", %%mm5               \n\t" /* D */\
        "paddw " #m3 ", %%mm5             \n\t" /* x4 */\
        "psubw %%mm5, %%mm4               \n\t" /* 20x1 - x4 */\
        "movq "#in1", %%mm5               \n\t" /* C */\
        "movq "#in2", %%mm6               \n\t" /* B */\
        "paddw " #m6 ", %%mm5             \n\t" /* x3 */\
        "paddw " #m5 ", %%mm6             \n\t" /* x2 */\
        "paddw %%mm6, %%mm6               \n\t" /* 2x2 */\
        "psubw %%mm6, %%mm5               \n\t" /* -2x2 + x3 */\
        "pmullw "MANGLE(ff_pw_3)", %%mm5  \n\t" /* -6x2 + 3x3 */\
        "paddw " #rnd ", %%mm4            \n\t" /* x2 */\
        "paddw %%mm4, %%mm5               \n\t" /* 20x1 - 6x2 + 3x3 - x4 */\
        "psraw $5, %%mm5                  \n\t"\
        "packuswb %%mm5, %%mm5            \n\t"\
        OP(%%mm5, out, %%mm7, d)

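/* QPEL_BASE expands the horizontal 8- and 16-pixel lowpass filters.  The MMX2
   versions evaluate the same 20/-6/3/-1 filter directly with pshufw/pmullw
   (see the per-register comments); taps that would fall outside the row are
   mirrored back inside, as can be seen in the temp[] expressions of the
   3DNow! fallback, which computes the filter in scalar C per row and only
   uses MMX for the final rounding, shift, pack and OP store. */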
#define QPEL_BASE(OPNAME, ROUNDER, RND, OP_MMX2, OP_3DNOW)\
1750
static void OPNAME ## mpeg4_qpel16_h_lowpass_mmx2(uint8_t *dst, uint8_t *src, int dstStride, int srcStride, int h){\
1751
    uint64_t temp;\
1752
\
1753
    asm volatile(\
1754
        "pxor %%mm7, %%mm7                \n\t"\
1755
        "1:                               \n\t"\
1756
        "movq  (%0), %%mm0                \n\t" /* ABCDEFGH */\
1757
        "movq %%mm0, %%mm1                \n\t" /* ABCDEFGH */\
1758
        "movq %%mm0, %%mm2                \n\t" /* ABCDEFGH */\
1759
        "punpcklbw %%mm7, %%mm0           \n\t" /* 0A0B0C0D */\
1760
        "punpckhbw %%mm7, %%mm1           \n\t" /* 0E0F0G0H */\
1761
        "pshufw $0x90, %%mm0, %%mm5       \n\t" /* 0A0A0B0C */\
1762
        "pshufw $0x41, %%mm0, %%mm6       \n\t" /* 0B0A0A0B */\
1763
        "movq %%mm2, %%mm3                \n\t" /* ABCDEFGH */\
1764
        "movq %%mm2, %%mm4                \n\t" /* ABCDEFGH */\
1765
        "psllq $8, %%mm2                  \n\t" /* 0ABCDEFG */\
1766
        "psllq $16, %%mm3                 \n\t" /* 00ABCDEF */\
1767
        "psllq $24, %%mm4                 \n\t" /* 000ABCDE */\
1768
        "punpckhbw %%mm7, %%mm2           \n\t" /* 0D0E0F0G */\
1769
        "punpckhbw %%mm7, %%mm3           \n\t" /* 0C0D0E0F */\
1770
        "punpckhbw %%mm7, %%mm4           \n\t" /* 0B0C0D0E */\
1771
        "paddw %%mm3, %%mm5               \n\t" /* b */\
1772
        "paddw %%mm2, %%mm6               \n\t" /* c */\
1773
        "paddw %%mm5, %%mm5               \n\t" /* 2b */\
1774
        "psubw %%mm5, %%mm6               \n\t" /* c - 2b */\
1775
        "pshufw $0x06, %%mm0, %%mm5       \n\t" /* 0C0B0A0A */\
1776
        "pmullw "MANGLE(ff_pw_3)", %%mm6  \n\t" /* 3c - 6b */\
1777
        "paddw %%mm4, %%mm0               \n\t" /* a */\
1778
        "paddw %%mm1, %%mm5               \n\t" /* d */\
1779
        "pmullw "MANGLE(ff_pw_20)", %%mm0 \n\t" /* 20a */\
1780
        "psubw %%mm5, %%mm0               \n\t" /* 20a - d */\
1781
        "paddw %6, %%mm6                  \n\t"\
1782
        "paddw %%mm6, %%mm0               \n\t" /* 20a - 6b + 3c - d */\
1783
        "psraw $5, %%mm0                  \n\t"\
1784
        "movq %%mm0, %5                   \n\t"\
1785
        /* mm1=EFGH, mm2=DEFG, mm3=CDEF, mm4=BCDE, mm7=0 */\
1786
        \
1787
        "movq 5(%0), %%mm0                \n\t" /* FGHIJKLM */\
1788
        "movq %%mm0, %%mm5                \n\t" /* FGHIJKLM */\
1789
        "movq %%mm0, %%mm6                \n\t" /* FGHIJKLM */\
1790
        "psrlq $8, %%mm0                  \n\t" /* GHIJKLM0 */\
1791
        "psrlq $16, %%mm5                 \n\t" /* HIJKLM00 */\
1792
        "punpcklbw %%mm7, %%mm0           \n\t" /* 0G0H0I0J */\
1793
        "punpcklbw %%mm7, %%mm5           \n\t" /* 0H0I0J0K */\
1794
        "paddw %%mm0, %%mm2               \n\t" /* b */\
1795
        "paddw %%mm5, %%mm3               \n\t" /* c */\
1796
        "paddw %%mm2, %%mm2               \n\t" /* 2b */\
1797
        "psubw %%mm2, %%mm3               \n\t" /* c - 2b */\
1798
        "movq %%mm6, %%mm2                \n\t" /* FGHIJKLM */\
1799
        "psrlq $24, %%mm6                 \n\t" /* IJKLM000 */\
1800
        "punpcklbw %%mm7, %%mm2           \n\t" /* 0F0G0H0I */\
1801
        "punpcklbw %%mm7, %%mm6           \n\t" /* 0I0J0K0L */\
1802
        "pmullw "MANGLE(ff_pw_3)", %%mm3  \n\t" /* 3c - 6b */\
1803
        "paddw %%mm2, %%mm1               \n\t" /* a */\
1804
        "paddw %%mm6, %%mm4               \n\t" /* d */\
1805
        "pmullw "MANGLE(ff_pw_20)", %%mm1 \n\t" /* 20a */\
1806
        "psubw %%mm4, %%mm3               \n\t" /* - 6b +3c - d */\
1807
        "paddw %6, %%mm1                  \n\t"\
1808
        "paddw %%mm1, %%mm3               \n\t" /* 20a - 6b +3c - d */\
1809
        "psraw $5, %%mm3                  \n\t"\
1810
        "movq %5, %%mm1                   \n\t"\
1811
        "packuswb %%mm3, %%mm1            \n\t"\
1812
        OP_MMX2(%%mm1, (%1),%%mm4, q)\
1813
        /* mm0= GHIJ, mm2=FGHI, mm5=HIJK, mm6=IJKL, mm7=0 */\
1814
        \
1815
        "movq 9(%0), %%mm1                \n\t" /* JKLMNOPQ */\
1816
        "movq %%mm1, %%mm4                \n\t" /* JKLMNOPQ */\
1817
        "movq %%mm1, %%mm3                \n\t" /* JKLMNOPQ */\
1818
        "psrlq $8, %%mm1                  \n\t" /* KLMNOPQ0 */\
1819
        "psrlq $16, %%mm4                 \n\t" /* LMNOPQ00 */\
1820
        "punpcklbw %%mm7, %%mm1           \n\t" /* 0K0L0M0N */\
1821
        "punpcklbw %%mm7, %%mm4           \n\t" /* 0L0M0N0O */\
1822
        "paddw %%mm1, %%mm5               \n\t" /* b */\
1823
        "paddw %%mm4, %%mm0               \n\t" /* c */\
1824
        "paddw %%mm5, %%mm5               \n\t" /* 2b */\
1825
        "psubw %%mm5, %%mm0               \n\t" /* c - 2b */\
1826
        "movq %%mm3, %%mm5                \n\t" /* JKLMNOPQ */\
1827
        "psrlq $24, %%mm3                 \n\t" /* MNOPQ000 */\
1828
        "pmullw "MANGLE(ff_pw_3)", %%mm0  \n\t" /* 3c - 6b */\
1829
        "punpcklbw %%mm7, %%mm3           \n\t" /* 0M0N0O0P */\
1830
        "paddw %%mm3, %%mm2               \n\t" /* d */\
1831
        "psubw %%mm2, %%mm0               \n\t" /* -6b + 3c - d */\
1832
        "movq %%mm5, %%mm2                \n\t" /* JKLMNOPQ */\
1833
        "punpcklbw %%mm7, %%mm2           \n\t" /* 0J0K0L0M */\
1834
        "punpckhbw %%mm7, %%mm5           \n\t" /* 0N0O0P0Q */\
1835
        "paddw %%mm2, %%mm6               \n\t" /* a */\
1836
        "pmullw "MANGLE(ff_pw_20)", %%mm6 \n\t" /* 20a */\
1837
        "paddw %6, %%mm0                  \n\t"\
1838
        "paddw %%mm6, %%mm0               \n\t" /* 20a - 6b + 3c - d */\
1839
        "psraw $5, %%mm0                  \n\t"\
1840
        /* mm1=KLMN, mm2=JKLM, mm3=MNOP, mm4=LMNO, mm5=NOPQ mm7=0 */\
1841
        \
1842
        "paddw %%mm5, %%mm3               \n\t" /* a */\
1843
        "pshufw $0xF9, %%mm5, %%mm6       \n\t" /* 0O0P0Q0Q */\
1844
        "paddw %%mm4, %%mm6               \n\t" /* b */\
1845
        "pshufw $0xBE, %%mm5, %%mm4       \n\t" /* 0P0Q0Q0P */\
1846
        "pshufw $0x6F, %%mm5, %%mm5       \n\t" /* 0Q0Q0P0O */\
1847
        "paddw %%mm1, %%mm4               \n\t" /* c */\
1848
        "paddw %%mm2, %%mm5               \n\t" /* d */\
1849
        "paddw %%mm6, %%mm6               \n\t" /* 2b */\
1850
        "psubw %%mm6, %%mm4               \n\t" /* c - 2b */\
1851
        "pmullw "MANGLE(ff_pw_20)", %%mm3 \n\t" /* 20a */\
1852
        "pmullw "MANGLE(ff_pw_3)", %%mm4  \n\t" /* 3c - 6b */\
1853
        "psubw %%mm5, %%mm3               \n\t" /* -6b + 3c - d */\
1854
        "paddw %6, %%mm4                  \n\t"\
1855
        "paddw %%mm3, %%mm4               \n\t" /* 20a - 6b + 3c - d */\
1856
        "psraw $5, %%mm4                  \n\t"\
1857
        "packuswb %%mm4, %%mm0            \n\t"\
1858
        OP_MMX2(%%mm0, 8(%1), %%mm4, q)\
1859
        \
1860
        "add %3, %0                       \n\t"\
1861
        "add %4, %1                       \n\t"\
1862
        "decl %2                          \n\t"\
1863
        " jnz 1b                          \n\t"\
1864
        : "+a"(src), "+c"(dst), "+m"(h)\
1865
        : "d"((long)srcStride), "S"((long)dstStride), /*"m"(ff_pw_20), "m"(ff_pw_3),*/ "m"(temp), "m"(ROUNDER)\
1866
        : "memory"\
1867
    );\
1868
}\
1869
\
1870
static void OPNAME ## mpeg4_qpel16_h_lowpass_3dnow(uint8_t *dst, uint8_t *src, int dstStride, int srcStride, int h){\
1871
    int i;\
1872
    int16_t temp[16];\
1873
    /* quick HACK, XXX FIXME MUST be optimized */\
1874
    for(i=0; i<h; i++)\
1875
    {\
1876
        temp[ 0]= (src[ 0]+src[ 1])*20 - (src[ 0]+src[ 2])*6 + (src[ 1]+src[ 3])*3 - (src[ 2]+src[ 4]);\
1877
        temp[ 1]= (src[ 1]+src[ 2])*20 - (src[ 0]+src[ 3])*6 + (src[ 0]+src[ 4])*3 - (src[ 1]+src[ 5]);\
1878
        temp[ 2]= (src[ 2]+src[ 3])*20 - (src[ 1]+src[ 4])*6 + (src[ 0]+src[ 5])*3 - (src[ 0]+src[ 6]);\
1879
        temp[ 3]= (src[ 3]+src[ 4])*20 - (src[ 2]+src[ 5])*6 + (src[ 1]+src[ 6])*3 - (src[ 0]+src[ 7]);\
1880
        temp[ 4]= (src[ 4]+src[ 5])*20 - (src[ 3]+src[ 6])*6 + (src[ 2]+src[ 7])*3 - (src[ 1]+src[ 8]);\
1881
        temp[ 5]= (src[ 5]+src[ 6])*20 - (src[ 4]+src[ 7])*6 + (src[ 3]+src[ 8])*3 - (src[ 2]+src[ 9]);\
1882
        temp[ 6]= (src[ 6]+src[ 7])*20 - (src[ 5]+src[ 8])*6 + (src[ 4]+src[ 9])*3 - (src[ 3]+src[10]);\
1883
        temp[ 7]= (src[ 7]+src[ 8])*20 - (src[ 6]+src[ 9])*6 + (src[ 5]+src[10])*3 - (src[ 4]+src[11]);\
1884
        temp[ 8]= (src[ 8]+src[ 9])*20 - (src[ 7]+src[10])*6 + (src[ 6]+src[11])*3 - (src[ 5]+src[12]);\
1885
        temp[ 9]= (src[ 9]+src[10])*20 - (src[ 8]+src[11])*6 + (src[ 7]+src[12])*3 - (src[ 6]+src[13]);\
1886
        temp[10]= (src[10]+src[11])*20 - (src[ 9]+src[12])*6 + (src[ 8]+src[13])*3 - (src[ 7]+src[14]);\
1887
        temp[11]= (src[11]+src[12])*20 - (src[10]+src[13])*6 + (src[ 9]+src[14])*3 - (src[ 8]+src[15]);\
1888
        temp[12]= (src[12]+src[13])*20 - (src[11]+src[14])*6 + (src[10]+src[15])*3 - (src[ 9]+src[16]);\
1889
        temp[13]= (src[13]+src[14])*20 - (src[12]+src[15])*6 + (src[11]+src[16])*3 - (src[10]+src[16]);\
1890
        temp[14]= (src[14]+src[15])*20 - (src[13]+src[16])*6 + (src[12]+src[16])*3 - (src[11]+src[15]);\
1891
        temp[15]= (src[15]+src[16])*20 - (src[14]+src[16])*6 + (src[13]+src[15])*3 - (src[12]+src[14]);\
1892
        asm volatile(\
1893
            "movq (%0), %%mm0               \n\t"\
1894
            "movq 8(%0), %%mm1              \n\t"\
1895
            "paddw %2, %%mm0                \n\t"\
1896
            "paddw %2, %%mm1                \n\t"\
1897
            "psraw $5, %%mm0                \n\t"\
1898
            "psraw $5, %%mm1                \n\t"\
1899
            "packuswb %%mm1, %%mm0          \n\t"\
1900
            OP_3DNOW(%%mm0, (%1), %%mm1, q)\
1901
            "movq 16(%0), %%mm0             \n\t"\
1902
            "movq 24(%0), %%mm1             \n\t"\
1903
            "paddw %2, %%mm0                \n\t"\
1904
            "paddw %2, %%mm1                \n\t"\
1905
            "psraw $5, %%mm0                \n\t"\
1906
            "psraw $5, %%mm1                \n\t"\
1907
            "packuswb %%mm1, %%mm0          \n\t"\
1908
            OP_3DNOW(%%mm0, 8(%1), %%mm1, q)\
1909
            :: "r"(temp), "r"(dst), "m"(ROUNDER)\
1910
            : "memory"\
1911
        );\
1912
        dst+=dstStride;\
1913
        src+=srcStride;\
1914
    }\
1915
}\
1916
\
1917
static void OPNAME ## mpeg4_qpel8_h_lowpass_mmx2(uint8_t *dst, uint8_t *src, int dstStride, int srcStride, int h){\
1918
    uint64_t temp;\
1919
\
1920
    asm volatile(\
1921
        "pxor %%mm7, %%mm7                \n\t"\
1922
        "1:                               \n\t"\
1923
        "movq  (%0), %%mm0                \n\t" /* ABCDEFGH */\
1924
        "movq %%mm0, %%mm1                \n\t" /* ABCDEFGH */\
1925
        "movq %%mm0, %%mm2                \n\t" /* ABCDEFGH */\
1926
        "punpcklbw %%mm7, %%mm0           \n\t" /* 0A0B0C0D */\
1927
        "punpckhbw %%mm7, %%mm1           \n\t" /* 0E0F0G0H */\
1928
        "pshufw $0x90, %%mm0, %%mm5       \n\t" /* 0A0A0B0C */\
1929
        "pshufw $0x41, %%mm0, %%mm6       \n\t" /* 0B0A0A0B */\
1930
        "movq %%mm2, %%mm3                \n\t" /* ABCDEFGH */\
1931
        "movq %%mm2, %%mm4                \n\t" /* ABCDEFGH */\
1932
        "psllq $8, %%mm2                  \n\t" /* 0ABCDEFG */\
1933
        "psllq $16, %%mm3                 \n\t" /* 00ABCDEF */\
1934
        "psllq $24, %%mm4                 \n\t" /* 000ABCDE */\
1935
        "punpckhbw %%mm7, %%mm2           \n\t" /* 0D0E0F0G */\
1936
        "punpckhbw %%mm7, %%mm3           \n\t" /* 0C0D0E0F */\
1937
        "punpckhbw %%mm7, %%mm4           \n\t" /* 0B0C0D0E */\
1938
        "paddw %%mm3, %%mm5               \n\t" /* b */\
1939
        "paddw %%mm2, %%mm6               \n\t" /* c */\
1940
        "paddw %%mm5, %%mm5               \n\t" /* 2b */\
1941
        "psubw %%mm5, %%mm6               \n\t" /* c - 2b */\
1942
        "pshufw $0x06, %%mm0, %%mm5       \n\t" /* 0C0B0A0A */\
1943
        "pmullw "MANGLE(ff_pw_3)", %%mm6  \n\t" /* 3c - 6b */\
1944
        "paddw %%mm4, %%mm0               \n\t" /* a */\
1945
        "paddw %%mm1, %%mm5               \n\t" /* d */\
1946
        "pmullw "MANGLE(ff_pw_20)", %%mm0 \n\t" /* 20a */\
1947
        "psubw %%mm5, %%mm0               \n\t" /* 20a - d */\
1948
        "paddw %6, %%mm6                  \n\t"\
1949
        "paddw %%mm6, %%mm0               \n\t" /* 20a - 6b + 3c - d */\
1950
        "psraw $5, %%mm0                  \n\t"\
1951
        /* mm1=EFGH, mm2=DEFG, mm3=CDEF, mm4=BCDE, mm7=0 */\
1952
        \
1953
        "movd 5(%0), %%mm5                \n\t" /* FGHI */\
1954
        "punpcklbw %%mm7, %%mm5           \n\t" /* 0F0G0H0I */\
1955
        "pshufw $0xF9, %%mm5, %%mm6       \n\t" /* 0G0H0I0I */\
1956
        "paddw %%mm5, %%mm1               \n\t" /* a */\
1957
        "paddw %%mm6, %%mm2               \n\t" /* b */\
1958
        "pshufw $0xBE, %%mm5, %%mm6       \n\t" /* 0H0I0I0H */\
1959
        "pshufw $0x6F, %%mm5, %%mm5       \n\t" /* 0I0I0H0G */\
1960
        "paddw %%mm6, %%mm3               \n\t" /* c */\
1961
        "paddw %%mm5, %%mm4               \n\t" /* d */\
1962
        "paddw %%mm2, %%mm2               \n\t" /* 2b */\
1963
        "psubw %%mm2, %%mm3               \n\t" /* c - 2b */\
1964
        "pmullw "MANGLE(ff_pw_20)", %%mm1 \n\t" /* 20a */\
1965
        "pmullw "MANGLE(ff_pw_3)", %%mm3  \n\t" /* 3c - 6b */\
1966
        "psubw %%mm4, %%mm3               \n\t" /* -6b + 3c - d */\
1967
        "paddw %6, %%mm1                  \n\t"\
1968
        "paddw %%mm1, %%mm3               \n\t" /* 20a - 6b + 3c - d */\
1969
        "psraw $5, %%mm3                  \n\t"\
1970
        "packuswb %%mm3, %%mm0            \n\t"\
1971
        OP_MMX2(%%mm0, (%1), %%mm4, q)\
1972
        \
1973
        "add %3, %0                       \n\t"\
1974
        "add %4, %1                       \n\t"\
1975
        "decl %2                          \n\t"\
1976
        " jnz 1b                          \n\t"\
1977
        : "+a"(src), "+c"(dst), "+m"(h)\
1978
        : "S"((long)srcStride), "D"((long)dstStride), /*"m"(ff_pw_20), "m"(ff_pw_3),*/ "m"(temp), "m"(ROUNDER)\
1979
        : "memory"\
1980
    );\
1981
}\
1982
\
1983
static void OPNAME ## mpeg4_qpel8_h_lowpass_3dnow(uint8_t *dst, uint8_t *src, int dstStride, int srcStride, int h){\
1984
    int i;\
1985
    int16_t temp[8];\
1986
    /* quick HACK, XXX FIXME MUST be optimized */\
1987
    for(i=0; i<h; i++)\
1988
    {\
1989
        temp[ 0]= (src[ 0]+src[ 1])*20 - (src[ 0]+src[ 2])*6 + (src[ 1]+src[ 3])*3 - (src[ 2]+src[ 4]);\
1990
        temp[ 1]= (src[ 1]+src[ 2])*20 - (src[ 0]+src[ 3])*6 + (src[ 0]+src[ 4])*3 - (src[ 1]+src[ 5]);\
1991
        temp[ 2]= (src[ 2]+src[ 3])*20 - (src[ 1]+src[ 4])*6 + (src[ 0]+src[ 5])*3 - (src[ 0]+src[ 6]);\
1992
        temp[ 3]= (src[ 3]+src[ 4])*20 - (src[ 2]+src[ 5])*6 + (src[ 1]+src[ 6])*3 - (src[ 0]+src[ 7]);\
1993
        temp[ 4]= (src[ 4]+src[ 5])*20 - (src[ 3]+src[ 6])*6 + (src[ 2]+src[ 7])*3 - (src[ 1]+src[ 8]);\
1994
        temp[ 5]= (src[ 5]+src[ 6])*20 - (src[ 4]+src[ 7])*6 + (src[ 3]+src[ 8])*3 - (src[ 2]+src[ 8]);\
1995
        temp[ 6]= (src[ 6]+src[ 7])*20 - (src[ 5]+src[ 8])*6 + (src[ 4]+src[ 8])*3 - (src[ 3]+src[ 7]);\
1996
        temp[ 7]= (src[ 7]+src[ 8])*20 - (src[ 6]+src[ 8])*6 + (src[ 5]+src[ 7])*3 - (src[ 4]+src[ 6]);\
1997
        asm volatile(\
1998
            "movq (%0), %%mm0           \n\t"\
1999
            "movq 8(%0), %%mm1          \n\t"\
2000
            "paddw %2, %%mm0            \n\t"\
2001
            "paddw %2, %%mm1            \n\t"\
2002
            "psraw $5, %%mm0            \n\t"\
2003
            "psraw $5, %%mm1            \n\t"\
2004
            "packuswb %%mm1, %%mm0      \n\t"\
2005
            OP_3DNOW(%%mm0, (%1), %%mm1, q)\
2006
            :: "r"(temp), "r"(dst), "m"(ROUNDER)\
2007
            :"memory"\
2008
        );\
2009
        dst+=dstStride;\
2010
        src+=srcStride;\
2011
    }\
2012
}
2013

    
2014
#define QPEL_OP(OPNAME, ROUNDER, RND, OP, MMX)\
2015
\
2016
static void OPNAME ## mpeg4_qpel16_v_lowpass_ ## MMX(uint8_t *dst, uint8_t *src, int dstStride, int srcStride){\
2017
    uint64_t temp[17*4];\
2018
    uint64_t *temp_ptr= temp;\
2019
    int count= 17;\
2020
\
2021
    /*FIXME unroll */\
2022
    asm volatile(\
2023
        "pxor %%mm7, %%mm7              \n\t"\
2024
        "1:                             \n\t"\
2025
        "movq (%0), %%mm0               \n\t"\
2026
        "movq (%0), %%mm1               \n\t"\
2027
        "movq 8(%0), %%mm2              \n\t"\
2028
        "movq 8(%0), %%mm3              \n\t"\
2029
        "punpcklbw %%mm7, %%mm0         \n\t"\
2030
        "punpckhbw %%mm7, %%mm1         \n\t"\
2031
        "punpcklbw %%mm7, %%mm2         \n\t"\
2032
        "punpckhbw %%mm7, %%mm3         \n\t"\
2033
        "movq %%mm0, (%1)               \n\t"\
2034
        "movq %%mm1, 17*8(%1)           \n\t"\
2035
        "movq %%mm2, 2*17*8(%1)         \n\t"\
2036
        "movq %%mm3, 3*17*8(%1)         \n\t"\
2037
        "add $8, %1                     \n\t"\
2038
        "add %3, %0                     \n\t"\
2039
        "decl %2                        \n\t"\
2040
        " jnz 1b                        \n\t"\
2041
        : "+r" (src), "+r" (temp_ptr), "+r"(count)\
2042
        : "r" ((long)srcStride)\
2043
        : "memory"\
2044
    );\
2045
    \
2046
    temp_ptr= temp;\
2047
    count=4;\
2048
    \
2049
/*FIXME reorder for speed */\
2050
    asm volatile(\
2051
        /*"pxor %%mm7, %%mm7              \n\t"*/\
2052
        "1:                             \n\t"\
2053
        "movq (%0), %%mm0               \n\t"\
2054
        "movq 8(%0), %%mm1              \n\t"\
2055
        "movq 16(%0), %%mm2             \n\t"\
2056
        "movq 24(%0), %%mm3             \n\t"\
2057
        QPEL_V_LOW(%%mm0, %%mm1, %%mm2, %%mm3, %5, %6, %5, 16(%0),  8(%0),   (%0), 32(%0), (%1), OP)\
2058
        QPEL_V_LOW(%%mm1, %%mm2, %%mm3, %%mm0, %5, %6, %5,  8(%0),   (%0),   (%0), 40(%0), (%1, %3), OP)\
2059
        "add %4, %1                     \n\t"\
2060
        QPEL_V_LOW(%%mm2, %%mm3, %%mm0, %%mm1, %5, %6, %5,   (%0),   (%0),  8(%0), 48(%0), (%1), OP)\
2061
        \
2062
        QPEL_V_LOW(%%mm3, %%mm0, %%mm1, %%mm2, %5, %6, %5,   (%0),  8(%0), 16(%0), 56(%0), (%1, %3), OP)\
2063
        "add %4, %1                     \n\t"\
2064
        QPEL_V_LOW(%%mm0, %%mm1, %%mm2, %%mm3, %5, %6, %5,  8(%0), 16(%0), 24(%0), 64(%0), (%1), OP)\
2065
        QPEL_V_LOW(%%mm1, %%mm2, %%mm3, %%mm0, %5, %6, %5, 16(%0), 24(%0), 32(%0), 72(%0), (%1, %3), OP)\
2066
        "add %4, %1                     \n\t"\
2067
        QPEL_V_LOW(%%mm2, %%mm3, %%mm0, %%mm1, %5, %6, %5, 24(%0), 32(%0), 40(%0), 80(%0), (%1), OP)\
2068
        QPEL_V_LOW(%%mm3, %%mm0, %%mm1, %%mm2, %5, %6, %5, 32(%0), 40(%0), 48(%0), 88(%0), (%1, %3), OP)\
2069
        "add %4, %1                     \n\t"\
2070
        QPEL_V_LOW(%%mm0, %%mm1, %%mm2, %%mm3, %5, %6, %5, 40(%0), 48(%0), 56(%0), 96(%0), (%1), OP)\
2071
        QPEL_V_LOW(%%mm1, %%mm2, %%mm3, %%mm0, %5, %6, %5, 48(%0), 56(%0), 64(%0),104(%0), (%1, %3), OP)\
2072
        "add %4, %1                     \n\t"\
2073
        QPEL_V_LOW(%%mm2, %%mm3, %%mm0, %%mm1, %5, %6, %5, 56(%0), 64(%0), 72(%0),112(%0), (%1), OP)\
2074
        QPEL_V_LOW(%%mm3, %%mm0, %%mm1, %%mm2, %5, %6, %5, 64(%0), 72(%0), 80(%0),120(%0), (%1, %3), OP)\
2075
        "add %4, %1                     \n\t"\
2076
        QPEL_V_LOW(%%mm0, %%mm1, %%mm2, %%mm3, %5, %6, %5, 72(%0), 80(%0), 88(%0),128(%0), (%1), OP)\
2077
        \
2078
        QPEL_V_LOW(%%mm1, %%mm2, %%mm3, %%mm0, %5, %6, %5, 80(%0), 88(%0), 96(%0),128(%0), (%1, %3), OP)\
2079
        "add %4, %1                     \n\t"  \
2080
        QPEL_V_LOW(%%mm2, %%mm3, %%mm0, %%mm1, %5, %6, %5, 88(%0), 96(%0),104(%0),120(%0), (%1), OP)\
2081
        QPEL_V_LOW(%%mm3, %%mm0, %%mm1, %%mm2, %5, %6, %5, 96(%0),104(%0),112(%0),112(%0), (%1, %3), OP)\
2082
        \
2083
        "add $136, %0                   \n\t"\
2084
        "add %6, %1                     \n\t"\
2085
        "decl %2                        \n\t"\
2086
        " jnz 1b                        \n\t"\
2087
        \
2088
        : "+r"(temp_ptr), "+r"(dst), "+g"(count)\
2089
        : "r"((long)dstStride), "r"(2*(long)dstStride), /*"m"(ff_pw_20), "m"(ff_pw_3),*/ "m"(ROUNDER), "g"(4-14*(long)dstStride)\
2090
        :"memory"\
2091
    );\
2092
}\
2093
\
2094
static void OPNAME ## mpeg4_qpel8_v_lowpass_ ## MMX(uint8_t *dst, uint8_t *src, int dstStride, int srcStride){\
2095
    uint64_t temp[9*2];\
2096
    uint64_t *temp_ptr= temp;\
2097
    int count= 9;\
2098
\
2099
    /*FIXME unroll */\
2100
    asm volatile(\
2101
        "pxor %%mm7, %%mm7              \n\t"\
2102
        "1:                             \n\t"\
2103
        "movq (%0), %%mm0               \n\t"\
2104
        "movq (%0), %%mm1               \n\t"\
2105
        "punpcklbw %%mm7, %%mm0         \n\t"\
2106
        "punpckhbw %%mm7, %%mm1         \n\t"\
2107
        "movq %%mm0, (%1)               \n\t"\
2108
        "movq %%mm1, 9*8(%1)            \n\t"\
2109
        "add $8, %1                     \n\t"\
2110
        "add %3, %0                     \n\t"\
2111
        "decl %2                        \n\t"\
2112
        " jnz 1b                        \n\t"\
2113
        : "+r" (src), "+r" (temp_ptr), "+r"(count)\
2114
        : "r" ((long)srcStride)\
2115
        : "memory"\
2116
    );\
2117
    \
2118
    temp_ptr= temp;\
2119
    count=2;\
2120
    \
2121
/*FIXME reorder for speed */\
2122
    asm volatile(\
2123
        /*"pxor %%mm7, %%mm7              \n\t"*/\
2124
        "1:                             \n\t"\
2125
        "movq (%0), %%mm0               \n\t"\
2126
        "movq 8(%0), %%mm1              \n\t"\
2127
        "movq 16(%0), %%mm2             \n\t"\
2128
        "movq 24(%0), %%mm3             \n\t"\
2129
        QPEL_V_LOW(%%mm0, %%mm1, %%mm2, %%mm3, %5, %6, %5, 16(%0),  8(%0),   (%0), 32(%0), (%1), OP)\
2130
        QPEL_V_LOW(%%mm1, %%mm2, %%mm3, %%mm0, %5, %6, %5,  8(%0),   (%0),   (%0), 40(%0), (%1, %3), OP)\
2131
        "add %4, %1                     \n\t"\
2132
        QPEL_V_LOW(%%mm2, %%mm3, %%mm0, %%mm1, %5, %6, %5,   (%0),   (%0),  8(%0), 48(%0), (%1), OP)\
2133
        \
2134
        QPEL_V_LOW(%%mm3, %%mm0, %%mm1, %%mm2, %5, %6, %5,   (%0),  8(%0), 16(%0), 56(%0), (%1, %3), OP)\
2135
        "add %4, %1                     \n\t"\
2136
        QPEL_V_LOW(%%mm0, %%mm1, %%mm2, %%mm3, %5, %6, %5,  8(%0), 16(%0), 24(%0), 64(%0), (%1), OP)\
2137
        \
2138
        QPEL_V_LOW(%%mm1, %%mm2, %%mm3, %%mm0, %5, %6, %5, 16(%0), 24(%0), 32(%0), 64(%0), (%1, %3), OP)\
2139
        "add %4, %1                     \n\t"\
2140
        QPEL_V_LOW(%%mm2, %%mm3, %%mm0, %%mm1, %5, %6, %5, 24(%0), 32(%0), 40(%0), 56(%0), (%1), OP)\
2141
        QPEL_V_LOW(%%mm3, %%mm0, %%mm1, %%mm2, %5, %6, %5, 32(%0), 40(%0), 48(%0), 48(%0), (%1, %3), OP)\
2142
                \
2143
        "add $72, %0                    \n\t"\
2144
        "add %6, %1                     \n\t"\
2145
        "decl %2                        \n\t"\
2146
        " jnz 1b                        \n\t"\
2147
         \
2148
        : "+r"(temp_ptr), "+r"(dst), "+g"(count)\
2149
        : "r"((long)dstStride), "r"(2*(long)dstStride), /*"m"(ff_pw_20), "m"(ff_pw_3),*/ "m"(ROUNDER), "g"(4-6*(long)dstStride)\
2150
        : "memory"\
2151
   );\
2152
}\
2153
\
2154
static void OPNAME ## qpel8_mc00_ ## MMX (uint8_t *dst, uint8_t *src, int stride){\
2155
    OPNAME ## pixels8_mmx(dst, src, stride, 8);\
2156
}\
2157
\
2158
static void OPNAME ## qpel8_mc10_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
2159
    uint64_t temp[8];\
2160
    uint8_t * const half= (uint8_t*)temp;\
2161
    put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(half, src, 8, stride, 8);\
2162
    OPNAME ## pixels8_l2_ ## MMX(dst, src, half, stride, stride, 8);\
2163
}\
2164
\
2165
static void OPNAME ## qpel8_mc20_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
2166
    OPNAME ## mpeg4_qpel8_h_lowpass_ ## MMX(dst, src, stride, stride, 8);\
2167
}\
2168
\
2169
static void OPNAME ## qpel8_mc30_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
2170
    uint64_t temp[8];\
2171
    uint8_t * const half= (uint8_t*)temp;\
2172
    put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(half, src, 8, stride, 8);\
2173
    OPNAME ## pixels8_l2_ ## MMX(dst, src+1, half, stride, stride, 8);\
2174
}\
2175
\
2176
static void OPNAME ## qpel8_mc01_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
2177
    uint64_t temp[8];\
2178
    uint8_t * const half= (uint8_t*)temp;\
2179
    put ## RND ## mpeg4_qpel8_v_lowpass_ ## MMX(half, src, 8, stride);\
2180
    OPNAME ## pixels8_l2_ ## MMX(dst, src, half, stride, stride, 8);\
2181
}\
2182
\
2183
static void OPNAME ## qpel8_mc02_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
2184
    OPNAME ## mpeg4_qpel8_v_lowpass_ ## MMX(dst, src, stride, stride);\
2185
}\
2186
\
2187
static void OPNAME ## qpel8_mc03_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
2188
    uint64_t temp[8];\
2189
    uint8_t * const half= (uint8_t*)temp;\
2190
    put ## RND ## mpeg4_qpel8_v_lowpass_ ## MMX(half, src, 8, stride);\
2191
    OPNAME ## pixels8_l2_ ## MMX(dst, src+stride, half, stride, stride, 8);\
2192
}\
2193
static void OPNAME ## qpel8_mc11_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
2194
    uint64_t half[8 + 9];\
2195
    uint8_t * const halfH= ((uint8_t*)half) + 64;\
2196
    uint8_t * const halfHV= ((uint8_t*)half);\
2197
    put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8, stride, 9);\
2198
    put ## RND ## pixels8_l2_ ## MMX(halfH, src, halfH, 8, stride, 9);\
2199
    put ## RND ## mpeg4_qpel8_v_lowpass_ ## MMX(halfHV, halfH, 8, 8);\
2200
    OPNAME ## pixels8_l2_ ## MMX(dst, halfH, halfHV, stride, 8, 8);\
2201
}\
2202
static void OPNAME ## qpel8_mc31_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
2203
    uint64_t half[8 + 9];\
2204
    uint8_t * const halfH= ((uint8_t*)half) + 64;\
2205
    uint8_t * const halfHV= ((uint8_t*)half);\
2206
    put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8, stride, 9);\
2207
    put ## RND ## pixels8_l2_ ## MMX(halfH, src+1, halfH, 8, stride, 9);\
2208
    put ## RND ## mpeg4_qpel8_v_lowpass_ ## MMX(halfHV, halfH, 8, 8);\
2209
    OPNAME ## pixels8_l2_ ## MMX(dst, halfH, halfHV, stride, 8, 8);\
2210
}\
2211
static void OPNAME ## qpel8_mc13_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
2212
    uint64_t half[8 + 9];\
2213
    uint8_t * const halfH= ((uint8_t*)half) + 64;\
2214
    uint8_t * const halfHV= ((uint8_t*)half);\
2215
    put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8, stride, 9);\
2216
    put ## RND ## pixels8_l2_ ## MMX(halfH, src, halfH, 8, stride, 9);\
2217
    put ## RND ## mpeg4_qpel8_v_lowpass_ ## MMX(halfHV, halfH, 8, 8);\
2218
    OPNAME ## pixels8_l2_ ## MMX(dst, halfH+8, halfHV, stride, 8, 8);\
2219
}\
2220
static void OPNAME ## qpel8_mc33_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
2221
    uint64_t half[8 + 9];\
2222
    uint8_t * const halfH= ((uint8_t*)half) + 64;\
2223
    uint8_t * const halfHV= ((uint8_t*)half);\
2224
    put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8, stride, 9);\
2225
    put ## RND ## pixels8_l2_ ## MMX(halfH, src+1, halfH, 8, stride, 9);\
2226
    put ## RND ## mpeg4_qpel8_v_lowpass_ ## MMX(halfHV, halfH, 8, 8);\
2227
    OPNAME ## pixels8_l2_ ## MMX(dst, halfH+8, halfHV, stride, 8, 8);\
2228
}\
2229
static void OPNAME ## qpel8_mc21_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
2230
    uint64_t half[8 + 9];\
2231
    uint8_t * const halfH= ((uint8_t*)half) + 64;\
2232
    uint8_t * const halfHV= ((uint8_t*)half);\
2233
    put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8, stride, 9);\
2234
    put ## RND ## mpeg4_qpel8_v_lowpass_ ## MMX(halfHV, halfH, 8, 8);\
2235
    OPNAME ## pixels8_l2_ ## MMX(dst, halfH, halfHV, stride, 8, 8);\
2236
}\
2237
static void OPNAME ## qpel8_mc23_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
2238
    uint64_t half[8 + 9];\
2239
    uint8_t * const halfH= ((uint8_t*)half) + 64;\
2240
    uint8_t * const halfHV= ((uint8_t*)half);\
2241
    put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8, stride, 9);\
2242
    put ## RND ## mpeg4_qpel8_v_lowpass_ ## MMX(halfHV, halfH, 8, 8);\
2243
    OPNAME ## pixels8_l2_ ## MMX(dst, halfH+8, halfHV, stride, 8, 8);\
2244
}\
2245
static void OPNAME ## qpel8_mc12_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
2246
    uint64_t half[8 + 9];\
2247
    uint8_t * const halfH= ((uint8_t*)half);\
2248
    put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8, stride, 9);\
2249
    put ## RND ## pixels8_l2_ ## MMX(halfH, src, halfH, 8, stride, 9);\
2250
    OPNAME ## mpeg4_qpel8_v_lowpass_ ## MMX(dst, halfH, stride, 8);\
2251
}\
2252
static void OPNAME ## qpel8_mc32_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
2253
    uint64_t half[8 + 9];\
2254
    uint8_t * const halfH= ((uint8_t*)half);\
2255
    put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8, stride, 9);\
2256
    put ## RND ## pixels8_l2_ ## MMX(halfH, src+1, halfH, 8, stride, 9);\
2257
    OPNAME ## mpeg4_qpel8_v_lowpass_ ## MMX(dst, halfH, stride, 8);\
2258
}\
2259
static void OPNAME ## qpel8_mc22_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
2260
    uint64_t half[9];\
2261
    uint8_t * const halfH= ((uint8_t*)half);\
2262
    put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8, stride, 9);\
2263
    OPNAME ## mpeg4_qpel8_v_lowpass_ ## MMX(dst, halfH, stride, 8);\
2264
}\
2265
static void OPNAME ## qpel16_mc00_ ## MMX (uint8_t *dst, uint8_t *src, int stride){\
2266
    OPNAME ## pixels16_mmx(dst, src, stride, 16);\
2267
}\
2268
\
2269
static void OPNAME ## qpel16_mc10_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
2270
    uint64_t temp[32];\
2271
    uint8_t * const half= (uint8_t*)temp;\
2272
    put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(half, src, 16, stride, 16);\
2273
    OPNAME ## pixels16_l2_ ## MMX(dst, src, half, stride, stride, 16);\
2274
}\
2275
\
2276
static void OPNAME ## qpel16_mc20_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
2277
    OPNAME ## mpeg4_qpel16_h_lowpass_ ## MMX(dst, src, stride, stride, 16);\
2278
}\
2279
\
2280
static void OPNAME ## qpel16_mc30_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
2281
    uint64_t temp[32];\
2282
    uint8_t * const half= (uint8_t*)temp;\
2283
    put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(half, src, 16, stride, 16);\
2284
    OPNAME ## pixels16_l2_ ## MMX(dst, src+1, half, stride, stride, 16);\
2285
}\
2286
\
2287
static void OPNAME ## qpel16_mc01_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
2288
    uint64_t temp[32];\
2289
    uint8_t * const half= (uint8_t*)temp;\
2290
    put ## RND ## mpeg4_qpel16_v_lowpass_ ## MMX(half, src, 16, stride);\
2291
    OPNAME ## pixels16_l2_ ## MMX(dst, src, half, stride, stride, 16);\
2292
}\
2293
\
2294
static void OPNAME ## qpel16_mc02_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
2295
    OPNAME ## mpeg4_qpel16_v_lowpass_ ## MMX(dst, src, stride, stride);\
2296
}\
2297
\
2298
static void OPNAME ## qpel16_mc03_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
2299
    uint64_t temp[32];\
2300
    uint8_t * const half= (uint8_t*)temp;\
2301
    put ## RND ## mpeg4_qpel16_v_lowpass_ ## MMX(half, src, 16, stride);\
2302
    OPNAME ## pixels16_l2_ ## MMX(dst, src+stride, half, stride, stride, 16);\
2303
}\
2304
static void OPNAME ## qpel16_mc11_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
2305
    uint64_t half[16*2 + 17*2];\
2306
    uint8_t * const halfH= ((uint8_t*)half) + 256;\
2307
    uint8_t * const halfHV= ((uint8_t*)half);\
2308
    put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16, stride, 17);\
2309
    put ## RND ## pixels16_l2_ ## MMX(halfH, src, halfH, 16, stride, 17);\
2310
    put ## RND ## mpeg4_qpel16_v_lowpass_ ## MMX(halfHV, halfH, 16, 16);\
2311
    OPNAME ## pixels16_l2_ ## MMX(dst, halfH, halfHV, stride, 16, 16);\
2312
}\
2313
static void OPNAME ## qpel16_mc31_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
2314
    uint64_t half[16*2 + 17*2];\
2315
    uint8_t * const halfH= ((uint8_t*)half) + 256;\
2316
    uint8_t * const halfHV= ((uint8_t*)half);\
2317
    put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16, stride, 17);\
2318
    put ## RND ## pixels16_l2_ ## MMX(halfH, src+1, halfH, 16, stride, 17);\
2319
    put ## RND ## mpeg4_qpel16_v_lowpass_ ## MMX(halfHV, halfH, 16, 16);\
2320
    OPNAME ## pixels16_l2_ ## MMX(dst, halfH, halfHV, stride, 16, 16);\
2321
}\
2322
static void OPNAME ## qpel16_mc13_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
2323
    uint64_t half[16*2 + 17*2];\
2324
    uint8_t * const halfH= ((uint8_t*)half) + 256;\
2325
    uint8_t * const halfHV= ((uint8_t*)half);\
2326
    put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16, stride, 17);\
2327
    put ## RND ## pixels16_l2_ ## MMX(halfH, src, halfH, 16, stride, 17);\
2328
    put ## RND ## mpeg4_qpel16_v_lowpass_ ## MMX(halfHV, halfH, 16, 16);\
2329
    OPNAME ## pixels16_l2_ ## MMX(dst, halfH+16, halfHV, stride, 16, 16);\
2330
}\
2331
static void OPNAME ## qpel16_mc33_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
2332
    uint64_t half[16*2 + 17*2];\
2333
    uint8_t * const halfH= ((uint8_t*)half) + 256;\
2334
    uint8_t * const halfHV= ((uint8_t*)half);\
2335
    put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16, stride, 17);\
2336
    put ## RND ## pixels16_l2_ ## MMX(halfH, src+1, halfH, 16, stride, 17);\
2337
    put ## RND ## mpeg4_qpel16_v_lowpass_ ## MMX(halfHV, halfH, 16, 16);\
2338
    OPNAME ## pixels16_l2_ ## MMX(dst, halfH+16, halfHV, stride, 16, 16);\
2339
}\
2340
static void OPNAME ## qpel16_mc21_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
2341
    uint64_t half[16*2 + 17*2];\
2342
    uint8_t * const halfH= ((uint8_t*)half) + 256;\
2343
    uint8_t * const halfHV= ((uint8_t*)half);\
2344
    put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16, stride, 17);\
2345
    put ## RND ## mpeg4_qpel16_v_lowpass_ ## MMX(halfHV, halfH, 16, 16);\
2346
    OPNAME ## pixels16_l2_ ## MMX(dst, halfH, halfHV, stride, 16, 16);\
2347
}\
2348
static void OPNAME ## qpel16_mc23_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
2349
    uint64_t half[16*2 + 17*2];\
2350
    uint8_t * const halfH= ((uint8_t*)half) + 256;\
2351
    uint8_t * const halfHV= ((uint8_t*)half);\
2352
    put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16, stride, 17);\
2353
    put ## RND ## mpeg4_qpel16_v_lowpass_ ## MMX(halfHV, halfH, 16, 16);\
2354
    OPNAME ## pixels16_l2_ ## MMX(dst, halfH+16, halfHV, stride, 16, 16);\
2355
}\
2356
static void OPNAME ## qpel16_mc12_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
2357
    uint64_t half[17*2];\
2358
    uint8_t * const halfH= ((uint8_t*)half);\
2359
    put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16, stride, 17);\
2360
    put ## RND ## pixels16_l2_ ## MMX(halfH, src, halfH, 16, stride, 17);\
2361
    OPNAME ## mpeg4_qpel16_v_lowpass_ ## MMX(dst, halfH, stride, 16);\
2362
}\
2363
static void OPNAME ## qpel16_mc32_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
2364
    uint64_t half[17*2];\
2365
    uint8_t * const halfH= ((uint8_t*)half);\
2366
    put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16, stride, 17);\
2367
    put ## RND ## pixels16_l2_ ## MMX(halfH, src+1, halfH, 16, stride, 17);\
2368
    OPNAME ## mpeg4_qpel16_v_lowpass_ ## MMX(dst, halfH, stride, 16);\
2369
}\
2370
static void OPNAME ## qpel16_mc22_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
2371
    uint64_t half[17*2];\
2372
    uint8_t * const halfH= ((uint8_t*)half);\
2373
    put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16, stride, 17);\
2374
    OPNAME ## mpeg4_qpel16_v_lowpass_ ## MMX(dst, halfH, stride, 16);\
2375
}
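
/* Store/average primitives plugged into the QPEL_BASE/QPEL_OP instantiations
   below: PUT_OP is a plain store of the filtered result, while the AVG
   variants first average it with the existing destination (pavgusb on
   3DNow!, pavgb on MMX2). */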
#define PUT_OP(a,b,temp, size) "mov" #size " " #a ", " #b "        \n\t"
#define AVG_3DNOW_OP(a,b,temp, size) \
"mov" #size " " #b ", " #temp "   \n\t"\
"pavgusb " #temp ", " #a "        \n\t"\
"mov" #size " " #a ", " #b "      \n\t"
#define AVG_MMX2_OP(a,b,temp, size) \
"mov" #size " " #b ", " #temp "   \n\t"\
"pavgb " #temp ", " #a "          \n\t"\
"mov" #size " " #a ", " #b "      \n\t"

QPEL_BASE(put_       , ff_pw_16, _       , PUT_OP, PUT_OP)
QPEL_BASE(avg_       , ff_pw_16, _       , AVG_MMX2_OP, AVG_3DNOW_OP)
QPEL_BASE(put_no_rnd_, ff_pw_15, _no_rnd_, PUT_OP, PUT_OP)
QPEL_OP(put_       , ff_pw_16, _       , PUT_OP, 3dnow)
QPEL_OP(avg_       , ff_pw_16, _       , AVG_3DNOW_OP, 3dnow)
QPEL_OP(put_no_rnd_, ff_pw_15, _no_rnd_, PUT_OP, 3dnow)
QPEL_OP(put_       , ff_pw_16, _       , PUT_OP, mmx2)
QPEL_OP(avg_       , ff_pw_16, _       , AVG_MMX2_OP, mmx2)
QPEL_OP(put_no_rnd_, ff_pw_15, _no_rnd_, PUT_OP, mmx2)

#if 0
static void just_return() { return; }
#endif

#define SET_QPEL_FUNC(postfix1, postfix2) \
    c->put_ ## postfix1 = put_ ## postfix2;\
    c->put_no_rnd_ ## postfix1 = put_no_rnd_ ## postfix2;\
    c->avg_ ## postfix1 = avg_ ## postfix2;

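/* Encoder helpers for quantizer noise shaping: try_8x8basis() estimates the
   weighted squared error that would remain if 'scale' times the 8x8 basis
   function were added to the residual 'rem', without modifying anything;
   add_8x8basis() below performs the actual update. */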
static int try_8x8basis_mmx(int16_t rem[64], int16_t weight[64], int16_t basis[64], int scale){
    long i=0;

    assert(ABS(scale) < 256);
    scale<<= 16 + 1 - BASIS_SHIFT + RECON_SHIFT;

    asm volatile(
        "pcmpeqw %%mm6, %%mm6           \n\t" // -1w
        "psrlw $15, %%mm6               \n\t" //  1w
        "pxor %%mm7, %%mm7              \n\t"
        "movd  %4, %%mm5                \n\t"
        "punpcklwd %%mm5, %%mm5         \n\t"
        "punpcklwd %%mm5, %%mm5         \n\t"
        "1:                             \n\t"
        "movq  (%1, %0), %%mm0          \n\t"
        "movq  8(%1, %0), %%mm1         \n\t"
        "pmulhw %%mm5, %%mm0            \n\t"
        "pmulhw %%mm5, %%mm1            \n\t"
        "paddw %%mm6, %%mm0             \n\t"
        "paddw %%mm6, %%mm1             \n\t"
        "psraw $1, %%mm0                \n\t"
        "psraw $1, %%mm1                \n\t"
        "paddw (%2, %0), %%mm0          \n\t"
        "paddw 8(%2, %0), %%mm1         \n\t"
        "psraw $6, %%mm0                \n\t"
        "psraw $6, %%mm1                \n\t"
        "pmullw (%3, %0), %%mm0         \n\t"
        "pmullw 8(%3, %0), %%mm1        \n\t"
        "pmaddwd %%mm0, %%mm0           \n\t"
        "pmaddwd %%mm1, %%mm1           \n\t"
        "paddd %%mm1, %%mm0             \n\t"
        "psrld $4, %%mm0                \n\t"
        "paddd %%mm0, %%mm7             \n\t"
        "add $16, %0                    \n\t"
        "cmp $128, %0                   \n\t" //FIXME optimize & bench
        " jb 1b                         \n\t"
        "movq %%mm7, %%mm6              \n\t"
        "psrlq $32, %%mm7               \n\t"
        "paddd %%mm6, %%mm7             \n\t"
        "psrld $2, %%mm7                \n\t"
        "movd %%mm7, %0                 \n\t"

        : "+r" (i)
        : "r"(basis), "r"(rem), "r"(weight), "g"(scale)
    );
    return i;
}

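/* add_8x8basis: rem[] += basis[]*scale with the rounding of the C fallback.
   The MMX path relies on the pre-shifted scale fitting into a signed 16-bit
   word for the pmulhw broadcast, hence the |scale| < 256 guard; larger scales
   take the plain C loop. */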
static void add_8x8basis_mmx(int16_t rem[64], int16_t basis[64], int scale){
    long i=0;

    if(ABS(scale) < 256){
        scale<<= 16 + 1 - BASIS_SHIFT + RECON_SHIFT;
        asm volatile(
                "pcmpeqw %%mm6, %%mm6   \n\t" // -1w
                "psrlw $15, %%mm6       \n\t" //  1w
                "movd  %3, %%mm5        \n\t"
                "punpcklwd %%mm5, %%mm5 \n\t"
                "punpcklwd %%mm5, %%mm5 \n\t"
                "1:                     \n\t"
                "movq  (%1, %0), %%mm0  \n\t"
                "movq  8(%1, %0), %%mm1 \n\t"
                "pmulhw %%mm5, %%mm0    \n\t"
                "pmulhw %%mm5, %%mm1    \n\t"
                "paddw %%mm6, %%mm0     \n\t"
                "paddw %%mm6, %%mm1     \n\t"
                "psraw $1, %%mm0        \n\t"
                "psraw $1, %%mm1        \n\t"
                "paddw (%2, %0), %%mm0  \n\t"
                "paddw 8(%2, %0), %%mm1 \n\t"
                "movq %%mm0, (%2, %0)   \n\t"
                "movq %%mm1, 8(%2, %0)  \n\t"
                "add $16, %0            \n\t"
                "cmp $128, %0           \n\t" //FIXME optimize & bench
                " jb 1b                 \n\t"

                : "+r" (i)
                : "r"(basis), "r"(rem), "g"(scale)
        );
    }else{
        for(i=0; i<8*8; i++){
            rem[i] += (basis[i]*scale + (1<<(BASIS_SHIFT - RECON_SHIFT-1)))>>(BASIS_SHIFT - RECON_SHIFT);
        }
    }
}

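/* PREFETCH(name, op) instantiates a helper that walks a strided buffer and
   issues one 'op' per line (prefetcht0 for MMX2/SSE, prefetch for 3DNow!),
   so upcoming reads can be warmed into the cache ahead of time. */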
#define PREFETCH(name, op) \
void name(void *mem, int stride, int h){\
    const uint8_t *p= mem;\
    do{\
        asm volatile(#op" %0" :: "m"(*p));\
        p+= stride;\
    }while(--h);\
}
PREFETCH(prefetch_mmx2,  prefetcht0)
PREFETCH(prefetch_3dnow, prefetch)
#undef PREFETCH

#include "h264dsp_mmx.c"

/* external functions, from idct_mmx.c */
void ff_mmx_idct(DCTELEM *block);
void ff_mmxext_idct(DCTELEM *block);

void ff_vp3_idct_sse2(int16_t *input_data);
void ff_vp3_idct_mmx(int16_t *data);
void ff_vp3_dsp_init_mmx(void);

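/* Thin put/add wrappers: each one runs the selected IDCT in place on the
   coefficient block and then writes it out through the generic MMX clamped
   store helpers (put_/add_pixels_clamped_mmx, or put_signed_pixels_clamped_mmx
   for the VP3 IDCTs). */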
/* XXX: those functions should be suppressed ASAP when all IDCTs are
   converted */
static void ff_libmpeg2mmx_idct_put(uint8_t *dest, int line_size, DCTELEM *block)
{
    ff_mmx_idct (block);
    put_pixels_clamped_mmx(block, dest, line_size);
}
static void ff_libmpeg2mmx_idct_add(uint8_t *dest, int line_size, DCTELEM *block)
{
    ff_mmx_idct (block);
    add_pixels_clamped_mmx(block, dest, line_size);
}
static void ff_libmpeg2mmx2_idct_put(uint8_t *dest, int line_size, DCTELEM *block)
{
    ff_mmxext_idct (block);
    put_pixels_clamped_mmx(block, dest, line_size);
}
static void ff_libmpeg2mmx2_idct_add(uint8_t *dest, int line_size, DCTELEM *block)
{
    ff_mmxext_idct (block);
    add_pixels_clamped_mmx(block, dest, line_size);
}
static void ff_vp3_idct_put_sse2(uint8_t *dest, int line_size, DCTELEM *block)
{
    ff_vp3_idct_sse2(block);
    put_signed_pixels_clamped_mmx(block, dest, line_size);
}
static void ff_vp3_idct_add_sse2(uint8_t *dest, int line_size, DCTELEM *block)
{
    ff_vp3_idct_sse2(block);
    add_pixels_clamped_mmx(block, dest, line_size);
}
static void ff_vp3_idct_put_mmx(uint8_t *dest, int line_size, DCTELEM *block)
{
    ff_vp3_idct_mmx(block);
    put_signed_pixels_clamped_mmx(block, dest, line_size);
}
static void ff_vp3_idct_add_mmx(uint8_t *dest, int line_size, DCTELEM *block)
{
    ff_vp3_idct_mmx(block);
    add_pixels_clamped_mmx(block, dest, line_size);
}
#ifdef CONFIG_GPL
static void ff_idct_xvid_mmx_put(uint8_t *dest, int line_size, DCTELEM *block)
{
    ff_idct_xvid_mmx (block);
    put_pixels_clamped_mmx(block, dest, line_size);
}
static void ff_idct_xvid_mmx_add(uint8_t *dest, int line_size, DCTELEM *block)
{
    ff_idct_xvid_mmx (block);
    add_pixels_clamped_mmx(block, dest, line_size);
}
static void ff_idct_xvid_mmx2_put(uint8_t *dest, int line_size, DCTELEM *block)
{
    ff_idct_xvid_mmx2 (block);
    put_pixels_clamped_mmx(block, dest, line_size);
}
static void ff_idct_xvid_mmx2_add(uint8_t *dest, int line_size, DCTELEM *block)
{
    ff_idct_xvid_mmx2 (block);
    add_pixels_clamped_mmx(block, dest, line_size);
}
#endif

#ifdef CONFIG_SNOW_ENCODER
extern void ff_snow_horizontal_compose97i_sse2(DWTELEM *b, int width);
extern void ff_snow_horizontal_compose97i_mmx(DWTELEM *b, int width);
extern void ff_snow_vertical_compose97i_sse2(DWTELEM *b0, DWTELEM *b1, DWTELEM *b2, DWTELEM *b3, DWTELEM *b4, DWTELEM *b5, int width);
extern void ff_snow_vertical_compose97i_mmx(DWTELEM *b0, DWTELEM *b1, DWTELEM *b2, DWTELEM *b3, DWTELEM *b4, DWTELEM *b5, int width);
extern void ff_snow_inner_add_yblock_sse2(uint8_t *obmc, const int obmc_stride, uint8_t * * block, int b_w, int b_h,
                           int src_x, int src_y, int src_stride, slice_buffer * sb, int add, uint8_t * dst8);
extern void ff_snow_inner_add_yblock_mmx(uint8_t *obmc, const int obmc_stride, uint8_t * * block, int b_w, int b_h,
                          int src_x, int src_y, int src_stride, slice_buffer * sb, int add, uint8_t * dst8);
#endif

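/* Runtime dispatch: mm_support() probes the CPU capabilities once, the
   avctx->dsp_mask FF_MM_FORCE mechanism can force or clear individual bits,
   and the rest of dsputil_init_mmx() fills the DSPContext function pointers
   with the MMX/MMX2/3DNow!/SSE2 implementations defined above. */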
void dsputil_init_mmx(DSPContext* c, AVCodecContext *avctx)
2591
{
2592
    mm_flags = mm_support();
2593

    
2594
    if (avctx->dsp_mask) {
2595
        if (avctx->dsp_mask & FF_MM_FORCE)
2596
            mm_flags |= (avctx->dsp_mask & 0xffff);
2597
        else
2598
            mm_flags &= ~(avctx->dsp_mask & 0xffff);
2599
    }
2600

    
2601
#if 0
2602
    av_log(avctx, AV_LOG_INFO, "libavcodec: CPU flags:");
2603
    if (mm_flags & MM_MMX)
2604
        av_log(avctx, AV_LOG_INFO, " mmx");
2605
    if (mm_flags & MM_MMXEXT)
2606
        av_log(avctx, AV_LOG_INFO, " mmxext");
2607
    if (mm_flags & MM_3DNOW)
2608
        av_log(avctx, AV_LOG_INFO, " 3dnow");
2609
    if (mm_flags & MM_SSE)
2610
        av_log(avctx, AV_LOG_INFO, " sse");
2611
    if (mm_flags & MM_SSE2)
2612
        av_log(avctx, AV_LOG_INFO, " sse2");
2613
    av_log(avctx, AV_LOG_INFO, "\n");
2614
#endif
2615

    
2616
    if (mm_flags & MM_MMX) {
2617
        const int idct_algo= avctx->idct_algo;
2618

    
2619
#ifdef CONFIG_ENCODERS
        const int dct_algo = avctx->dct_algo;
        if(dct_algo==FF_DCT_AUTO || dct_algo==FF_DCT_MMX){
            if(mm_flags & MM_SSE2){
                c->fdct = ff_fdct_sse2;
            }else if(mm_flags & MM_MMXEXT){
                c->fdct = ff_fdct_mmx2;
            }else{
                c->fdct = ff_fdct_mmx;
            }
        }
#endif //CONFIG_ENCODERS
        if(avctx->lowres==0){
            if(idct_algo==FF_IDCT_AUTO || idct_algo==FF_IDCT_SIMPLEMMX){
                c->idct_put= ff_simple_idct_put_mmx;
                c->idct_add= ff_simple_idct_add_mmx;
                c->idct    = ff_simple_idct_mmx;
                c->idct_permutation_type= FF_SIMPLE_IDCT_PERM;
            }else if(idct_algo==FF_IDCT_LIBMPEG2MMX){
                if(mm_flags & MM_MMXEXT){
                    c->idct_put= ff_libmpeg2mmx2_idct_put;
                    c->idct_add= ff_libmpeg2mmx2_idct_add;
                    c->idct    = ff_mmxext_idct;
                }else{
                    c->idct_put= ff_libmpeg2mmx_idct_put;
                    c->idct_add= ff_libmpeg2mmx_idct_add;
                    c->idct    = ff_mmx_idct;
                }
                c->idct_permutation_type= FF_LIBMPEG2_IDCT_PERM;
            }else if(idct_algo==FF_IDCT_VP3){
                if(mm_flags & MM_SSE2){
                    c->idct_put= ff_vp3_idct_put_sse2;
                    c->idct_add= ff_vp3_idct_add_sse2;
                    c->idct    = ff_vp3_idct_sse2;
                    c->idct_permutation_type= FF_TRANSPOSE_IDCT_PERM;
                }else{
                    ff_vp3_dsp_init_mmx();
                    c->idct_put= ff_vp3_idct_put_mmx;
                    c->idct_add= ff_vp3_idct_add_mmx;
                    c->idct    = ff_vp3_idct_mmx;
                    c->idct_permutation_type= FF_PARTTRANS_IDCT_PERM;
                }
#ifdef CONFIG_GPL
            }else if(idct_algo==FF_IDCT_XVIDMMX){
                if(mm_flags & MM_MMXEXT){
                    c->idct_put= ff_idct_xvid_mmx2_put;
                    c->idct_add= ff_idct_xvid_mmx2_add;
                    c->idct    = ff_idct_xvid_mmx2;
                }else{
                    c->idct_put= ff_idct_xvid_mmx_put;
                    c->idct_add= ff_idct_xvid_mmx_add;
                    c->idct    = ff_idct_xvid_mmx;
                }
#endif
            }
        }

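        /* Plain MMX block primitives. In the *_pixels_tab arrays the first
         * index selects the block width (0 = 16 pixels, 1 = 8 pixels) and the
         * second the half-pel interpolation: 0 = copy, 1 = horizontal,
         * 2 = vertical, 3 = both. */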
#ifdef CONFIG_ENCODERS
        c->get_pixels = get_pixels_mmx;
        c->diff_pixels = diff_pixels_mmx;
#endif //CONFIG_ENCODERS
        c->put_pixels_clamped = put_pixels_clamped_mmx;
        c->put_signed_pixels_clamped = put_signed_pixels_clamped_mmx;
        c->add_pixels_clamped = add_pixels_clamped_mmx;
        c->clear_blocks = clear_blocks_mmx;
#ifdef CONFIG_ENCODERS
        c->pix_sum = pix_sum16_mmx;
#endif //CONFIG_ENCODERS

        c->put_pixels_tab[0][0] = put_pixels16_mmx;
        c->put_pixels_tab[0][1] = put_pixels16_x2_mmx;
        c->put_pixels_tab[0][2] = put_pixels16_y2_mmx;
        c->put_pixels_tab[0][3] = put_pixels16_xy2_mmx;

        c->put_no_rnd_pixels_tab[0][0] = put_pixels16_mmx;
        c->put_no_rnd_pixels_tab[0][1] = put_no_rnd_pixels16_x2_mmx;
        c->put_no_rnd_pixels_tab[0][2] = put_no_rnd_pixels16_y2_mmx;
        c->put_no_rnd_pixels_tab[0][3] = put_no_rnd_pixels16_xy2_mmx;

        c->avg_pixels_tab[0][0] = avg_pixels16_mmx;
        c->avg_pixels_tab[0][1] = avg_pixels16_x2_mmx;
        c->avg_pixels_tab[0][2] = avg_pixels16_y2_mmx;
        c->avg_pixels_tab[0][3] = avg_pixels16_xy2_mmx;

        c->avg_no_rnd_pixels_tab[0][0] = avg_no_rnd_pixels16_mmx;
        c->avg_no_rnd_pixels_tab[0][1] = avg_no_rnd_pixels16_x2_mmx;
        c->avg_no_rnd_pixels_tab[0][2] = avg_no_rnd_pixels16_y2_mmx;
        c->avg_no_rnd_pixels_tab[0][3] = avg_no_rnd_pixels16_xy2_mmx;

        c->put_pixels_tab[1][0] = put_pixels8_mmx;
        c->put_pixels_tab[1][1] = put_pixels8_x2_mmx;
        c->put_pixels_tab[1][2] = put_pixels8_y2_mmx;
        c->put_pixels_tab[1][3] = put_pixels8_xy2_mmx;

        c->put_no_rnd_pixels_tab[1][0] = put_pixels8_mmx;
        c->put_no_rnd_pixels_tab[1][1] = put_no_rnd_pixels8_x2_mmx;
        c->put_no_rnd_pixels_tab[1][2] = put_no_rnd_pixels8_y2_mmx;
        c->put_no_rnd_pixels_tab[1][3] = put_no_rnd_pixels8_xy2_mmx;

        c->avg_pixels_tab[1][0] = avg_pixels8_mmx;
        c->avg_pixels_tab[1][1] = avg_pixels8_x2_mmx;
        c->avg_pixels_tab[1][2] = avg_pixels8_y2_mmx;
        c->avg_pixels_tab[1][3] = avg_pixels8_xy2_mmx;

        c->avg_no_rnd_pixels_tab[1][0] = avg_no_rnd_pixels8_mmx;
        c->avg_no_rnd_pixels_tab[1][1] = avg_no_rnd_pixels8_x2_mmx;
        c->avg_no_rnd_pixels_tab[1][2] = avg_no_rnd_pixels8_y2_mmx;
        c->avg_no_rnd_pixels_tab[1][3] = avg_no_rnd_pixels8_xy2_mmx;

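        /* Byte-wise add/diff helpers (HuffYUV-style lossless prediction) and
         * the encoder-only comparison metrics (Hadamard/SATD, SSE, VSAD, NSSE)
         * used for motion estimation and mode decision. */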
        c->add_bytes= add_bytes_mmx;
#ifdef CONFIG_ENCODERS
        c->diff_bytes= diff_bytes_mmx;

        c->hadamard8_diff[0]= hadamard8_diff16_mmx;
        c->hadamard8_diff[1]= hadamard8_diff_mmx;

        c->pix_norm1 = pix_norm1_mmx;
        c->sse[0] = (mm_flags & MM_SSE2) ? sse16_sse2 : sse16_mmx;
        c->sse[1] = sse8_mmx;
        c->vsad[4]= vsad_intra16_mmx;

        c->nsse[0] = nsse16_mmx;
        c->nsse[1] = nsse8_mmx;
        if(!(avctx->flags & CODEC_FLAG_BITEXACT)){
            c->vsad[0] = vsad16_mmx;
        }

        if(!(avctx->flags & CODEC_FLAG_BITEXACT)){
            c->try_8x8basis= try_8x8basis_mmx;
        }
        c->add_8x8basis= add_8x8basis_mmx;

#endif //CONFIG_ENCODERS

        c->h263_v_loop_filter= h263_v_loop_filter_mmx;
        c->h263_h_loop_filter= h263_h_loop_filter_mmx;
        c->put_h264_chroma_pixels_tab[0]= put_h264_chroma_mc8_mmx;
        c->put_h264_chroma_pixels_tab[1]= put_h264_chroma_mc4_mmx;

        c->h264_idct_dc_add=
        c->h264_idct_add= ff_h264_idct_add_mmx;
        c->h264_idct8_dc_add=
        c->h264_idct8_add= ff_h264_idct8_add_mmx;

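        /* MMX2 (MMXEXT) adds instructions such as pavgb, enabling faster
         * averaging and no-rounding variants plus the H.264 quarter-pel,
         * weighted-prediction and deblocking kernels below. */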
        if (mm_flags & MM_MMXEXT) {
            c->prefetch = prefetch_mmx2;

            c->put_pixels_tab[0][1] = put_pixels16_x2_mmx2;
            c->put_pixels_tab[0][2] = put_pixels16_y2_mmx2;

            c->avg_pixels_tab[0][0] = avg_pixels16_mmx2;
            c->avg_pixels_tab[0][1] = avg_pixels16_x2_mmx2;
            c->avg_pixels_tab[0][2] = avg_pixels16_y2_mmx2;

            c->put_pixels_tab[1][1] = put_pixels8_x2_mmx2;
            c->put_pixels_tab[1][2] = put_pixels8_y2_mmx2;

            c->avg_pixels_tab[1][0] = avg_pixels8_mmx2;
            c->avg_pixels_tab[1][1] = avg_pixels8_x2_mmx2;
            c->avg_pixels_tab[1][2] = avg_pixels8_y2_mmx2;

#ifdef CONFIG_ENCODERS
            c->hadamard8_diff[0]= hadamard8_diff16_mmx2;
            c->hadamard8_diff[1]= hadamard8_diff_mmx2;
            c->vsad[4]= vsad_intra16_mmx2;
#endif //CONFIG_ENCODERS

            c->h264_idct_dc_add= ff_h264_idct_dc_add_mmx2;
            c->h264_idct8_dc_add= ff_h264_idct8_dc_add_mmx2;

            if(!(avctx->flags & CODEC_FLAG_BITEXACT)){
                c->put_no_rnd_pixels_tab[0][1] = put_no_rnd_pixels16_x2_mmx2;
                c->put_no_rnd_pixels_tab[0][2] = put_no_rnd_pixels16_y2_mmx2;
                c->put_no_rnd_pixels_tab[1][1] = put_no_rnd_pixels8_x2_mmx2;
                c->put_no_rnd_pixels_tab[1][2] = put_no_rnd_pixels8_y2_mmx2;
                c->avg_pixels_tab[0][3] = avg_pixels16_xy2_mmx2;
                c->avg_pixels_tab[1][3] = avg_pixels8_xy2_mmx2;
#ifdef CONFIG_ENCODERS
                c->vsad[0] = vsad16_mmx2;
#endif //CONFIG_ENCODERS
            }

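            /* Quarter-pel motion compensation: entry i of qpel_pixels_tab[size]
             * is the mcXY kernel, where X and Y are the horizontal and vertical
             * quarter-pel offsets (each 0..3). */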
#if 1
            SET_QPEL_FUNC(qpel_pixels_tab[0][ 0], qpel16_mc00_mmx2)
            SET_QPEL_FUNC(qpel_pixels_tab[0][ 1], qpel16_mc10_mmx2)
            SET_QPEL_FUNC(qpel_pixels_tab[0][ 2], qpel16_mc20_mmx2)
            SET_QPEL_FUNC(qpel_pixels_tab[0][ 3], qpel16_mc30_mmx2)
            SET_QPEL_FUNC(qpel_pixels_tab[0][ 4], qpel16_mc01_mmx2)
            SET_QPEL_FUNC(qpel_pixels_tab[0][ 5], qpel16_mc11_mmx2)
            SET_QPEL_FUNC(qpel_pixels_tab[0][ 6], qpel16_mc21_mmx2)
            SET_QPEL_FUNC(qpel_pixels_tab[0][ 7], qpel16_mc31_mmx2)
            SET_QPEL_FUNC(qpel_pixels_tab[0][ 8], qpel16_mc02_mmx2)
            SET_QPEL_FUNC(qpel_pixels_tab[0][ 9], qpel16_mc12_mmx2)
            SET_QPEL_FUNC(qpel_pixels_tab[0][10], qpel16_mc22_mmx2)
            SET_QPEL_FUNC(qpel_pixels_tab[0][11], qpel16_mc32_mmx2)
            SET_QPEL_FUNC(qpel_pixels_tab[0][12], qpel16_mc03_mmx2)
            SET_QPEL_FUNC(qpel_pixels_tab[0][13], qpel16_mc13_mmx2)
            SET_QPEL_FUNC(qpel_pixels_tab[0][14], qpel16_mc23_mmx2)
            SET_QPEL_FUNC(qpel_pixels_tab[0][15], qpel16_mc33_mmx2)
            SET_QPEL_FUNC(qpel_pixels_tab[1][ 0], qpel8_mc00_mmx2)
            SET_QPEL_FUNC(qpel_pixels_tab[1][ 1], qpel8_mc10_mmx2)
            SET_QPEL_FUNC(qpel_pixels_tab[1][ 2], qpel8_mc20_mmx2)
            SET_QPEL_FUNC(qpel_pixels_tab[1][ 3], qpel8_mc30_mmx2)
            SET_QPEL_FUNC(qpel_pixels_tab[1][ 4], qpel8_mc01_mmx2)
            SET_QPEL_FUNC(qpel_pixels_tab[1][ 5], qpel8_mc11_mmx2)
            SET_QPEL_FUNC(qpel_pixels_tab[1][ 6], qpel8_mc21_mmx2)
            SET_QPEL_FUNC(qpel_pixels_tab[1][ 7], qpel8_mc31_mmx2)
            SET_QPEL_FUNC(qpel_pixels_tab[1][ 8], qpel8_mc02_mmx2)
            SET_QPEL_FUNC(qpel_pixels_tab[1][ 9], qpel8_mc12_mmx2)
            SET_QPEL_FUNC(qpel_pixels_tab[1][10], qpel8_mc22_mmx2)
            SET_QPEL_FUNC(qpel_pixels_tab[1][11], qpel8_mc32_mmx2)
            SET_QPEL_FUNC(qpel_pixels_tab[1][12], qpel8_mc03_mmx2)
            SET_QPEL_FUNC(qpel_pixels_tab[1][13], qpel8_mc13_mmx2)
            SET_QPEL_FUNC(qpel_pixels_tab[1][14], qpel8_mc23_mmx2)
            SET_QPEL_FUNC(qpel_pixels_tab[1][15], qpel8_mc33_mmx2)
#endif

//FIXME 3dnow too
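/* dspfunc() expands into the 16 assignments that fill one row of an H.264
 * quarter-pel table: PFX picks put or avg, IDX the table row, and NUM the
 * block width (16, 8 or 4). */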
#define dspfunc(PFX, IDX, NUM) \
    c->PFX ## _pixels_tab[IDX][ 0] = PFX ## NUM ## _mc00_mmx2; \
    c->PFX ## _pixels_tab[IDX][ 1] = PFX ## NUM ## _mc10_mmx2; \
    c->PFX ## _pixels_tab[IDX][ 2] = PFX ## NUM ## _mc20_mmx2; \
    c->PFX ## _pixels_tab[IDX][ 3] = PFX ## NUM ## _mc30_mmx2; \
    c->PFX ## _pixels_tab[IDX][ 4] = PFX ## NUM ## _mc01_mmx2; \
    c->PFX ## _pixels_tab[IDX][ 5] = PFX ## NUM ## _mc11_mmx2; \
    c->PFX ## _pixels_tab[IDX][ 6] = PFX ## NUM ## _mc21_mmx2; \
    c->PFX ## _pixels_tab[IDX][ 7] = PFX ## NUM ## _mc31_mmx2; \
    c->PFX ## _pixels_tab[IDX][ 8] = PFX ## NUM ## _mc02_mmx2; \
    c->PFX ## _pixels_tab[IDX][ 9] = PFX ## NUM ## _mc12_mmx2; \
    c->PFX ## _pixels_tab[IDX][10] = PFX ## NUM ## _mc22_mmx2; \
    c->PFX ## _pixels_tab[IDX][11] = PFX ## NUM ## _mc32_mmx2; \
    c->PFX ## _pixels_tab[IDX][12] = PFX ## NUM ## _mc03_mmx2; \
    c->PFX ## _pixels_tab[IDX][13] = PFX ## NUM ## _mc13_mmx2; \
    c->PFX ## _pixels_tab[IDX][14] = PFX ## NUM ## _mc23_mmx2; \
    c->PFX ## _pixels_tab[IDX][15] = PFX ## NUM ## _mc33_mmx2

            dspfunc(put_h264_qpel, 0, 16);
            dspfunc(put_h264_qpel, 1, 8);
            dspfunc(put_h264_qpel, 2, 4);
            dspfunc(avg_h264_qpel, 0, 16);
            dspfunc(avg_h264_qpel, 1, 8);
            dspfunc(avg_h264_qpel, 2, 4);
#undef dspfunc

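            /* H.264 chroma motion compensation (8x8, 4x4 and 2x2 blocks) and
             * the luma/chroma deblocking (loop) filters. */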
            c->avg_h264_chroma_pixels_tab[0]= avg_h264_chroma_mc8_mmx2;
            c->avg_h264_chroma_pixels_tab[1]= avg_h264_chroma_mc4_mmx2;
            c->avg_h264_chroma_pixels_tab[2]= avg_h264_chroma_mc2_mmx2;
            c->put_h264_chroma_pixels_tab[2]= put_h264_chroma_mc2_mmx2;
            c->h264_v_loop_filter_luma= h264_v_loop_filter_luma_mmx2;
            c->h264_h_loop_filter_luma= h264_h_loop_filter_luma_mmx2;
            c->h264_v_loop_filter_chroma= h264_v_loop_filter_chroma_mmx2;
            c->h264_h_loop_filter_chroma= h264_h_loop_filter_chroma_mmx2;
            c->h264_v_loop_filter_chroma_intra= h264_v_loop_filter_chroma_intra_mmx2;
            c->h264_h_loop_filter_chroma_intra= h264_h_loop_filter_chroma_intra_mmx2;

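            /* H.264 weighted and bi-directionally weighted prediction; the
             * eight entries cover block sizes from 16x16 down to 4x2. */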
            c->weight_h264_pixels_tab[0]= ff_h264_weight_16x16_mmx2;
            c->weight_h264_pixels_tab[1]= ff_h264_weight_16x8_mmx2;
            c->weight_h264_pixels_tab[2]= ff_h264_weight_8x16_mmx2;
            c->weight_h264_pixels_tab[3]= ff_h264_weight_8x8_mmx2;
            c->weight_h264_pixels_tab[4]= ff_h264_weight_8x4_mmx2;
            c->weight_h264_pixels_tab[5]= ff_h264_weight_4x8_mmx2;
            c->weight_h264_pixels_tab[6]= ff_h264_weight_4x4_mmx2;
            c->weight_h264_pixels_tab[7]= ff_h264_weight_4x2_mmx2;

            c->biweight_h264_pixels_tab[0]= ff_h264_biweight_16x16_mmx2;
            c->biweight_h264_pixels_tab[1]= ff_h264_biweight_16x8_mmx2;
            c->biweight_h264_pixels_tab[2]= ff_h264_biweight_8x16_mmx2;
            c->biweight_h264_pixels_tab[3]= ff_h264_biweight_8x8_mmx2;
            c->biweight_h264_pixels_tab[4]= ff_h264_biweight_8x4_mmx2;
            c->biweight_h264_pixels_tab[5]= ff_h264_biweight_4x8_mmx2;
            c->biweight_h264_pixels_tab[6]= ff_h264_biweight_4x4_mmx2;
            c->biweight_h264_pixels_tab[7]= ff_h264_biweight_4x2_mmx2;

#ifdef CONFIG_ENCODERS
            c->sub_hfyu_median_prediction= sub_hfyu_median_prediction_mmx2;
#endif //CONFIG_ENCODERS
        } else if (mm_flags & MM_3DNOW) {
            c->prefetch = prefetch_3dnow;

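            /* 3DNow! branch, taken only when MMX2 is unavailable: it mirrors
             * the MMX2 setup with pavgusb-based averaging kernels, but adds
             * no extra encoder metrics. */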
            c->put_pixels_tab[0][1] = put_pixels16_x2_3dnow;
            c->put_pixels_tab[0][2] = put_pixels16_y2_3dnow;

            c->avg_pixels_tab[0][0] = avg_pixels16_3dnow;
            c->avg_pixels_tab[0][1] = avg_pixels16_x2_3dnow;
            c->avg_pixels_tab[0][2] = avg_pixels16_y2_3dnow;

            c->put_pixels_tab[1][1] = put_pixels8_x2_3dnow;
            c->put_pixels_tab[1][2] = put_pixels8_y2_3dnow;

            c->avg_pixels_tab[1][0] = avg_pixels8_3dnow;
            c->avg_pixels_tab[1][1] = avg_pixels8_x2_3dnow;
            c->avg_pixels_tab[1][2] = avg_pixels8_y2_3dnow;

            if(!(avctx->flags & CODEC_FLAG_BITEXACT)){
                c->put_no_rnd_pixels_tab[0][1] = put_no_rnd_pixels16_x2_3dnow;
                c->put_no_rnd_pixels_tab[0][2] = put_no_rnd_pixels16_y2_3dnow;
                c->put_no_rnd_pixels_tab[1][1] = put_no_rnd_pixels8_x2_3dnow;
                c->put_no_rnd_pixels_tab[1][2] = put_no_rnd_pixels8_y2_3dnow;
                c->avg_pixels_tab[0][3] = avg_pixels16_xy2_3dnow;
                c->avg_pixels_tab[1][3] = avg_pixels8_xy2_3dnow;
            }

            SET_QPEL_FUNC(qpel_pixels_tab[0][ 0], qpel16_mc00_3dnow)
            SET_QPEL_FUNC(qpel_pixels_tab[0][ 1], qpel16_mc10_3dnow)
            SET_QPEL_FUNC(qpel_pixels_tab[0][ 2], qpel16_mc20_3dnow)
            SET_QPEL_FUNC(qpel_pixels_tab[0][ 3], qpel16_mc30_3dnow)
            SET_QPEL_FUNC(qpel_pixels_tab[0][ 4], qpel16_mc01_3dnow)
            SET_QPEL_FUNC(qpel_pixels_tab[0][ 5], qpel16_mc11_3dnow)
            SET_QPEL_FUNC(qpel_pixels_tab[0][ 6], qpel16_mc21_3dnow)
            SET_QPEL_FUNC(qpel_pixels_tab[0][ 7], qpel16_mc31_3dnow)
            SET_QPEL_FUNC(qpel_pixels_tab[0][ 8], qpel16_mc02_3dnow)
            SET_QPEL_FUNC(qpel_pixels_tab[0][ 9], qpel16_mc12_3dnow)
            SET_QPEL_FUNC(qpel_pixels_tab[0][10], qpel16_mc22_3dnow)
            SET_QPEL_FUNC(qpel_pixels_tab[0][11], qpel16_mc32_3dnow)
            SET_QPEL_FUNC(qpel_pixels_tab[0][12], qpel16_mc03_3dnow)
            SET_QPEL_FUNC(qpel_pixels_tab[0][13], qpel16_mc13_3dnow)
            SET_QPEL_FUNC(qpel_pixels_tab[0][14], qpel16_mc23_3dnow)
            SET_QPEL_FUNC(qpel_pixels_tab[0][15], qpel16_mc33_3dnow)
            SET_QPEL_FUNC(qpel_pixels_tab[1][ 0], qpel8_mc00_3dnow)
            SET_QPEL_FUNC(qpel_pixels_tab[1][ 1], qpel8_mc10_3dnow)
            SET_QPEL_FUNC(qpel_pixels_tab[1][ 2], qpel8_mc20_3dnow)
            SET_QPEL_FUNC(qpel_pixels_tab[1][ 3], qpel8_mc30_3dnow)
            SET_QPEL_FUNC(qpel_pixels_tab[1][ 4], qpel8_mc01_3dnow)
            SET_QPEL_FUNC(qpel_pixels_tab[1][ 5], qpel8_mc11_3dnow)
            SET_QPEL_FUNC(qpel_pixels_tab[1][ 6], qpel8_mc21_3dnow)
            SET_QPEL_FUNC(qpel_pixels_tab[1][ 7], qpel8_mc31_3dnow)
            SET_QPEL_FUNC(qpel_pixels_tab[1][ 8], qpel8_mc02_3dnow)
            SET_QPEL_FUNC(qpel_pixels_tab[1][ 9], qpel8_mc12_3dnow)
            SET_QPEL_FUNC(qpel_pixels_tab[1][10], qpel8_mc22_3dnow)
            SET_QPEL_FUNC(qpel_pixels_tab[1][11], qpel8_mc32_3dnow)
            SET_QPEL_FUNC(qpel_pixels_tab[1][12], qpel8_mc03_3dnow)
            SET_QPEL_FUNC(qpel_pixels_tab[1][13], qpel8_mc13_3dnow)
            SET_QPEL_FUNC(qpel_pixels_tab[1][14], qpel8_mc23_3dnow)
            SET_QPEL_FUNC(qpel_pixels_tab[1][15], qpel8_mc33_3dnow)

#define dspfunc(PFX, IDX, NUM) \
    c->PFX ## _pixels_tab[IDX][ 0] = PFX ## NUM ## _mc00_3dnow; \
    c->PFX ## _pixels_tab[IDX][ 1] = PFX ## NUM ## _mc10_3dnow; \
    c->PFX ## _pixels_tab[IDX][ 2] = PFX ## NUM ## _mc20_3dnow; \
    c->PFX ## _pixels_tab[IDX][ 3] = PFX ## NUM ## _mc30_3dnow; \
    c->PFX ## _pixels_tab[IDX][ 4] = PFX ## NUM ## _mc01_3dnow; \
    c->PFX ## _pixels_tab[IDX][ 5] = PFX ## NUM ## _mc11_3dnow; \
    c->PFX ## _pixels_tab[IDX][ 6] = PFX ## NUM ## _mc21_3dnow; \
    c->PFX ## _pixels_tab[IDX][ 7] = PFX ## NUM ## _mc31_3dnow; \
    c->PFX ## _pixels_tab[IDX][ 8] = PFX ## NUM ## _mc02_3dnow; \
    c->PFX ## _pixels_tab[IDX][ 9] = PFX ## NUM ## _mc12_3dnow; \
    c->PFX ## _pixels_tab[IDX][10] = PFX ## NUM ## _mc22_3dnow; \
    c->PFX ## _pixels_tab[IDX][11] = PFX ## NUM ## _mc32_3dnow; \
    c->PFX ## _pixels_tab[IDX][12] = PFX ## NUM ## _mc03_3dnow; \
    c->PFX ## _pixels_tab[IDX][13] = PFX ## NUM ## _mc13_3dnow; \
    c->PFX ## _pixels_tab[IDX][14] = PFX ## NUM ## _mc23_3dnow; \
    c->PFX ## _pixels_tab[IDX][15] = PFX ## NUM ## _mc33_3dnow

            dspfunc(put_h264_qpel, 0, 16);
            dspfunc(put_h264_qpel, 1, 8);
            dspfunc(put_h264_qpel, 2, 4);
            dspfunc(avg_h264_qpel, 0, 16);
            dspfunc(avg_h264_qpel, 1, 8);
            dspfunc(avg_h264_qpel, 2, 4);

            c->avg_h264_chroma_pixels_tab[0]= avg_h264_chroma_mc8_3dnow;
            c->avg_h264_chroma_pixels_tab[1]= avg_h264_chroma_mc4_3dnow;
        }

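        /* Snow wavelet (9/7 lifting) and OBMC block-add helpers: prefer the
         * SSE2 versions, otherwise fall back to plain MMX. */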
#ifdef CONFIG_SNOW_ENCODER
        if(mm_flags & MM_SSE2){
            c->horizontal_compose97i = ff_snow_horizontal_compose97i_sse2;
            c->vertical_compose97i = ff_snow_vertical_compose97i_sse2;
            c->inner_add_yblock = ff_snow_inner_add_yblock_sse2;
        }
        else{
            c->horizontal_compose97i = ff_snow_horizontal_compose97i_mmx;
            c->vertical_compose97i = ff_snow_vertical_compose97i_mmx;
            c->inner_add_yblock = ff_snow_inner_add_yblock_mmx;
        }
#endif
    }

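    /* dsputil_init_pix_mmx() lives in a separate file; it appears to register
     * further encoder-only MMX compare functions (pix_abs/SAD) on top of the
     * ones set above. */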
#ifdef CONFIG_ENCODERS
    dsputil_init_pix_mmx(c, avctx);
#endif //CONFIG_ENCODERS
#if 0
    // for speed testing
    get_pixels = just_return;
    put_pixels_clamped = just_return;
    add_pixels_clamped = just_return;

    pix_abs16x16 = just_return;
    pix_abs16x16_x2 = just_return;
    pix_abs16x16_y2 = just_return;
    pix_abs16x16_xy2 = just_return;

    put_pixels_tab[0] = just_return;
    put_pixels_tab[1] = just_return;
    put_pixels_tab[2] = just_return;
    put_pixels_tab[3] = just_return;

    put_no_rnd_pixels_tab[0] = just_return;
    put_no_rnd_pixels_tab[1] = just_return;
    put_no_rnd_pixels_tab[2] = just_return;
    put_no_rnd_pixels_tab[3] = just_return;

    avg_pixels_tab[0] = just_return;
    avg_pixels_tab[1] = just_return;
    avg_pixels_tab[2] = just_return;
    avg_pixels_tab[3] = just_return;

    avg_no_rnd_pixels_tab[0] = just_return;
    avg_no_rnd_pixels_tab[1] = just_return;
    avg_no_rnd_pixels_tab[2] = just_return;
    avg_no_rnd_pixels_tab[3] = just_return;

    //av_fdct = just_return;
    //ff_idct = just_return;
#endif
}