ffmpeg / libavcodec / i386 / dsputil_mmx.c @ 8f2ab833

/*
 * MMX optimized DSP utils
 * Copyright (c) 2000, 2001 Fabrice Bellard.
 * Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 *
 * MMX optimization by Nick Kurshev <nickols_k@mail.ru>
 */

#include "../dsputil.h"
#include "../simple_idct.h"

//#undef NDEBUG
//#include <assert.h>

extern const uint8_t ff_h263_loop_filter_strength[32];

int mm_flags; /* multimedia extension flags */

/* pixel operations */
static const uint64_t mm_bone __attribute__ ((aligned(8))) = 0x0101010101010101ULL;
static const uint64_t mm_wone __attribute__ ((aligned(8))) = 0x0001000100010001ULL;
static const uint64_t mm_wtwo __attribute__ ((aligned(8))) = 0x0002000200020002ULL;

static const uint64_t ff_pw_20 __attribute__ ((aligned(8))) = 0x0014001400140014ULL;
static const uint64_t ff_pw_3  __attribute__ ((aligned(8))) = 0x0003000300030003ULL;
static const uint64_t ff_pw_16 __attribute__ ((aligned(8))) = 0x0010001000100010ULL;
static const uint64_t ff_pw_15 __attribute__ ((aligned(8))) = 0x000F000F000F000FULL;

static const uint64_t ff_pb_FC __attribute__ ((aligned(8))) = 0xFCFCFCFCFCFCFCFCULL;

#define JUMPALIGN() __asm __volatile (".balign 8"::)
#define MOVQ_ZERO(regd)  __asm __volatile ("pxor %%" #regd ", %%" #regd ::)

#define MOVQ_WONE(regd) \
    __asm __volatile ( \
    "pcmpeqd %%" #regd ", %%" #regd " \n\t" \
    "psrlw $15, %%" #regd ::)

#define MOVQ_BFE(regd) \
    __asm __volatile ( \
    "pcmpeqd %%" #regd ", %%" #regd " \n\t"\
    "paddb %%" #regd ", %%" #regd " \n\t" ::)

#ifndef PIC
#define MOVQ_BONE(regd)  __asm __volatile ("movq %0, %%" #regd " \n\t" ::"m"(mm_bone))
#define MOVQ_WTWO(regd)  __asm __volatile ("movq %0, %%" #regd " \n\t" ::"m"(mm_wtwo))
#else
// for shared libraries it's better to access constants this way
// pcmpeqd -> -1
#define MOVQ_BONE(regd) \
    __asm __volatile ( \
    "pcmpeqd %%" #regd ", %%" #regd " \n\t" \
    "psrlw $15, %%" #regd " \n\t" \
    "packuswb %%" #regd ", %%" #regd " \n\t" ::)

#define MOVQ_WTWO(regd) \
    __asm __volatile ( \
    "pcmpeqd %%" #regd ", %%" #regd " \n\t" \
    "psrlw $15, %%" #regd " \n\t" \
    "psllw $1, %%" #regd " \n\t"::)

#endif

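/* Note on the PIC variants above (explanatory, not in the original file):
 * pcmpeqd sets every bit, so each 16-bit word is 0xFFFF; "psrlw $15" turns
 * each word into 0x0001. From there, packuswb gives 0x01 in every byte
 * (BONE) and "psllw $1" gives 0x0002 in every word (WTWO). Building the
 * constants in registers avoids a load from an absolute address, which a
 * shared library cannot use directly. */
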
// using regr as temporary and for the output result
// first argument is unmodified and second is trashed
// regfe is supposed to contain 0xfefefefefefefefe
#define PAVGB_MMX_NO_RND(rega, regb, regr, regfe) \
    "movq " #rega ", " #regr "          \n\t"\
    "pand " #regb ", " #regr "          \n\t"\
    "pxor " #rega ", " #regb "          \n\t"\
    "pand " #regfe "," #regb "          \n\t"\
    "psrlq $1, " #regb "                \n\t"\
    "paddb " #regb ", " #regr "         \n\t"

#define PAVGB_MMX(rega, regb, regr, regfe) \
    "movq " #rega ", " #regr "          \n\t"\
    "por  " #regb ", " #regr "          \n\t"\
    "pxor " #rega ", " #regb "          \n\t"\
    "pand " #regfe "," #regb "          \n\t"\
    "psrlq $1, " #regb "                \n\t"\
    "psubb " #regb ", " #regr "         \n\t"

// mm6 is supposed to contain 0xfefefefefefefefe
#define PAVGBP_MMX_NO_RND(rega, regb, regr,  regc, regd, regp) \
    "movq " #rega ", " #regr "          \n\t"\
    "movq " #regc ", " #regp "          \n\t"\
    "pand " #regb ", " #regr "          \n\t"\
    "pand " #regd ", " #regp "          \n\t"\
    "pxor " #rega ", " #regb "          \n\t"\
    "pxor " #regc ", " #regd "          \n\t"\
    "pand %%mm6, " #regb "              \n\t"\
    "pand %%mm6, " #regd "              \n\t"\
    "psrlq $1, " #regb "                \n\t"\
    "psrlq $1, " #regd "                \n\t"\
    "paddb " #regb ", " #regr "         \n\t"\
    "paddb " #regd ", " #regp "         \n\t"

#define PAVGBP_MMX(rega, regb, regr, regc, regd, regp) \
    "movq " #rega ", " #regr "          \n\t"\
    "movq " #regc ", " #regp "          \n\t"\
    "por  " #regb ", " #regr "          \n\t"\
    "por  " #regd ", " #regp "          \n\t"\
    "pxor " #rega ", " #regb "          \n\t"\
    "pxor " #regc ", " #regd "          \n\t"\
    "pand %%mm6, " #regb "              \n\t"\
    "pand %%mm6, " #regd "              \n\t"\
    "psrlq $1, " #regd "                \n\t"\
    "psrlq $1, " #regb "                \n\t"\
    "psubb " #regb ", " #regr "         \n\t"\
    "psubb " #regd ", " #regp "         \n\t"

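/* Why the averaging macros work (an explanatory note with a scalar sketch;
 * not part of the original file): for unsigned bytes a and b,
 *   (a + b) >> 1     == (a & b) + (((a ^ b) & 0xFE) >> 1)   (no rounding)
 *   (a + b + 1) >> 1 == (a | b) - (((a ^ b) & 0xFE) >> 1)   (rounding)
 * Masking with 0xFE before the quadword psrlq keeps a bit from one byte
 * from leaking into its neighbour, so eight averages happen in parallel. */
#if 0
/* hypothetical reference helpers, for illustration only */
static inline uint8_t avg_no_rnd_ref(uint8_t a, uint8_t b)
{
    return (a & b) + (((a ^ b) & 0xFE) >> 1);   /* == (a + b) >> 1 */
}
static inline uint8_t avg_rnd_ref(uint8_t a, uint8_t b)
{
    return (a | b) - (((a ^ b) & 0xFE) >> 1);   /* == (a + b + 1) >> 1 */
}
#endif
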
/***********************************/
/* MMX no rounding */
#define DEF(x, y) x ## _no_rnd_ ## y ##_mmx
#define SET_RND  MOVQ_WONE
#define PAVGBP(a, b, c, d, e, f)        PAVGBP_MMX_NO_RND(a, b, c, d, e, f)
#define PAVGB(a, b, c, e)               PAVGB_MMX_NO_RND(a, b, c, e)

#include "dsputil_mmx_rnd.h"

#undef DEF
#undef SET_RND
#undef PAVGBP
#undef PAVGB
/***********************************/
/* MMX rounding */

#define DEF(x, y) x ## _ ## y ##_mmx
#define SET_RND  MOVQ_WTWO
#define PAVGBP(a, b, c, d, e, f)        PAVGBP_MMX(a, b, c, d, e, f)
#define PAVGB(a, b, c, e)               PAVGB_MMX(a, b, c, e)

#include "dsputil_mmx_rnd.h"

#undef DEF
#undef SET_RND
#undef PAVGBP
#undef PAVGB

/***********************************/
/* 3Dnow specific */

#define DEF(x) x ## _3dnow
/* for Athlons, PAVGUSB is preferred */
#define PAVGB "pavgusb"

#include "dsputil_mmx_avg.h"

#undef DEF
#undef PAVGB

/***********************************/
/* MMX2 specific */

#define DEF(x) x ## _mmx2

/* Introduced only in MMX2 set */
#define PAVGB "pavgb"

#include "dsputil_mmx_avg.h"

#undef DEF
#undef PAVGB

/***********************************/
/* standard MMX */

#ifdef CONFIG_ENCODERS
static void get_pixels_mmx(DCTELEM *block, const uint8_t *pixels, int line_size)
{
    asm volatile(
        "movl $-128, %%eax              \n\t"
        "pxor %%mm7, %%mm7              \n\t"
        ".balign 16                     \n\t"
        "1:                             \n\t"
        "movq (%0), %%mm0               \n\t"
        "movq (%0, %2), %%mm2           \n\t"
        "movq %%mm0, %%mm1              \n\t"
        "movq %%mm2, %%mm3              \n\t"
        "punpcklbw %%mm7, %%mm0         \n\t"
        "punpckhbw %%mm7, %%mm1         \n\t"
        "punpcklbw %%mm7, %%mm2         \n\t"
        "punpckhbw %%mm7, %%mm3         \n\t"
        "movq %%mm0, (%1, %%eax)        \n\t"
        "movq %%mm1, 8(%1, %%eax)       \n\t"
        "movq %%mm2, 16(%1, %%eax)      \n\t"
        "movq %%mm3, 24(%1, %%eax)      \n\t"
        "addl %3, %0                    \n\t"
        "addl $32, %%eax                \n\t"
        "js 1b                          \n\t"
        : "+r" (pixels)
        : "r" (block+64), "r" (line_size), "r" (line_size*2)
        : "%eax"
    );
}

static inline void diff_pixels_mmx(DCTELEM *block, const uint8_t *s1, const uint8_t *s2, int stride)
{
    asm volatile(
        "pxor %%mm7, %%mm7              \n\t"
        "movl $-128, %%eax              \n\t"
        ".balign 16                     \n\t"
        "1:                             \n\t"
        "movq (%0), %%mm0               \n\t"
        "movq (%1), %%mm2               \n\t"
        "movq %%mm0, %%mm1              \n\t"
        "movq %%mm2, %%mm3              \n\t"
        "punpcklbw %%mm7, %%mm0         \n\t"
        "punpckhbw %%mm7, %%mm1         \n\t"
        "punpcklbw %%mm7, %%mm2         \n\t"
        "punpckhbw %%mm7, %%mm3         \n\t"
        "psubw %%mm2, %%mm0             \n\t"
        "psubw %%mm3, %%mm1             \n\t"
        "movq %%mm0, (%2, %%eax)        \n\t"
        "movq %%mm1, 8(%2, %%eax)       \n\t"
        "addl %3, %0                    \n\t"
        "addl %3, %1                    \n\t"
        "addl $16, %%eax                \n\t"
        "jnz 1b                         \n\t"
        : "+r" (s1), "+r" (s2)
        : "r" (block+64), "r" (stride)
        : "%eax"
    );
}
#endif //CONFIG_ENCODERS

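/* Scalar equivalents of the two routines above (an illustrative sketch; the
 * _ref names are hypothetical and not part of the original file). get_pixels
 * widens an 8x8 block of bytes into 16-bit DCT coefficients; diff_pixels
 * stores the 16-bit difference of two blocks. The asm runs %eax from -128
 * up to 0 so the sign flag doubles as the loop condition. */
#if 0
static void get_pixels_ref(DCTELEM *block, const uint8_t *pixels, int line_size)
{
    int i, j;
    for (i = 0; i < 8; i++, pixels += line_size)
        for (j = 0; j < 8; j++)
            block[i*8 + j] = pixels[j];         /* zero-extend byte -> int16 */
}

static void diff_pixels_ref(DCTELEM *block, const uint8_t *s1, const uint8_t *s2, int stride)
{
    int i, j;
    for (i = 0; i < 8; i++, s1 += stride, s2 += stride)
        for (j = 0; j < 8; j++)
            block[i*8 + j] = s1[j] - s2[j];     /* signed 16-bit difference */
}
#endif
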
void put_pixels_clamped_mmx(const DCTELEM *block, uint8_t *pixels, int line_size)
{
    const DCTELEM *p;
    uint8_t *pix;

    /* read the pixels */
    p = block;
    pix = pixels;
    /* unrolled loop */
        __asm __volatile(
                "movq   %3, %%mm0\n\t"
                "movq   8%3, %%mm1\n\t"
                "movq   16%3, %%mm2\n\t"
                "movq   24%3, %%mm3\n\t"
                "movq   32%3, %%mm4\n\t"
                "movq   40%3, %%mm5\n\t"
                "movq   48%3, %%mm6\n\t"
                "movq   56%3, %%mm7\n\t"
                "packuswb %%mm1, %%mm0\n\t"
                "packuswb %%mm3, %%mm2\n\t"
                "packuswb %%mm5, %%mm4\n\t"
                "packuswb %%mm7, %%mm6\n\t"
                "movq   %%mm0, (%0)\n\t"
                "movq   %%mm2, (%0, %1)\n\t"
                "movq   %%mm4, (%0, %1, 2)\n\t"
                "movq   %%mm6, (%0, %2)\n\t"
                ::"r" (pix), "r" (line_size), "r" (line_size*3), "m"(*p)
                :"memory");
        pix += line_size*4;
        p += 32;

    // if this were an exact copy of the code above, the compiler
    // would generate some very strange code; thus the "r" constraint
    __asm __volatile(
            "movq   (%3), %%mm0\n\t"
            "movq   8(%3), %%mm1\n\t"
            "movq   16(%3), %%mm2\n\t"
            "movq   24(%3), %%mm3\n\t"
            "movq   32(%3), %%mm4\n\t"
            "movq   40(%3), %%mm5\n\t"
            "movq   48(%3), %%mm6\n\t"
            "movq   56(%3), %%mm7\n\t"
            "packuswb %%mm1, %%mm0\n\t"
            "packuswb %%mm3, %%mm2\n\t"
            "packuswb %%mm5, %%mm4\n\t"
            "packuswb %%mm7, %%mm6\n\t"
            "movq   %%mm0, (%0)\n\t"
            "movq   %%mm2, (%0, %1)\n\t"
            "movq   %%mm4, (%0, %1, 2)\n\t"
            "movq   %%mm6, (%0, %2)\n\t"
            ::"r" (pix), "r" (line_size), "r" (line_size*3), "r"(p)
            :"memory");
}

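/* The packuswb in the routine above is what does the clamping: each signed
 * 16-bit coefficient saturates to 0..255 on the way out. Scalar sketch
 * (illustrative only; put_pixels_clamped_ref is a hypothetical name): */
#if 0
static void put_pixels_clamped_ref(const DCTELEM *block, uint8_t *pixels, int line_size)
{
    int i, j;
    for (i = 0; i < 8; i++, pixels += line_size, block += 8)
        for (j = 0; j < 8; j++) {
            int v = block[j];
            pixels[j] = v < 0 ? 0 : v > 255 ? 255 : v;  /* saturate to byte */
        }
}
#endif
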
void add_pixels_clamped_mmx(const DCTELEM *block, uint8_t *pixels, int line_size)
{
    const DCTELEM *p;
    uint8_t *pix;
    int i;

    /* read the pixels */
    p = block;
    pix = pixels;
    MOVQ_ZERO(mm7);
    i = 4;
    do {
        __asm __volatile(
                "movq   (%2), %%mm0\n\t"
                "movq   8(%2), %%mm1\n\t"
                "movq   16(%2), %%mm2\n\t"
                "movq   24(%2), %%mm3\n\t"
                "movq   %0, %%mm4\n\t"
                "movq   %1, %%mm6\n\t"
                "movq   %%mm4, %%mm5\n\t"
                "punpcklbw %%mm7, %%mm4\n\t"
                "punpckhbw %%mm7, %%mm5\n\t"
                "paddsw %%mm4, %%mm0\n\t"
                "paddsw %%mm5, %%mm1\n\t"
                "movq   %%mm6, %%mm5\n\t"
                "punpcklbw %%mm7, %%mm6\n\t"
                "punpckhbw %%mm7, %%mm5\n\t"
                "paddsw %%mm6, %%mm2\n\t"
                "paddsw %%mm5, %%mm3\n\t"
                "packuswb %%mm1, %%mm0\n\t"
                "packuswb %%mm3, %%mm2\n\t"
                "movq   %%mm0, %0\n\t"
                "movq   %%mm2, %1\n\t"
                :"+m"(*pix), "+m"(*(pix+line_size))
                :"r"(p)
                :"memory");
        pix += line_size*2;
        p += 16;
    } while (--i);
}

static void put_pixels8_mmx(uint8_t *block, const uint8_t *pixels, int line_size, int h)
{
    __asm __volatile(
         "lea (%3, %3), %%eax           \n\t"
         ".balign 8                     \n\t"
         "1:                            \n\t"
         "movq (%1), %%mm0              \n\t"
         "movq (%1, %3), %%mm1          \n\t"
         "movq %%mm0, (%2)              \n\t"
         "movq %%mm1, (%2, %3)          \n\t"
         "addl %%eax, %1                \n\t"
         "addl %%eax, %2                \n\t"
         "movq (%1), %%mm0              \n\t"
         "movq (%1, %3), %%mm1          \n\t"
         "movq %%mm0, (%2)              \n\t"
         "movq %%mm1, (%2, %3)          \n\t"
         "addl %%eax, %1                \n\t"
         "addl %%eax, %2                \n\t"
         "subl $4, %0                   \n\t"
         "jnz 1b                        \n\t"
         : "+g"(h), "+r" (pixels),  "+r" (block)
         : "r"(line_size)
         : "%eax", "memory"
        );
}

static void put_pixels16_mmx(uint8_t *block, const uint8_t *pixels, int line_size, int h)
{
    __asm __volatile(
         "lea (%3, %3), %%eax           \n\t"
         ".balign 8                     \n\t"
         "1:                            \n\t"
         "movq (%1), %%mm0              \n\t"
         "movq 8(%1), %%mm4             \n\t"
         "movq (%1, %3), %%mm1          \n\t"
         "movq 8(%1, %3), %%mm5         \n\t"
         "movq %%mm0, (%2)              \n\t"
         "movq %%mm4, 8(%2)             \n\t"
         "movq %%mm1, (%2, %3)          \n\t"
         "movq %%mm5, 8(%2, %3)         \n\t"
         "addl %%eax, %1                \n\t"
         "addl %%eax, %2                \n\t"
         "movq (%1), %%mm0              \n\t"
         "movq 8(%1), %%mm4             \n\t"
         "movq (%1, %3), %%mm1          \n\t"
         "movq 8(%1, %3), %%mm5         \n\t"
         "movq %%mm0, (%2)              \n\t"
         "movq %%mm4, 8(%2)             \n\t"
         "movq %%mm1, (%2, %3)          \n\t"
         "movq %%mm5, 8(%2, %3)         \n\t"
         "addl %%eax, %1                \n\t"
         "addl %%eax, %2                \n\t"
         "subl $4, %0                   \n\t"
         "jnz 1b                        \n\t"
         : "+g"(h), "+r" (pixels),  "+r" (block)
         : "r"(line_size)
         : "%eax", "memory"
        );
}

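/* Both copy loops above handle four rows per pass: two movq pairs, two
 * pointer bumps by 2*line_size (precomputed in %eax with lea), then h -= 4.
 * A plain C version of the 8-wide case (illustrative only; the _ref name is
 * hypothetical): */
#if 0
static void put_pixels8_ref(uint8_t *block, const uint8_t *pixels, int line_size, int h)
{
    int i, j;
    for (i = 0; i < h; i++, block += line_size, pixels += line_size)
        for (j = 0; j < 8; j++)
            block[j] = pixels[j];               /* straight 8-byte row copy */
}
#endif
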
static void clear_blocks_mmx(DCTELEM *blocks)
{
    __asm __volatile(
                "pxor %%mm7, %%mm7              \n\t"
                "movl $-128*6, %%eax            \n\t"
                "1:                             \n\t"
                "movq %%mm7, (%0, %%eax)        \n\t"
                "movq %%mm7, 8(%0, %%eax)       \n\t"
                "movq %%mm7, 16(%0, %%eax)      \n\t"
                "movq %%mm7, 24(%0, %%eax)      \n\t"
                "addl $32, %%eax                \n\t"
                " js 1b                         \n\t"
                : : "r" (((int)blocks)+128*6)
                : "%eax"
        );
}

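/* clear_blocks zeroes six 64-coefficient DCT blocks, i.e. 6*64*2 = 768
 * bytes. The block pointer is biased by +768 and %eax counts from -768 up
 * to 0, so "js 1b" is a free loop test. Equivalent to (illustrative):
 *   memset(blocks, 0, 6 * 64 * sizeof(DCTELEM));
 */
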
#ifdef CONFIG_ENCODERS
static int pix_sum16_mmx(uint8_t * pix, int line_size){
    const int h=16;
    int sum;
    int index= -line_size*h;

    __asm __volatile(
                "pxor %%mm7, %%mm7              \n\t"
                "pxor %%mm6, %%mm6              \n\t"
                "1:                             \n\t"
                "movq (%2, %1), %%mm0           \n\t"
                "movq (%2, %1), %%mm1           \n\t"
                "movq 8(%2, %1), %%mm2          \n\t"
                "movq 8(%2, %1), %%mm3          \n\t"
                "punpcklbw %%mm7, %%mm0         \n\t"
                "punpckhbw %%mm7, %%mm1         \n\t"
                "punpcklbw %%mm7, %%mm2         \n\t"
                "punpckhbw %%mm7, %%mm3         \n\t"
                "paddw %%mm0, %%mm1             \n\t"
                "paddw %%mm2, %%mm3             \n\t"
                "paddw %%mm1, %%mm3             \n\t"
                "paddw %%mm3, %%mm6             \n\t"
                "addl %3, %1                    \n\t"
                " js 1b                         \n\t"
                "movq %%mm6, %%mm5              \n\t"
                "psrlq $32, %%mm6               \n\t"
                "paddw %%mm5, %%mm6             \n\t"
                "movq %%mm6, %%mm5              \n\t"
                "psrlq $16, %%mm6               \n\t"
                "paddw %%mm5, %%mm6             \n\t"
                "movd %%mm6, %0                 \n\t"
                "andl $0xFFFF, %0               \n\t"
                : "=&r" (sum), "+r" (index)
                : "r" (pix - index), "r" (line_size)
        );

    return sum;
}
#endif //CONFIG_ENCODERS

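/* pix_sum16 returns the sum of all 256 bytes of a 16x16 block; the trailing
 * psrlq/paddw pairs fold the four word lanes of %%mm6 into one. Scalar
 * sketch (illustrative only):
 *   for (y = 0; y < 16; y++)
 *       for (x = 0; x < 16; x++)
 *           sum += pix[y*line_size + x];
 */
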
static void add_bytes_mmx(uint8_t *dst, uint8_t *src, int w){
    int i=0;
    asm volatile(
        "1:                             \n\t"
        "movq  (%1, %0), %%mm0          \n\t"
        "movq  (%2, %0), %%mm1          \n\t"
        "paddb %%mm0, %%mm1             \n\t"
        "movq %%mm1, (%2, %0)           \n\t"
        "movq 8(%1, %0), %%mm0          \n\t"
        "movq 8(%2, %0), %%mm1          \n\t"
        "paddb %%mm0, %%mm1             \n\t"
        "movq %%mm1, 8(%2, %0)          \n\t"
        "addl $16, %0                   \n\t"
        "cmpl %3, %0                    \n\t"
        " jb 1b                         \n\t"
        : "+r" (i)
        : "r"(src), "r"(dst), "r"(w-15)
    );
    for(; i<w; i++)
        dst[i+0] += src[i+0];
}

#define H263_LOOP_FILTER \
        "pxor %%mm7, %%mm7              \n\t"\
        "movq  %0, %%mm0                \n\t"\
        "movq  %0, %%mm1                \n\t"\
        "movq  %3, %%mm2                \n\t"\
        "movq  %3, %%mm3                \n\t"\
        "punpcklbw %%mm7, %%mm0         \n\t"\
        "punpckhbw %%mm7, %%mm1         \n\t"\
        "punpcklbw %%mm7, %%mm2         \n\t"\
        "punpckhbw %%mm7, %%mm3         \n\t"\
        "psubw %%mm2, %%mm0             \n\t"\
        "psubw %%mm3, %%mm1             \n\t"\
        "movq  %1, %%mm2                \n\t"\
        "movq  %1, %%mm3                \n\t"\
        "movq  %2, %%mm4                \n\t"\
        "movq  %2, %%mm5                \n\t"\
        "punpcklbw %%mm7, %%mm2         \n\t"\
        "punpckhbw %%mm7, %%mm3         \n\t"\
        "punpcklbw %%mm7, %%mm4         \n\t"\
        "punpckhbw %%mm7, %%mm5         \n\t"\
        "psubw %%mm2, %%mm4             \n\t"\
        "psubw %%mm3, %%mm5             \n\t"\
        "psllw $2, %%mm4                \n\t"\
        "psllw $2, %%mm5                \n\t"\
        "paddw %%mm0, %%mm4             \n\t"\
        "paddw %%mm1, %%mm5             \n\t"\
        "pxor %%mm6, %%mm6              \n\t"\
        "pcmpgtw %%mm4, %%mm6           \n\t"\
        "pcmpgtw %%mm5, %%mm7           \n\t"\
        "pxor %%mm6, %%mm4              \n\t"\
        "pxor %%mm7, %%mm5              \n\t"\
        "psubw %%mm6, %%mm4             \n\t"\
        "psubw %%mm7, %%mm5             \n\t"\
        "psrlw $3, %%mm4                \n\t"\
        "psrlw $3, %%mm5                \n\t"\
        "packuswb %%mm5, %%mm4          \n\t"\
        "packsswb %%mm7, %%mm6          \n\t"\
        "pxor %%mm7, %%mm7              \n\t"\
        "movd %4, %%mm2                 \n\t"\
        "punpcklbw %%mm2, %%mm2         \n\t"\
        "punpcklbw %%mm2, %%mm2         \n\t"\
        "punpcklbw %%mm2, %%mm2         \n\t"\
        "psubusb %%mm4, %%mm2           \n\t"\
        "movq %%mm2, %%mm3              \n\t"\
        "psubusb %%mm4, %%mm3           \n\t"\
        "psubb %%mm3, %%mm2             \n\t"\
        "movq %1, %%mm3                 \n\t"\
        "movq %2, %%mm4                 \n\t"\
        "pxor %%mm6, %%mm3              \n\t"\
        "pxor %%mm6, %%mm4              \n\t"\
        "paddusb %%mm2, %%mm3           \n\t"\
        "psubusb %%mm2, %%mm4           \n\t"\
        "pxor %%mm6, %%mm3              \n\t"\
        "pxor %%mm6, %%mm4              \n\t"\
        "paddusb %%mm2, %%mm2           \n\t"\
        "packsswb %%mm1, %%mm0          \n\t"\
        "pcmpgtb %%mm0, %%mm7           \n\t"\
        "pxor %%mm7, %%mm0              \n\t"\
        "psubb %%mm7, %%mm0             \n\t"\
        "movq %%mm0, %%mm1              \n\t"\
        "psubusb %%mm2, %%mm0           \n\t"\
        "psubb %%mm0, %%mm1             \n\t"\
        "pand %5, %%mm1                 \n\t"\
        "psrlw $2, %%mm1                \n\t"\
        "pxor %%mm7, %%mm1              \n\t"\
        "psubb %%mm7, %%mm1             \n\t"\
        "movq %0, %%mm5                 \n\t"\
        "movq %3, %%mm6                 \n\t"\
        "psubb %%mm1, %%mm5             \n\t"\
        "paddb %%mm1, %%mm6             \n\t"

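/* Rough shape of the filter above (an interpretive note, not in the
 * original file): with the four lines p0 p1 p2 p3 across the block edge
 * (operands %0..%3), it forms d = (p0 - p3 + 4*(p2 - p1)) / 8, taking the
 * absolute value via the pcmpgtw/pxor/psubw two's-complement trick, limits
 * the correction using the qscale-dependent strength (operand %4), then
 * moves p1 and p2 towards each other by d and adjusts p0/p3 by a quarter of
 * the remaining correction (the pand %5 / psrlw $2 path). */
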
static void h263_v_loop_filter_mmx(uint8_t *src, int stride, int qscale){
    const int strength= ff_h263_loop_filter_strength[qscale];

    asm volatile(

        H263_LOOP_FILTER

        "movq %%mm3, %1                 \n\t"
        "movq %%mm4, %2                 \n\t"
        "movq %%mm5, %0                 \n\t"
        "movq %%mm6, %3                 \n\t"
        : "+m" (*(uint64_t*)(src - 2*stride)),
          "+m" (*(uint64_t*)(src - 1*stride)),
          "+m" (*(uint64_t*)(src + 0*stride)),
          "+m" (*(uint64_t*)(src + 1*stride))
        : "g" (2*strength), "m"(ff_pb_FC)
    );
}

static inline void transpose4x4(uint8_t *dst, uint8_t *src, int dst_stride, int src_stride){
    asm volatile( //FIXME could save 1 instruction if done as 8x4 ...
        "movd  %4, %%mm0                \n\t"
        "movd  %5, %%mm1                \n\t"
        "movd  %6, %%mm2                \n\t"
        "movd  %7, %%mm3                \n\t"
        "punpcklbw %%mm1, %%mm0         \n\t"
        "punpcklbw %%mm3, %%mm2         \n\t"
        "movq %%mm0, %%mm1              \n\t"
        "punpcklwd %%mm2, %%mm0         \n\t"
        "punpckhwd %%mm2, %%mm1         \n\t"
        "movd  %%mm0, %0                \n\t"
        "punpckhdq %%mm0, %%mm0         \n\t"
        "movd  %%mm0, %1                \n\t"
        "movd  %%mm1, %2                \n\t"
        "punpckhdq %%mm1, %%mm1         \n\t"
        "movd  %%mm1, %3                \n\t"

        : "=m" (*(uint32_t*)(dst + 0*dst_stride)),
          "=m" (*(uint32_t*)(dst + 1*dst_stride)),
          "=m" (*(uint32_t*)(dst + 2*dst_stride)),
          "=m" (*(uint32_t*)(dst + 3*dst_stride))
        :  "m" (*(uint32_t*)(src + 0*src_stride)),
           "m" (*(uint32_t*)(src + 1*src_stride)),
           "m" (*(uint32_t*)(src + 2*src_stride)),
           "m" (*(uint32_t*)(src + 3*src_stride))
    );
}

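/* The transpose uses the standard interleave ladder (a note, not in the
 * original file): punpcklbw merges bytes of rows 0/1 and rows 2/3, then
 * punpcklwd/punpckhwd merge those 16-bit pairs, leaving each output dword
 * holding one column of the 4x4 input. */
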
static void h263_h_loop_filter_mmx(uint8_t *src, int stride, int qscale){
    const int strength= ff_h263_loop_filter_strength[qscale];
    uint64_t temp[4] __attribute__ ((aligned(8)));
    uint8_t *btemp= (uint8_t*)temp;

    src -= 2;

    transpose4x4(btemp  , src           , 8, stride);
    transpose4x4(btemp+4, src + 4*stride, 8, stride);
    asm volatile(
        H263_LOOP_FILTER // 5 3 4 6

        : "+m" (temp[0]),
          "+m" (temp[1]),
          "+m" (temp[2]),
          "+m" (temp[3])
        : "g" (2*strength), "m"(ff_pb_FC)
    );

    asm volatile(
        "movq %%mm5, %%mm1              \n\t"
        "movq %%mm4, %%mm0              \n\t"
        "punpcklbw %%mm3, %%mm5         \n\t"
        "punpcklbw %%mm6, %%mm4         \n\t"
        "punpckhbw %%mm3, %%mm1         \n\t"
        "punpckhbw %%mm6, %%mm0         \n\t"
        "movq %%mm5, %%mm3              \n\t"
        "movq %%mm1, %%mm6              \n\t"
        "punpcklwd %%mm4, %%mm5         \n\t"
        "punpcklwd %%mm0, %%mm1         \n\t"
        "punpckhwd %%mm4, %%mm3         \n\t"
        "punpckhwd %%mm0, %%mm6         \n\t"
        "movd %%mm5, %0                 \n\t"
        "punpckhdq %%mm5, %%mm5         \n\t"
        "movd %%mm5, %1                 \n\t"
        "movd %%mm3, %2                 \n\t"
        "punpckhdq %%mm3, %%mm3         \n\t"
        "movd %%mm3, %3                 \n\t"
        "movd %%mm1, %4                 \n\t"
        "punpckhdq %%mm1, %%mm1         \n\t"
        "movd %%mm1, %5                 \n\t"
        "movd %%mm6, %6                 \n\t"
        "punpckhdq %%mm6, %%mm6         \n\t"
        "movd %%mm6, %7                 \n\t"
        : "=m" (*(uint32_t*)(src + 0*stride)),
          "=m" (*(uint32_t*)(src + 1*stride)),
          "=m" (*(uint32_t*)(src + 2*stride)),
          "=m" (*(uint32_t*)(src + 3*stride)),
          "=m" (*(uint32_t*)(src + 4*stride)),
          "=m" (*(uint32_t*)(src + 5*stride)),
          "=m" (*(uint32_t*)(src + 6*stride)),
          "=m" (*(uint32_t*)(src + 7*stride))
    );
}

#ifdef CONFIG_ENCODERS
static int pix_norm1_mmx(uint8_t *pix, int line_size) {
    int tmp;
  asm volatile (
      "movl $16,%%ecx\n"
      "pxor %%mm0,%%mm0\n"
      "pxor %%mm7,%%mm7\n"
      "1:\n"
      "movq (%0),%%mm2\n"       /* mm2 = pix[0-7] */
      "movq 8(%0),%%mm3\n"      /* mm3 = pix[8-15] */

      "movq %%mm2,%%mm1\n"      /* mm1 = mm2 = pix[0-7] */

      "punpckhbw %%mm0,%%mm1\n" /* mm1 = [pix4-7] */
      "punpcklbw %%mm0,%%mm2\n" /* mm2 = [pix0-3] */

      "movq %%mm3,%%mm4\n"      /* mm4 = mm3 = pix[8-15] */
      "punpckhbw %%mm0,%%mm3\n" /* mm3 = [pix12-15] */
      "punpcklbw %%mm0,%%mm4\n" /* mm4 = [pix8-11] */

      "pmaddwd %%mm1,%%mm1\n"   /* mm1 = (pix4^2+pix5^2,pix6^2+pix7^2) */
      "pmaddwd %%mm2,%%mm2\n"   /* mm2 = (pix0^2+pix1^2,pix2^2+pix3^2) */

      "pmaddwd %%mm3,%%mm3\n"
      "pmaddwd %%mm4,%%mm4\n"

      "paddd %%mm1,%%mm2\n"     /* mm2 = (pix0^2+pix1^2+pix4^2+pix5^2,
                                          pix2^2+pix3^2+pix6^2+pix7^2) */
      "paddd %%mm3,%%mm4\n"
      "paddd %%mm2,%%mm7\n"

      "addl %2, %0\n"
      "paddd %%mm4,%%mm7\n"
      "dec %%ecx\n"
      "jnz 1b\n"

      "movq %%mm7,%%mm1\n"
      "psrlq $32, %%mm7\n"      /* shift hi dword to lo */
      "paddd %%mm7,%%mm1\n"
      "movd %%mm1,%1\n"
      : "+r" (pix), "=r"(tmp) : "r" (line_size) : "%ecx" );
    return tmp;
}

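/* pix_norm1 computes the sum of squared pixel values over a 16x16 block
 * (16 iterations of 16 pixels). Scalar sketch, illustrative only:
 *   for (y = 0; y < 16; y++)
 *       for (x = 0; x < 16; x++)
 *           s += pix[y*line_size + x] * pix[y*line_size + x];
 */
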
static int sse16_mmx(void *v, uint8_t * pix1, uint8_t * pix2, int line_size, int h) {
    int tmp;
  asm volatile (
      "movl %4,%%ecx\n"
      "pxor %%mm0,%%mm0\n"      /* mm0 = 0 */
      "pxor %%mm7,%%mm7\n"      /* mm7 holds the sum */
      "1:\n"
      "movq (%0),%%mm1\n"       /* mm1 = pix1[0-7] */
      "movq (%1),%%mm2\n"       /* mm2 = pix2[0-7] */
      "movq 8(%0),%%mm3\n"      /* mm3 = pix1[8-15] */
      "movq 8(%1),%%mm4\n"      /* mm4 = pix2[8-15] */

      /* todo: mm1-mm2, mm3-mm4 */
      /* algo: subtract mm1 from mm2 with saturation and vice versa */
      /*       OR the results to get absolute difference */
      "movq %%mm1,%%mm5\n"
      "movq %%mm3,%%mm6\n"
      "psubusb %%mm2,%%mm1\n"
      "psubusb %%mm4,%%mm3\n"
      "psubusb %%mm5,%%mm2\n"
      "psubusb %%mm6,%%mm4\n"

      "por %%mm1,%%mm2\n"
      "por %%mm3,%%mm4\n"

      /* now convert to 16-bit vectors so we can square them */
      "movq %%mm2,%%mm1\n"
      "movq %%mm4,%%mm3\n"

      "punpckhbw %%mm0,%%mm2\n"
      "punpckhbw %%mm0,%%mm4\n"
      "punpcklbw %%mm0,%%mm1\n" /* mm1 now spread over (mm1,mm2) */
      "punpcklbw %%mm0,%%mm3\n" /* mm4 now spread over (mm3,mm4) */

      "pmaddwd %%mm2,%%mm2\n"
      "pmaddwd %%mm4,%%mm4\n"
      "pmaddwd %%mm1,%%mm1\n"
      "pmaddwd %%mm3,%%mm3\n"

      "addl %3,%0\n"
      "addl %3,%1\n"

      "paddd %%mm2,%%mm1\n"
      "paddd %%mm4,%%mm3\n"
      "paddd %%mm1,%%mm7\n"
      "paddd %%mm3,%%mm7\n"

      "decl %%ecx\n"
      "jnz 1b\n"

      "movq %%mm7,%%mm1\n"
      "psrlq $32, %%mm7\n"      /* shift hi dword to lo */
      "paddd %%mm7,%%mm1\n"
      "movd %%mm1,%2\n"
      : "+r" (pix1), "+r" (pix2), "=r"(tmp)
      : "r" (line_size) , "m" (h)
      : "%ecx");
    return tmp;
}

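/* The saturating-subtract trick used above (a note, not in the original
 * file): for unsigned bytes, psubusb clamps at zero, so
 *   |a - b| == (a -us b) | (b -us a)
 * because one of the two saturated differences is always 0. The absolute
 * differences are then widened and squared with pmaddwd, i.e. the scalar
 * equivalent is s += (pix1[x] - pix2[x]) * (pix1[x] - pix2[x]). */
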
static int vsad_intra16_mmx(void *v, uint8_t * pix, uint8_t * dummy, int line_size, int h) {
    int tmp;

    assert( (((int)pix) & 7) == 0);
    assert((line_size &7) ==0);

#define SUM(in0, in1, out0, out1) \
      "movq (%0), %%mm2\n"\
      "movq 8(%0), %%mm3\n"\
      "addl %2,%0\n"\
      "movq %%mm2, " #out0 "\n"\
      "movq %%mm3, " #out1 "\n"\
      "psubusb " #in0 ", %%mm2\n"\
      "psubusb " #in1 ", %%mm3\n"\
      "psubusb " #out0 ", " #in0 "\n"\
      "psubusb " #out1 ", " #in1 "\n"\
      "por %%mm2, " #in0 "\n"\
      "por %%mm3, " #in1 "\n"\
      "movq " #in0 ", %%mm2\n"\
      "movq " #in1 ", %%mm3\n"\
      "punpcklbw %%mm7, " #in0 "\n"\
      "punpcklbw %%mm7, " #in1 "\n"\
      "punpckhbw %%mm7, %%mm2\n"\
      "punpckhbw %%mm7, %%mm3\n"\
      "paddw " #in1 ", " #in0 "\n"\
      "paddw %%mm3, %%mm2\n"\
      "paddw %%mm2, " #in0 "\n"\
      "paddw " #in0 ", %%mm6\n"

  asm volatile (
      "movl %3,%%ecx\n"
      "pxor %%mm6,%%mm6\n"
      "pxor %%mm7,%%mm7\n"
      "movq (%0),%%mm0\n"
      "movq 8(%0),%%mm1\n"
      "addl %2,%0\n"
      "subl $2, %%ecx\n"
      SUM(%%mm0, %%mm1, %%mm4, %%mm5)
      "1:\n"

      SUM(%%mm4, %%mm5, %%mm0, %%mm1)

      SUM(%%mm0, %%mm1, %%mm4, %%mm5)

      "subl $2, %%ecx\n"
      "jnz 1b\n"

      "movq %%mm6,%%mm0\n"
      "psrlq $32, %%mm6\n"
      "paddw %%mm6,%%mm0\n"
      "movq %%mm0,%%mm6\n"
      "psrlq $16, %%mm0\n"
      "paddw %%mm6,%%mm0\n"
      "movd %%mm0,%1\n"
      : "+r" (pix), "=r"(tmp)
      : "r" (line_size) , "m" (h)
      : "%ecx");
    return tmp & 0xFFFF;
}
#undef SUM

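/* vsad_intra16 measures vertical activity: the sum over the block of
 * |pix[x] - pix[x + line_size]|, i.e. the SAD between each row and the row
 * above it. The SUM macro keeps the previous row in registers (in0/in1) so
 * every row is loaded from memory only once. */
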
static int vsad_intra16_mmx2(void *v, uint8_t * pix, uint8_t * dummy, int line_size, int h) {
    int tmp;

    assert( (((int)pix) & 7) == 0);
    assert((line_size &7) ==0);

#define SUM(in0, in1, out0, out1) \
      "movq (%0), " #out0 "\n"\
      "movq 8(%0), " #out1 "\n"\
      "addl %2,%0\n"\
      "psadbw " #out0 ", " #in0 "\n"\
      "psadbw " #out1 ", " #in1 "\n"\
      "paddw " #in1 ", " #in0 "\n"\
      "paddw " #in0 ", %%mm6\n"

  asm volatile (
      "movl %3,%%ecx\n"
      "pxor %%mm6,%%mm6\n"
      "pxor %%mm7,%%mm7\n"
      "movq (%0),%%mm0\n"
      "movq 8(%0),%%mm1\n"
      "addl %2,%0\n"
      "subl $2, %%ecx\n"
      SUM(%%mm0, %%mm1, %%mm4, %%mm5)
      "1:\n"

      SUM(%%mm4, %%mm5, %%mm0, %%mm1)

      SUM(%%mm0, %%mm1, %%mm4, %%mm5)

      "subl $2, %%ecx\n"
      "jnz 1b\n"

      "movd %%mm6,%1\n"
      : "+r" (pix), "=r"(tmp)
      : "r" (line_size) , "m" (h)
      : "%ecx");
    return tmp;
}
#undef SUM

static int vsad16_mmx(void *v, uint8_t * pix1, uint8_t * pix2, int line_size, int h) {
    int tmp;

    assert( (((int)pix1) & 7) == 0);
    assert( (((int)pix2) & 7) == 0);
    assert((line_size &7) ==0);

#define SUM(in0, in1, out0, out1) \
      "movq (%0),%%mm2\n"\
      "movq (%1)," #out0 "\n"\
      "movq 8(%0),%%mm3\n"\
      "movq 8(%1)," #out1 "\n"\
      "addl %3,%0\n"\
      "addl %3,%1\n"\
      "psubb " #out0 ", %%mm2\n"\
      "psubb " #out1 ", %%mm3\n"\
      "pxor %%mm7, %%mm2\n"\
      "pxor %%mm7, %%mm3\n"\
      "movq %%mm2, " #out0 "\n"\
      "movq %%mm3, " #out1 "\n"\
      "psubusb " #in0 ", %%mm2\n"\
      "psubusb " #in1 ", %%mm3\n"\
      "psubusb " #out0 ", " #in0 "\n"\
      "psubusb " #out1 ", " #in1 "\n"\
      "por %%mm2, " #in0 "\n"\
      "por %%mm3, " #in1 "\n"\
      "movq " #in0 ", %%mm2\n"\
      "movq " #in1 ", %%mm3\n"\
      "punpcklbw %%mm7, " #in0 "\n"\
      "punpcklbw %%mm7, " #in1 "\n"\
      "punpckhbw %%mm7, %%mm2\n"\
      "punpckhbw %%mm7, %%mm3\n"\
      "paddw " #in1 ", " #in0 "\n"\
      "paddw %%mm3, %%mm2\n"\
      "paddw %%mm2, " #in0 "\n"\
      "paddw " #in0 ", %%mm6\n"

  asm volatile (
      "movl %4,%%ecx\n"
      "pxor %%mm6,%%mm6\n"
      "pcmpeqw %%mm7,%%mm7\n"
      "psllw $15, %%mm7\n"
      "packsswb %%mm7, %%mm7\n"
      "movq (%0),%%mm0\n"
      "movq (%1),%%mm2\n"
      "movq 8(%0),%%mm1\n"
      "movq 8(%1),%%mm3\n"
      "addl %3,%0\n"
      "addl %3,%1\n"
      "subl $2, %%ecx\n"
      "psubb %%mm2, %%mm0\n"
      "psubb %%mm3, %%mm1\n"
      "pxor %%mm7, %%mm0\n"
      "pxor %%mm7, %%mm1\n"
      SUM(%%mm0, %%mm1, %%mm4, %%mm5)
      "1:\n"

      SUM(%%mm4, %%mm5, %%mm0, %%mm1)

      SUM(%%mm0, %%mm1, %%mm4, %%mm5)

      "subl $2, %%ecx\n"
      "jnz 1b\n"

      "movq %%mm6,%%mm0\n"
      "psrlq $32, %%mm6\n"
      "paddw %%mm6,%%mm0\n"
      "movq %%mm0,%%mm6\n"
      "psrlq $16, %%mm0\n"
      "paddw %%mm6,%%mm0\n"
      "movd %%mm0,%2\n"
      : "+r" (pix1), "+r" (pix2), "=r"(tmp)
      : "r" (line_size) , "m" (h)
      : "%ecx");
    return tmp & 0x7FFF;
}
#undef SUM

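/* vsad16 does the same vertical SAD, but on the residual d = pix1 - pix2.
 * Note the constant built in %%mm7: pcmpeqw / psllw $15 / packsswb leaves
 * 0x80 in every byte; xoring the signed byte difference with 0x80 biases it
 * into offset-binary, so the unsigned psubusb absolute-difference trick
 * still applies to signed data. */
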
static int vsad16_mmx2(void *v, uint8_t * pix1, uint8_t * pix2, int line_size, int h) {
    int tmp;

    assert( (((int)pix1) & 7) == 0);
    assert( (((int)pix2) & 7) == 0);
    assert((line_size &7) ==0);

#define SUM(in0, in1, out0, out1) \
      "movq (%0)," #out0 "\n"\
      "movq (%1),%%mm2\n"\
      "movq 8(%0)," #out1 "\n"\
      "movq 8(%1),%%mm3\n"\
      "addl %3,%0\n"\
      "addl %3,%1\n"\
      "psubb %%mm2, " #out0 "\n"\
      "psubb %%mm3, " #out1 "\n"\
      "pxor %%mm7, " #out0 "\n"\
      "pxor %%mm7, " #out1 "\n"\
      "psadbw " #out0 ", " #in0 "\n"\
      "psadbw " #out1 ", " #in1 "\n"\
      "paddw " #in1 ", " #in0 "\n"\
      "paddw " #in0 ", %%mm6\n"

  asm volatile (
      "movl %4,%%ecx\n"
      "pxor %%mm6,%%mm6\n"
      "pcmpeqw %%mm7,%%mm7\n"
      "psllw $15, %%mm7\n"
      "packsswb %%mm7, %%mm7\n"
      "movq (%0),%%mm0\n"
      "movq (%1),%%mm2\n"
      "movq 8(%0),%%mm1\n"
      "movq 8(%1),%%mm3\n"
      "addl %3,%0\n"
      "addl %3,%1\n"
      "subl $2, %%ecx\n"
      "psubb %%mm2, %%mm0\n"
      "psubb %%mm3, %%mm1\n"
      "pxor %%mm7, %%mm0\n"
      "pxor %%mm7, %%mm1\n"
      SUM(%%mm0, %%mm1, %%mm4, %%mm5)
      "1:\n"

      SUM(%%mm4, %%mm5, %%mm0, %%mm1)

      SUM(%%mm0, %%mm1, %%mm4, %%mm5)

      "subl $2, %%ecx\n"
      "jnz 1b\n"

      "movd %%mm6,%2\n"
      : "+r" (pix1), "+r" (pix2), "=r"(tmp)
      : "r" (line_size) , "m" (h)
      : "%ecx");
    return tmp;
}
#undef SUM

static void diff_bytes_mmx(uint8_t *dst, uint8_t *src1, uint8_t *src2, int w){
    int i=0;
    asm volatile(
        "1:                             \n\t"
        "movq  (%2, %0), %%mm0          \n\t"
        "movq  (%1, %0), %%mm1          \n\t"
        "psubb %%mm0, %%mm1             \n\t"
        "movq %%mm1, (%3, %0)           \n\t"
        "movq 8(%2, %0), %%mm0          \n\t"
        "movq 8(%1, %0), %%mm1          \n\t"
        "psubb %%mm0, %%mm1             \n\t"
        "movq %%mm1, 8(%3, %0)          \n\t"
        "addl $16, %0                   \n\t"
        "cmpl %4, %0                    \n\t"
        " jb 1b                         \n\t"
        : "+r" (i)
        : "r"(src1), "r"(src2), "r"(dst), "r"(w-15)
    );
    for(; i<w; i++)
        dst[i+0] = src1[i+0]-src2[i+0];
}

static void sub_hfyu_median_prediction_mmx2(uint8_t *dst, uint8_t *src1, uint8_t *src2, int w, int *left, int *left_top){
    int i=0;
    uint8_t l, lt;

    asm volatile(
        "1:                             \n\t"
        "movq  -1(%1, %0), %%mm0        \n\t" // LT
        "movq  (%1, %0), %%mm1          \n\t" // T
        "movq  -1(%2, %0), %%mm2        \n\t" // L
        "movq  (%2, %0), %%mm3          \n\t" // X
        "movq %%mm2, %%mm4              \n\t" // L
        "psubb %%mm0, %%mm2             \n\t"
        "paddb %%mm1, %%mm2             \n\t" // L + T - LT
        "movq %%mm4, %%mm5              \n\t" // L
        "pmaxub %%mm1, %%mm4            \n\t" // max(T, L)
        "pminub %%mm5, %%mm1            \n\t" // min(T, L)
        "pminub %%mm2, %%mm4            \n\t"
        "pmaxub %%mm1, %%mm4            \n\t"
        "psubb %%mm4, %%mm3             \n\t" // dst - pred
        "movq %%mm3, (%3, %0)           \n\t"
        "addl $8, %0                    \n\t"
        "cmpl %4, %0                    \n\t"
        " jb 1b                         \n\t"
        : "+r" (i)
        : "r"(src1), "r"(src2), "r"(dst), "r"(w)
    );

    l= *left;
    lt= *left_top;

    dst[0]= src2[0] - mid_pred(l, src1[0], (l + src1[0] - lt)&0xFF);

    *left_top= src1[w-1];
    *left    = src2[w-1];
}

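/* The HuffYUV median predictor (an explanatory note, not in the original
 * file): each output byte is x - median(L, T, L + T - LT), with L the left
 * neighbour, T the top and LT the top-left. The median is built from
 * min/max only:
 *   median(L, T, G) = max(min(L, T), min(max(L, T), G)),  G = L + T - LT
 * which is exactly the pminub/pmaxub sequence above. */
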
#define LBUTTERFLY2(a1,b1,a2,b2)\
    "paddw " #b1 ", " #a1 "             \n\t"\
    "paddw " #b2 ", " #a2 "             \n\t"\
    "paddw " #b1 ", " #b1 "             \n\t"\
    "paddw " #b2 ", " #b2 "             \n\t"\
    "psubw " #a1 ", " #b1 "             \n\t"\
    "psubw " #a2 ", " #b2 "             \n\t"

#define HADAMARD48\
        LBUTTERFLY2(%%mm0, %%mm1, %%mm2, %%mm3)\
        LBUTTERFLY2(%%mm4, %%mm5, %%mm6, %%mm7)\
        LBUTTERFLY2(%%mm0, %%mm2, %%mm1, %%mm3)\
        LBUTTERFLY2(%%mm4, %%mm6, %%mm5, %%mm7)\
        LBUTTERFLY2(%%mm0, %%mm4, %%mm1, %%mm5)\
        LBUTTERFLY2(%%mm2, %%mm6, %%mm3, %%mm7)\

#define MMABS(a,z)\
    "pxor " #z ", " #z "                \n\t"\
    "pcmpgtw " #a ", " #z "             \n\t"\
    "pxor " #z ", " #a "                \n\t"\
    "psubw " #z ", " #a "               \n\t"

#define MMABS_SUM(a,z, sum)\
    "pxor " #z ", " #z "                \n\t"\
    "pcmpgtw " #a ", " #z "             \n\t"\
    "pxor " #z ", " #a "                \n\t"\
    "psubw " #z ", " #a "               \n\t"\
    "paddusw " #a ", " #sum "           \n\t"

#define MMABS_MMX2(a,z)\
    "pxor " #z ", " #z "                \n\t"\
    "psubw " #a ", " #z "               \n\t"\
    "pmaxsw " #z ", " #a "              \n\t"

#define MMABS_SUM_MMX2(a,z, sum)\
    "pxor " #z ", " #z "                \n\t"\
    "psubw " #a ", " #z "               \n\t"\
    "pmaxsw " #z ", " #a "              \n\t"\
    "paddusw " #a ", " #sum "           \n\t"

#define SBUTTERFLY(a,b,t,n)\
    "movq " #a ", " #t "                \n\t" /* abcd */\
    "punpckl" #n " " #b ", " #a "       \n\t" /* aebf */\
    "punpckh" #n " " #b ", " #t "       \n\t" /* cgdh */\

#define TRANSPOSE4(a,b,c,d,t)\
    SBUTTERFLY(a,b,t,wd) /* a=aebf t=cgdh */\
    SBUTTERFLY(c,d,b,wd) /* c=imjn b=kolp */\
    SBUTTERFLY(a,c,d,dq) /* a=aeim d=bfjn */\
    SBUTTERFLY(t,b,c,dq) /* t=cgko c=dhlp */

#define LOAD4(o, a, b, c, d)\
        "movq "#o"(%1), " #a "          \n\t"\
        "movq "#o"+16(%1), " #b "       \n\t"\
        "movq "#o"+32(%1), " #c "       \n\t"\
        "movq "#o"+48(%1), " #d "       \n\t"

#define STORE4(o, a, b, c, d)\
        "movq "#a", "#o"(%1)            \n\t"\
        "movq "#b", "#o"+16(%1)         \n\t"\
        "movq "#c", "#o"+32(%1)         \n\t"\
        "movq "#d", "#o"+48(%1)         \n\t"\

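/* Building blocks for the 8x8 Hadamard SATD below (an explanatory note,
 * not in the original file): LBUTTERFLY2 maps (a, b) to (a + b, b - a) in
 * two register pairs at once, so the six invocations in HADAMARD48 form a
 * complete 3-stage 8-point Hadamard transform, four 16-bit lanes in
 * parallel. MMABS uses the two's-complement identity
 *   |x| = (x ^ m) - m,  where m = (x < 0) ? -1 : 0  (built by pcmpgtw)
 * while the MMX2 variant gets |x| as max(x, -x) via pmaxsw. */
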
static int hadamard8_diff_mmx(void *s, uint8_t *src1, uint8_t *src2, int stride, int h){
    uint64_t temp[16] __align8;
    int sum=0;

    assert(h==8);

    diff_pixels_mmx((DCTELEM*)temp, src1, src2, stride);

    asm volatile(
        LOAD4(0 , %%mm0, %%mm1, %%mm2, %%mm3)
        LOAD4(64, %%mm4, %%mm5, %%mm6, %%mm7)

        HADAMARD48

        "movq %%mm7, 112(%1)            \n\t"

        TRANSPOSE4(%%mm0, %%mm1, %%mm2, %%mm3, %%mm7)
        STORE4(0 , %%mm0, %%mm3, %%mm7, %%mm2)

        "movq 112(%1), %%mm7            \n\t"
        TRANSPOSE4(%%mm4, %%mm5, %%mm6, %%mm7, %%mm0)
        STORE4(64, %%mm4, %%mm7, %%mm0, %%mm6)

        LOAD4(8 , %%mm0, %%mm1, %%mm2, %%mm3)
        LOAD4(72, %%mm4, %%mm5, %%mm6, %%mm7)

        HADAMARD48

        "movq %%mm7, 120(%1)            \n\t"

        TRANSPOSE4(%%mm0, %%mm1, %%mm2, %%mm3, %%mm7)
        STORE4(8 , %%mm0, %%mm3, %%mm7, %%mm2)

        "movq 120(%1), %%mm7            \n\t"
        TRANSPOSE4(%%mm4, %%mm5, %%mm6, %%mm7, %%mm0)
        "movq %%mm7, %%mm5              \n\t"//FIXME remove
        "movq %%mm6, %%mm7              \n\t"
        "movq %%mm0, %%mm6              \n\t"
//        STORE4(72, %%mm4, %%mm7, %%mm0, %%mm6) //FIXME remove

        LOAD4(64, %%mm0, %%mm1, %%mm2, %%mm3)
//        LOAD4(72, %%mm4, %%mm5, %%mm6, %%mm7)

        HADAMARD48
        "movq %%mm7, 64(%1)             \n\t"
        MMABS(%%mm0, %%mm7)
        MMABS_SUM(%%mm1, %%mm7, %%mm0)
        MMABS_SUM(%%mm2, %%mm7, %%mm0)
        MMABS_SUM(%%mm3, %%mm7, %%mm0)
        MMABS_SUM(%%mm4, %%mm7, %%mm0)
        MMABS_SUM(%%mm5, %%mm7, %%mm0)
        MMABS_SUM(%%mm6, %%mm7, %%mm0)
        "movq 64(%1), %%mm1             \n\t"
        MMABS_SUM(%%mm1, %%mm7, %%mm0)
        "movq %%mm0, 64(%1)             \n\t"

        LOAD4(0 , %%mm0, %%mm1, %%mm2, %%mm3)
        LOAD4(8 , %%mm4, %%mm5, %%mm6, %%mm7)

        HADAMARD48
        "movq %%mm7, (%1)               \n\t"
        MMABS(%%mm0, %%mm7)
        MMABS_SUM(%%mm1, %%mm7, %%mm0)
        MMABS_SUM(%%mm2, %%mm7, %%mm0)
        MMABS_SUM(%%mm3, %%mm7, %%mm0)
        MMABS_SUM(%%mm4, %%mm7, %%mm0)
        MMABS_SUM(%%mm5, %%mm7, %%mm0)
        MMABS_SUM(%%mm6, %%mm7, %%mm0)
        "movq (%1), %%mm1               \n\t"
        MMABS_SUM(%%mm1, %%mm7, %%mm0)
        "movq 64(%1), %%mm1             \n\t"
        MMABS_SUM(%%mm1, %%mm7, %%mm0)

        "movq %%mm0, %%mm1              \n\t"
        "psrlq $32, %%mm0               \n\t"
        "paddusw %%mm1, %%mm0           \n\t"
        "movq %%mm0, %%mm1              \n\t"
        "psrlq $16, %%mm0               \n\t"
        "paddusw %%mm1, %%mm0           \n\t"
        "movd %%mm0, %0                 \n\t"

        : "=r" (sum)
        : "r"(temp)
    );
    return sum&0xFFFF;
}

static int hadamard8_diff_mmx2(void *s, uint8_t *src1, uint8_t *src2, int stride, int h){
    uint64_t temp[16] __align8;
    int sum=0;

    assert(h==8);

    diff_pixels_mmx((DCTELEM*)temp, src1, src2, stride);

    asm volatile(
        LOAD4(0 , %%mm0, %%mm1, %%mm2, %%mm3)
        LOAD4(64, %%mm4, %%mm5, %%mm6, %%mm7)

        HADAMARD48

        "movq %%mm7, 112(%1)            \n\t"

        TRANSPOSE4(%%mm0, %%mm1, %%mm2, %%mm3, %%mm7)
        STORE4(0 , %%mm0, %%mm3, %%mm7, %%mm2)

        "movq 112(%1), %%mm7            \n\t"
        TRANSPOSE4(%%mm4, %%mm5, %%mm6, %%mm7, %%mm0)
        STORE4(64, %%mm4, %%mm7, %%mm0, %%mm6)

        LOAD4(8 , %%mm0, %%mm1, %%mm2, %%mm3)
        LOAD4(72, %%mm4, %%mm5, %%mm6, %%mm7)

        HADAMARD48

        "movq %%mm7, 120(%1)            \n\t"

        TRANSPOSE4(%%mm0, %%mm1, %%mm2, %%mm3, %%mm7)
        STORE4(8 , %%mm0, %%mm3, %%mm7, %%mm2)

        "movq 120(%1), %%mm7            \n\t"
        TRANSPOSE4(%%mm4, %%mm5, %%mm6, %%mm7, %%mm0)
        "movq %%mm7, %%mm5              \n\t"//FIXME remove
        "movq %%mm6, %%mm7              \n\t"
        "movq %%mm0, %%mm6              \n\t"
//        STORE4(72, %%mm4, %%mm7, %%mm0, %%mm6) //FIXME remove

        LOAD4(64, %%mm0, %%mm1, %%mm2, %%mm3)
//        LOAD4(72, %%mm4, %%mm5, %%mm6, %%mm7)

        HADAMARD48
        "movq %%mm7, 64(%1)             \n\t"
        MMABS_MMX2(%%mm0, %%mm7)
        MMABS_SUM_MMX2(%%mm1, %%mm7, %%mm0)
        MMABS_SUM_MMX2(%%mm2, %%mm7, %%mm0)
        MMABS_SUM_MMX2(%%mm3, %%mm7, %%mm0)
        MMABS_SUM_MMX2(%%mm4, %%mm7, %%mm0)
        MMABS_SUM_MMX2(%%mm5, %%mm7, %%mm0)
        MMABS_SUM_MMX2(%%mm6, %%mm7, %%mm0)
        "movq 64(%1), %%mm1             \n\t"
        MMABS_SUM_MMX2(%%mm1, %%mm7, %%mm0)
        "movq %%mm0, 64(%1)             \n\t"

        LOAD4(0 , %%mm0, %%mm1, %%mm2, %%mm3)
        LOAD4(8 , %%mm4, %%mm5, %%mm6, %%mm7)

        HADAMARD48
        "movq %%mm7, (%1)               \n\t"
        MMABS_MMX2(%%mm0, %%mm7)
        MMABS_SUM_MMX2(%%mm1, %%mm7, %%mm0)
        MMABS_SUM_MMX2(%%mm2, %%mm7, %%mm0)
        MMABS_SUM_MMX2(%%mm3, %%mm7, %%mm0)
        MMABS_SUM_MMX2(%%mm4, %%mm7, %%mm0)
        MMABS_SUM_MMX2(%%mm5, %%mm7, %%mm0)
        MMABS_SUM_MMX2(%%mm6, %%mm7, %%mm0)
        "movq (%1), %%mm1               \n\t"
        MMABS_SUM_MMX2(%%mm1, %%mm7, %%mm0)
        "movq 64(%1), %%mm1             \n\t"
        MMABS_SUM_MMX2(%%mm1, %%mm7, %%mm0)

        "movq %%mm0, %%mm1              \n\t"
        "psrlq $32, %%mm0               \n\t"
        "paddusw %%mm1, %%mm0           \n\t"
        "movq %%mm0, %%mm1              \n\t"
        "psrlq $16, %%mm0               \n\t"
        "paddusw %%mm1, %%mm0           \n\t"
        "movd %%mm0, %0                 \n\t"

        : "=r" (sum)
        : "r"(temp)
    );
    return sum&0xFFFF;
}


WARPER8_16_SQ(hadamard8_diff_mmx, hadamard8_diff16_mmx)
WARPER8_16_SQ(hadamard8_diff_mmx2, hadamard8_diff16_mmx2)
#endif //CONFIG_ENCODERS

#define put_no_rnd_pixels8_mmx(a,b,c,d) put_pixels8_mmx(a,b,c,d)
1295
#define put_no_rnd_pixels16_mmx(a,b,c,d) put_pixels16_mmx(a,b,c,d)
1296

    
1297
#define QPEL_V_LOW(m3,m4,m5,m6, pw_20, pw_3, rnd, in0, in1, in2, in7, out, OP)\
1298
        "paddw " #m4 ", " #m3 "                \n\t" /* x1 */\
1299
        "movq "MANGLE(ff_pw_20)", %%mm4                \n\t" /* 20 */\
1300
        "pmullw " #m3 ", %%mm4                \n\t" /* 20x1 */\
1301
        "movq "#in7", " #m3 "                \n\t" /* d */\
1302
        "movq "#in0", %%mm5                \n\t" /* D */\
1303
        "paddw " #m3 ", %%mm5                \n\t" /* x4 */\
1304
        "psubw %%mm5, %%mm4                \n\t" /* 20x1 - x4 */\
1305
        "movq "#in1", %%mm5                \n\t" /* C */\
1306
        "movq "#in2", %%mm6                \n\t" /* B */\
1307
        "paddw " #m6 ", %%mm5                \n\t" /* x3 */\
1308
        "paddw " #m5 ", %%mm6                \n\t" /* x2 */\
1309
        "paddw %%mm6, %%mm6                \n\t" /* 2x2 */\
1310
        "psubw %%mm6, %%mm5                \n\t" /* -2x2 + x3 */\
1311
        "pmullw "MANGLE(ff_pw_3)", %%mm5        \n\t" /* -6x2 + 3x3 */\
1312
        "paddw " #rnd ", %%mm4                \n\t" /* x2 */\
1313
        "paddw %%mm4, %%mm5                \n\t" /* 20x1 - 6x2 + 3x3 - x4 */\
1314
        "psraw $5, %%mm5                \n\t"\
1315
        "packuswb %%mm5, %%mm5                \n\t"\
1316
        OP(%%mm5, out, %%mm7, d)
1317

    
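/* QPEL_V_LOW evaluates the MPEG-4 8-tap half-pel filter
 * (-1, 3, -6, 20, 20, -6, 3, -1) / 32 (a note, not in the original file):
 * x1..x4 are the sums of the symmetric tap pairs, so the result is
 * (20*x1 - 6*x2 + 3*x3 - x4 + rounder) >> 5, finally saturated to a byte
 * by packuswb. */
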
#define QPEL_BASE(OPNAME, ROUNDER, RND, OP_MMX2, OP_3DNOW)\
static void OPNAME ## mpeg4_qpel16_h_lowpass_mmx2(uint8_t *dst, uint8_t *src, int dstStride, int srcStride, int h){\
    uint64_t temp;\
\
    asm volatile(\
        "pxor %%mm7, %%mm7              \n\t"\
        "1:                             \n\t"\
        "movq  (%0), %%mm0              \n\t" /* ABCDEFGH */\
        "movq %%mm0, %%mm1              \n\t" /* ABCDEFGH */\
        "movq %%mm0, %%mm2              \n\t" /* ABCDEFGH */\
        "punpcklbw %%mm7, %%mm0         \n\t" /* 0A0B0C0D */\
        "punpckhbw %%mm7, %%mm1         \n\t" /* 0E0F0G0H */\
        "pshufw $0x90, %%mm0, %%mm5     \n\t" /* 0A0A0B0C */\
        "pshufw $0x41, %%mm0, %%mm6     \n\t" /* 0B0A0A0B */\
        "movq %%mm2, %%mm3              \n\t" /* ABCDEFGH */\
        "movq %%mm2, %%mm4              \n\t" /* ABCDEFGH */\
        "psllq $8, %%mm2                \n\t" /* 0ABCDEFG */\
        "psllq $16, %%mm3               \n\t" /* 00ABCDEF */\
        "psllq $24, %%mm4               \n\t" /* 000ABCDE */\
        "punpckhbw %%mm7, %%mm2         \n\t" /* 0D0E0F0G */\
        "punpckhbw %%mm7, %%mm3         \n\t" /* 0C0D0E0F */\
        "punpckhbw %%mm7, %%mm4         \n\t" /* 0B0C0D0E */\
        "paddw %%mm3, %%mm5             \n\t" /* b */\
        "paddw %%mm2, %%mm6             \n\t" /* c */\
        "paddw %%mm5, %%mm5             \n\t" /* 2b */\
        "psubw %%mm5, %%mm6             \n\t" /* c - 2b */\
        "pshufw $0x06, %%mm0, %%mm5     \n\t" /* 0C0B0A0A */\
        "pmullw "MANGLE(ff_pw_3)", %%mm6 \n\t" /* 3c - 6b */\
        "paddw %%mm4, %%mm0             \n\t" /* a */\
        "paddw %%mm1, %%mm5             \n\t" /* d */\
        "pmullw "MANGLE(ff_pw_20)", %%mm0 \n\t" /* 20a */\
        "psubw %%mm5, %%mm0             \n\t" /* 20a - d */\
        "paddw %6, %%mm6                \n\t"\
        "paddw %%mm6, %%mm0             \n\t" /* 20a - 6b + 3c - d */\
        "psraw $5, %%mm0                \n\t"\
        "movq %%mm0, %5                 \n\t"\
        /* mm1=EFGH, mm2=DEFG, mm3=CDEF, mm4=BCDE, mm7=0 */\
        \
        "movq 5(%0), %%mm0              \n\t" /* FGHIJKLM */\
        "movq %%mm0, %%mm5              \n\t" /* FGHIJKLM */\
        "movq %%mm0, %%mm6              \n\t" /* FGHIJKLM */\
        "psrlq $8, %%mm0                \n\t" /* GHIJKLM0 */\
        "psrlq $16, %%mm5               \n\t" /* HIJKLM00 */\
        "punpcklbw %%mm7, %%mm0         \n\t" /* 0G0H0I0J */\
        "punpcklbw %%mm7, %%mm5         \n\t" /* 0H0I0J0K */\
        "paddw %%mm0, %%mm2             \n\t" /* b */\
        "paddw %%mm5, %%mm3             \n\t" /* c */\
        "paddw %%mm2, %%mm2             \n\t" /* 2b */\
        "psubw %%mm2, %%mm3             \n\t" /* c - 2b */\
        "movq %%mm6, %%mm2              \n\t" /* FGHIJKLM */\
        "psrlq $24, %%mm6               \n\t" /* IJKLM000 */\
        "punpcklbw %%mm7, %%mm2         \n\t" /* 0F0G0H0I */\
        "punpcklbw %%mm7, %%mm6         \n\t" /* 0I0J0K0L */\
        "pmullw "MANGLE(ff_pw_3)", %%mm3 \n\t" /* 3c - 6b */\
        "paddw %%mm2, %%mm1             \n\t" /* a */\
        "paddw %%mm6, %%mm4             \n\t" /* d */\
        "pmullw "MANGLE(ff_pw_20)", %%mm1 \n\t" /* 20a */\
        "psubw %%mm4, %%mm3             \n\t" /* - 6b +3c - d */\
        "paddw %6, %%mm1                \n\t"\
        "paddw %%mm1, %%mm3             \n\t" /* 20a - 6b +3c - d */\
        "psraw $5, %%mm3                \n\t"\
        "movq %5, %%mm1                 \n\t"\
        "packuswb %%mm3, %%mm1          \n\t"\
        OP_MMX2(%%mm1, (%1),%%mm4, q)\
        /* mm0= GHIJ, mm2=FGHI, mm5=HIJK, mm6=IJKL, mm7=0 */\
        \
        "movq 9(%0), %%mm1              \n\t" /* JKLMNOPQ */\
        "movq %%mm1, %%mm4              \n\t" /* JKLMNOPQ */\
        "movq %%mm1, %%mm3              \n\t" /* JKLMNOPQ */\
        "psrlq $8, %%mm1                \n\t" /* KLMNOPQ0 */\
        "psrlq $16, %%mm4               \n\t" /* LMNOPQ00 */\
        "punpcklbw %%mm7, %%mm1         \n\t" /* 0K0L0M0N */\
        "punpcklbw %%mm7, %%mm4         \n\t" /* 0L0M0N0O */\
        "paddw %%mm1, %%mm5             \n\t" /* b */\
        "paddw %%mm4, %%mm0             \n\t" /* c */\
        "paddw %%mm5, %%mm5             \n\t" /* 2b */\
        "psubw %%mm5, %%mm0             \n\t" /* c - 2b */\
        "movq %%mm3, %%mm5              \n\t" /* JKLMNOPQ */\
        "psrlq $24, %%mm3               \n\t" /* MNOPQ000 */\
        "pmullw "MANGLE(ff_pw_3)", %%mm0 \n\t" /* 3c - 6b */\
        "punpcklbw %%mm7, %%mm3         \n\t" /* 0M0N0O0P */\
        "paddw %%mm3, %%mm2             \n\t" /* d */\
        "psubw %%mm2, %%mm0             \n\t" /* -6b + 3c - d */\
        "movq %%mm5, %%mm2              \n\t" /* JKLMNOPQ */\
        "punpcklbw %%mm7, %%mm2         \n\t" /* 0J0K0L0M */\
        "punpckhbw %%mm7, %%mm5         \n\t" /* 0N0O0P0Q */\
        "paddw %%mm2, %%mm6             \n\t" /* a */\
        "pmullw "MANGLE(ff_pw_20)", %%mm6 \n\t" /* 20a */\
        "paddw %6, %%mm0                \n\t"\
        "paddw %%mm6, %%mm0             \n\t" /* 20a - 6b + 3c - d */\
        "psraw $5, %%mm0                \n\t"\
        /* mm1=KLMN, mm2=JKLM, mm3=MNOP, mm4=LMNO, mm5=NOPQ mm7=0 */\
        \
        "paddw %%mm5, %%mm3             \n\t" /* a */\
        "pshufw $0xF9, %%mm5, %%mm6     \n\t" /* 0O0P0Q0Q */\
        "paddw %%mm4, %%mm6             \n\t" /* b */\
        "pshufw $0xBE, %%mm5, %%mm4     \n\t" /* 0P0Q0Q0P */\
        "pshufw $0x6F, %%mm5, %%mm5     \n\t" /* 0Q0Q0P0O */\
        "paddw %%mm1, %%mm4             \n\t" /* c */\
        "paddw %%mm2, %%mm5             \n\t" /* d */\
        "paddw %%mm6, %%mm6             \n\t" /* 2b */\
        "psubw %%mm6, %%mm4             \n\t" /* c - 2b */\
        "pmullw "MANGLE(ff_pw_20)", %%mm3 \n\t" /* 20a */\
        "pmullw "MANGLE(ff_pw_3)", %%mm4 \n\t" /* 3c - 6b */\
        "psubw %%mm5, %%mm3             \n\t" /* 20a - d */\
        "paddw %6, %%mm4                \n\t"\
        "paddw %%mm3, %%mm4             \n\t" /* 20a - 6b + 3c - d */\
        "psraw $5, %%mm4                \n\t"\
        "packuswb %%mm4, %%mm0          \n\t"\
        OP_MMX2(%%mm0, 8(%1), %%mm4, q)\
        \
        "addl %3, %0                    \n\t"\
        "addl %4, %1                    \n\t"\
        "decl %2                        \n\t"\
        " jnz 1b                        \n\t"\
        : "+a"(src), "+c"(dst), "+m"(h)\
        : "d"(srcStride), "S"(dstStride), /*"m"(ff_pw_20), "m"(ff_pw_3),*/ "m"(temp), "m"(ROUNDER)\
        : "memory"\
    );\
}\
\
static void OPNAME ## mpeg4_qpel16_h_lowpass_3dnow(uint8_t *dst, uint8_t *src, int dstStride, int srcStride, int h){\
1440
    int i;\
1441
    int16_t temp[16];\
1442
    /* quick HACK, XXX FIXME MUST be optimized */\
1443
    for(i=0; i<h; i++)\
1444
    {\
1445
        temp[ 0]= (src[ 0]+src[ 1])*20 - (src[ 0]+src[ 2])*6 + (src[ 1]+src[ 3])*3 - (src[ 2]+src[ 4]);\
1446
        temp[ 1]= (src[ 1]+src[ 2])*20 - (src[ 0]+src[ 3])*6 + (src[ 0]+src[ 4])*3 - (src[ 1]+src[ 5]);\
1447
        temp[ 2]= (src[ 2]+src[ 3])*20 - (src[ 1]+src[ 4])*6 + (src[ 0]+src[ 5])*3 - (src[ 0]+src[ 6]);\
1448
        temp[ 3]= (src[ 3]+src[ 4])*20 - (src[ 2]+src[ 5])*6 + (src[ 1]+src[ 6])*3 - (src[ 0]+src[ 7]);\
1449
        temp[ 4]= (src[ 4]+src[ 5])*20 - (src[ 3]+src[ 6])*6 + (src[ 2]+src[ 7])*3 - (src[ 1]+src[ 8]);\
1450
        temp[ 5]= (src[ 5]+src[ 6])*20 - (src[ 4]+src[ 7])*6 + (src[ 3]+src[ 8])*3 - (src[ 2]+src[ 9]);\
1451
        temp[ 6]= (src[ 6]+src[ 7])*20 - (src[ 5]+src[ 8])*6 + (src[ 4]+src[ 9])*3 - (src[ 3]+src[10]);\
1452
        temp[ 7]= (src[ 7]+src[ 8])*20 - (src[ 6]+src[ 9])*6 + (src[ 5]+src[10])*3 - (src[ 4]+src[11]);\
1453
        temp[ 8]= (src[ 8]+src[ 9])*20 - (src[ 7]+src[10])*6 + (src[ 6]+src[11])*3 - (src[ 5]+src[12]);\
1454
        temp[ 9]= (src[ 9]+src[10])*20 - (src[ 8]+src[11])*6 + (src[ 7]+src[12])*3 - (src[ 6]+src[13]);\
1455
        temp[10]= (src[10]+src[11])*20 - (src[ 9]+src[12])*6 + (src[ 8]+src[13])*3 - (src[ 7]+src[14]);\
1456
        temp[11]= (src[11]+src[12])*20 - (src[10]+src[13])*6 + (src[ 9]+src[14])*3 - (src[ 8]+src[15]);\
1457
        temp[12]= (src[12]+src[13])*20 - (src[11]+src[14])*6 + (src[10]+src[15])*3 - (src[ 9]+src[16]);\
1458
        temp[13]= (src[13]+src[14])*20 - (src[12]+src[15])*6 + (src[11]+src[16])*3 - (src[10]+src[16]);\
1459
        temp[14]= (src[14]+src[15])*20 - (src[13]+src[16])*6 + (src[12]+src[16])*3 - (src[11]+src[15]);\
1460
        temp[15]= (src[15]+src[16])*20 - (src[14]+src[16])*6 + (src[13]+src[15])*3 - (src[12]+src[14]);\
1461
        asm volatile(\
1462
            "movq (%0), %%mm0                \n\t"\
1463
            "movq 8(%0), %%mm1                \n\t"\
1464
            "paddw %2, %%mm0                \n\t"\
1465
            "paddw %2, %%mm1                \n\t"\
1466
            "psraw $5, %%mm0                \n\t"\
1467
            "psraw $5, %%mm1                \n\t"\
1468
            "packuswb %%mm1, %%mm0        \n\t"\
1469
            OP_3DNOW(%%mm0, (%1), %%mm1, q)\
1470
            "movq 16(%0), %%mm0                \n\t"\
1471
            "movq 24(%0), %%mm1                \n\t"\
1472
            "paddw %2, %%mm0                \n\t"\
1473
            "paddw %2, %%mm1                \n\t"\
1474
            "psraw $5, %%mm0                \n\t"\
1475
            "psraw $5, %%mm1                \n\t"\
1476
            "packuswb %%mm1, %%mm0        \n\t"\
1477
            OP_3DNOW(%%mm0, 8(%1), %%mm1, q)\
1478
            :: "r"(temp), "r"(dst), "m"(ROUNDER)\
1479
            : "memory"\
1480
        );\
1481
        dst+=dstStride;\
1482
        src+=srcStride;\
1483
    }\
1484
}\
1485
\
static void OPNAME ## mpeg4_qpel8_h_lowpass_mmx2(uint8_t *dst, uint8_t *src, int dstStride, int srcStride, int h){\
    uint64_t temp;\
\
    asm volatile(\
        "pxor %%mm7, %%mm7                \n\t"\
        "1:                                \n\t"\
        "movq  (%0), %%mm0                \n\t" /* ABCDEFGH */\
        "movq %%mm0, %%mm1                \n\t" /* ABCDEFGH */\
        "movq %%mm0, %%mm2                \n\t" /* ABCDEFGH */\
        "punpcklbw %%mm7, %%mm0                \n\t" /* 0A0B0C0D */\
        "punpckhbw %%mm7, %%mm1                \n\t" /* 0E0F0G0H */\
        "pshufw $0x90, %%mm0, %%mm5        \n\t" /* 0A0A0B0C */\
        "pshufw $0x41, %%mm0, %%mm6        \n\t" /* 0B0A0A0B */\
        "movq %%mm2, %%mm3                \n\t" /* ABCDEFGH */\
        "movq %%mm2, %%mm4                \n\t" /* ABCDEFGH */\
        "psllq $8, %%mm2                \n\t" /* 0ABCDEFG */\
        "psllq $16, %%mm3                \n\t" /* 00ABCDEF */\
        "psllq $24, %%mm4                \n\t" /* 000ABCDE */\
        "punpckhbw %%mm7, %%mm2                \n\t" /* 0D0E0F0G */\
        "punpckhbw %%mm7, %%mm3                \n\t" /* 0C0D0E0F */\
        "punpckhbw %%mm7, %%mm4                \n\t" /* 0B0C0D0E */\
        "paddw %%mm3, %%mm5                \n\t" /* b */\
        "paddw %%mm2, %%mm6                \n\t" /* c */\
        "paddw %%mm5, %%mm5                \n\t" /* 2b */\
        "psubw %%mm5, %%mm6                \n\t" /* c - 2b */\
        "pshufw $0x06, %%mm0, %%mm5        \n\t" /* 0C0B0A0A */\
        "pmullw "MANGLE(ff_pw_3)", %%mm6                \n\t" /* 3c - 6b */\
        "paddw %%mm4, %%mm0                \n\t" /* a */\
        "paddw %%mm1, %%mm5                \n\t" /* d */\
        "pmullw "MANGLE(ff_pw_20)", %%mm0                \n\t" /* 20a */\
        "psubw %%mm5, %%mm0                \n\t" /* 20a - d */\
        "paddw %6, %%mm6                \n\t"\
        "paddw %%mm6, %%mm0                \n\t" /* 20a - 6b + 3c - d */\
        "psraw $5, %%mm0                \n\t"\
        /* mm1=EFGH, mm2=DEFG, mm3=CDEF, mm4=BCDE, mm7=0 */\
        \
        "movd 5(%0), %%mm5                \n\t" /* FGHI */\
        "punpcklbw %%mm7, %%mm5                \n\t" /* 0F0G0H0I */\
        "pshufw $0xF9, %%mm5, %%mm6        \n\t" /* 0G0H0I0I */\
        "paddw %%mm5, %%mm1                \n\t" /* a */\
        "paddw %%mm6, %%mm2                \n\t" /* b */\
        "pshufw $0xBE, %%mm5, %%mm6        \n\t" /* 0H0I0I0H */\
        "pshufw $0x6F, %%mm5, %%mm5        \n\t" /* 0I0I0H0G */\
        "paddw %%mm6, %%mm3                \n\t" /* c */\
        "paddw %%mm5, %%mm4                \n\t" /* d */\
        "paddw %%mm2, %%mm2                \n\t" /* 2b */\
        "psubw %%mm2, %%mm3                \n\t" /* c - 2b */\
        "pmullw "MANGLE(ff_pw_20)", %%mm1                \n\t" /* 20a */\
        "pmullw "MANGLE(ff_pw_3)", %%mm3                \n\t" /* 3c - 6b */\
        "psubw %%mm4, %%mm3                \n\t" /* -6b + 3c - d */\
        "paddw %6, %%mm1                \n\t"\
        "paddw %%mm1, %%mm3                \n\t" /* 20a - 6b + 3c - d */\
        "psraw $5, %%mm3                \n\t"\
        "packuswb %%mm3, %%mm0                \n\t"\
        OP_MMX2(%%mm0, (%1), %%mm4, q)\
        \
        "addl %3, %0                        \n\t"\
        "addl %4, %1                        \n\t"\
        "decl %2                        \n\t"\
        " jnz 1b                        \n\t"\
        : "+a"(src), "+c"(dst), "+m"(h)\
        : "S"(srcStride), "D"(dstStride), /*"m"(ff_pw_20), "m"(ff_pw_3),*/ "m"(temp), "m"(ROUNDER)\
        : "memory"\
    );\
}\
\
static void OPNAME ## mpeg4_qpel8_h_lowpass_3dnow(uint8_t *dst, uint8_t *src, int dstStride, int srcStride, int h){\
    int i;\
    int16_t temp[8];\
    /* quick HACK, XXX FIXME MUST be optimized */\
    for(i=0; i<h; i++)\
    {\
        temp[ 0]= (src[ 0]+src[ 1])*20 - (src[ 0]+src[ 2])*6 + (src[ 1]+src[ 3])*3 - (src[ 2]+src[ 4]);\
        temp[ 1]= (src[ 1]+src[ 2])*20 - (src[ 0]+src[ 3])*6 + (src[ 0]+src[ 4])*3 - (src[ 1]+src[ 5]);\
        temp[ 2]= (src[ 2]+src[ 3])*20 - (src[ 1]+src[ 4])*6 + (src[ 0]+src[ 5])*3 - (src[ 0]+src[ 6]);\
        temp[ 3]= (src[ 3]+src[ 4])*20 - (src[ 2]+src[ 5])*6 + (src[ 1]+src[ 6])*3 - (src[ 0]+src[ 7]);\
        temp[ 4]= (src[ 4]+src[ 5])*20 - (src[ 3]+src[ 6])*6 + (src[ 2]+src[ 7])*3 - (src[ 1]+src[ 8]);\
        temp[ 5]= (src[ 5]+src[ 6])*20 - (src[ 4]+src[ 7])*6 + (src[ 3]+src[ 8])*3 - (src[ 2]+src[ 8]);\
        temp[ 6]= (src[ 6]+src[ 7])*20 - (src[ 5]+src[ 8])*6 + (src[ 4]+src[ 8])*3 - (src[ 3]+src[ 7]);\
        temp[ 7]= (src[ 7]+src[ 8])*20 - (src[ 6]+src[ 8])*6 + (src[ 5]+src[ 7])*3 - (src[ 4]+src[ 6]);\
        asm volatile(\
            "movq (%0), %%mm0                \n\t"\
            "movq 8(%0), %%mm1                \n\t"\
            "paddw %2, %%mm0                \n\t"\
            "paddw %2, %%mm1                \n\t"\
            "psraw $5, %%mm0                \n\t"\
            "psraw $5, %%mm1                \n\t"\
            "packuswb %%mm1, %%mm0        \n\t"\
            OP_3DNOW(%%mm0, (%1), %%mm1, q)\
            :: "r"(temp), "r"(dst), "m"(ROUNDER)\
            :"memory"\
        );\
        dst+=dstStride;\
        src+=srcStride;\
    }\
}
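
/* For reference: the horizontal lowpass functions above all evaluate the
 * MPEG-4 quarter-pel 6-tap FIR (20, -6, 3, -1 on mirrored pixel pairs);
 * the temp[] tables in the 3dnow fallbacks are exactly this with indices
 * clamped at the block edges.  A minimal scalar sketch, kept out of the
 * build (qpel_h_filter_ref is a hypothetical helper, not part of the
 * dsputil API; edge clamping omitted):
 */
#if 0
static inline int qpel_h_filter_ref(const uint8_t *s)
{
    /* 20*a - 6*b + 3*c - d; the caller then adds the rounder and
       shifts right by 5 before clamping to [0,255] */
    return (s[0]+s[1])*20 - (s[-1]+s[2])*6 + (s[-2]+s[3])*3 - (s[-3]+s[4]);
}
#endif
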
#define QPEL_OP(OPNAME, ROUNDER, RND, OP, MMX)\
\
static void OPNAME ## mpeg4_qpel16_v_lowpass_ ## MMX(uint8_t *dst, uint8_t *src, int dstStride, int srcStride){\
    uint64_t temp[17*4];\
    uint64_t *temp_ptr= temp;\
    int count= 17;\
\
    /*FIXME unroll */\
    asm volatile(\
        "pxor %%mm7, %%mm7                \n\t"\
        "1:                                \n\t"\
        "movq (%0), %%mm0                \n\t"\
        "movq (%0), %%mm1                \n\t"\
        "movq 8(%0), %%mm2                \n\t"\
        "movq 8(%0), %%mm3                \n\t"\
        "punpcklbw %%mm7, %%mm0                \n\t"\
        "punpckhbw %%mm7, %%mm1                \n\t"\
        "punpcklbw %%mm7, %%mm2                \n\t"\
        "punpckhbw %%mm7, %%mm3                \n\t"\
        "movq %%mm0, (%1)                \n\t"\
        "movq %%mm1, 17*8(%1)                \n\t"\
        "movq %%mm2, 2*17*8(%1)                \n\t"\
        "movq %%mm3, 3*17*8(%1)                \n\t"\
        "addl $8, %1                        \n\t"\
        "addl %3, %0                        \n\t"\
        "decl %2                        \n\t"\
        " jnz 1b                        \n\t"\
        : "+r" (src), "+r" (temp_ptr), "+r"(count)\
        : "r" (srcStride)\
        : "memory"\
    );\
    \
    temp_ptr= temp;\
    count=4;\
    \
/*FIXME reorder for speed */\
    asm volatile(\
        /*"pxor %%mm7, %%mm7                \n\t"*/\
        "1:                                \n\t"\
        "movq (%0), %%mm0                \n\t"\
        "movq 8(%0), %%mm1                \n\t"\
        "movq 16(%0), %%mm2                \n\t"\
        "movq 24(%0), %%mm3                \n\t"\
        QPEL_V_LOW(%%mm0, %%mm1, %%mm2, %%mm3, %5, %6, %5, 16(%0),  8(%0),   (%0), 32(%0), (%1), OP)\
        QPEL_V_LOW(%%mm1, %%mm2, %%mm3, %%mm0, %5, %6, %5,  8(%0),   (%0),   (%0), 40(%0), (%1, %3), OP)\
        "addl %4, %1                        \n\t"\
        QPEL_V_LOW(%%mm2, %%mm3, %%mm0, %%mm1, %5, %6, %5,   (%0),   (%0),  8(%0), 48(%0), (%1), OP)\
        \
        QPEL_V_LOW(%%mm3, %%mm0, %%mm1, %%mm2, %5, %6, %5,   (%0),  8(%0), 16(%0), 56(%0), (%1, %3), OP)\
        "addl %4, %1                        \n\t"\
        QPEL_V_LOW(%%mm0, %%mm1, %%mm2, %%mm3, %5, %6, %5,  8(%0), 16(%0), 24(%0), 64(%0), (%1), OP)\
        QPEL_V_LOW(%%mm1, %%mm2, %%mm3, %%mm0, %5, %6, %5, 16(%0), 24(%0), 32(%0), 72(%0), (%1, %3), OP)\
        "addl %4, %1                        \n\t"\
        QPEL_V_LOW(%%mm2, %%mm3, %%mm0, %%mm1, %5, %6, %5, 24(%0), 32(%0), 40(%0), 80(%0), (%1), OP)\
        QPEL_V_LOW(%%mm3, %%mm0, %%mm1, %%mm2, %5, %6, %5, 32(%0), 40(%0), 48(%0), 88(%0), (%1, %3), OP)\
        "addl %4, %1                        \n\t"\
        QPEL_V_LOW(%%mm0, %%mm1, %%mm2, %%mm3, %5, %6, %5, 40(%0), 48(%0), 56(%0), 96(%0), (%1), OP)\
        QPEL_V_LOW(%%mm1, %%mm2, %%mm3, %%mm0, %5, %6, %5, 48(%0), 56(%0), 64(%0),104(%0), (%1, %3), OP)\
        "addl %4, %1                        \n\t"\
        QPEL_V_LOW(%%mm2, %%mm3, %%mm0, %%mm1, %5, %6, %5, 56(%0), 64(%0), 72(%0),112(%0), (%1), OP)\
        QPEL_V_LOW(%%mm3, %%mm0, %%mm1, %%mm2, %5, %6, %5, 64(%0), 72(%0), 80(%0),120(%0), (%1, %3), OP)\
        "addl %4, %1                        \n\t"\
        QPEL_V_LOW(%%mm0, %%mm1, %%mm2, %%mm3, %5, %6, %5, 72(%0), 80(%0), 88(%0),128(%0), (%1), OP)\
        \
        QPEL_V_LOW(%%mm1, %%mm2, %%mm3, %%mm0, %5, %6, %5, 80(%0), 88(%0), 96(%0),128(%0), (%1, %3), OP)\
        "addl %4, %1                        \n\t"  \
        QPEL_V_LOW(%%mm2, %%mm3, %%mm0, %%mm1, %5, %6, %5, 88(%0), 96(%0),104(%0),120(%0), (%1), OP)\
        QPEL_V_LOW(%%mm3, %%mm0, %%mm1, %%mm2, %5, %6, %5, 96(%0),104(%0),112(%0),112(%0), (%1, %3), OP)\
        \
        "addl $136, %0                        \n\t"\
        "addl %6, %1                        \n\t"\
        "decl %2                        \n\t"\
        " jnz 1b                        \n\t"\
        \
        : "+r"(temp_ptr), "+r"(dst), "+g"(count)\
        : "r"(dstStride), "r"(2*dstStride), /*"m"(ff_pw_20), "m"(ff_pw_3),*/ "m"(ROUNDER), "g"(4-14*dstStride)\
        :"memory"\
    );\
}\
\
static void OPNAME ## mpeg4_qpel8_v_lowpass_ ## MMX(uint8_t *dst, uint8_t *src, int dstStride, int srcStride){\
    uint64_t temp[9*4];\
    uint64_t *temp_ptr= temp;\
    int count= 9;\
\
    /*FIXME unroll */\
    asm volatile(\
        "pxor %%mm7, %%mm7                \n\t"\
        "1:                                \n\t"\
        "movq (%0), %%mm0                \n\t"\
        "movq (%0), %%mm1                \n\t"\
        "punpcklbw %%mm7, %%mm0                \n\t"\
        "punpckhbw %%mm7, %%mm1                \n\t"\
        "movq %%mm0, (%1)                \n\t"\
        "movq %%mm1, 9*8(%1)                \n\t"\
        "addl $8, %1                        \n\t"\
        "addl %3, %0                        \n\t"\
        "decl %2                        \n\t"\
        " jnz 1b                        \n\t"\
        : "+r" (src), "+r" (temp_ptr), "+r"(count)\
        : "r" (srcStride)\
        : "memory"\
    );\
    \
    temp_ptr= temp;\
    count=2;\
    \
/*FIXME reorder for speed */\
    asm volatile(\
        /*"pxor %%mm7, %%mm7                \n\t"*/\
        "1:                                \n\t"\
        "movq (%0), %%mm0                \n\t"\
        "movq 8(%0), %%mm1                \n\t"\
        "movq 16(%0), %%mm2                \n\t"\
        "movq 24(%0), %%mm3                \n\t"\
        QPEL_V_LOW(%%mm0, %%mm1, %%mm2, %%mm3, %5, %6, %5, 16(%0),  8(%0),   (%0), 32(%0), (%1), OP)\
        QPEL_V_LOW(%%mm1, %%mm2, %%mm3, %%mm0, %5, %6, %5,  8(%0),   (%0),   (%0), 40(%0), (%1, %3), OP)\
        "addl %4, %1                        \n\t"\
        QPEL_V_LOW(%%mm2, %%mm3, %%mm0, %%mm1, %5, %6, %5,   (%0),   (%0),  8(%0), 48(%0), (%1), OP)\
        \
        QPEL_V_LOW(%%mm3, %%mm0, %%mm1, %%mm2, %5, %6, %5,   (%0),  8(%0), 16(%0), 56(%0), (%1, %3), OP)\
        "addl %4, %1                        \n\t"\
        QPEL_V_LOW(%%mm0, %%mm1, %%mm2, %%mm3, %5, %6, %5,  8(%0), 16(%0), 24(%0), 64(%0), (%1), OP)\
        \
        QPEL_V_LOW(%%mm1, %%mm2, %%mm3, %%mm0, %5, %6, %5, 16(%0), 24(%0), 32(%0), 64(%0), (%1, %3), OP)\
        "addl %4, %1                        \n\t"\
        QPEL_V_LOW(%%mm2, %%mm3, %%mm0, %%mm1, %5, %6, %5, 24(%0), 32(%0), 40(%0), 56(%0), (%1), OP)\
        QPEL_V_LOW(%%mm3, %%mm0, %%mm1, %%mm2, %5, %6, %5, 32(%0), 40(%0), 48(%0), 48(%0), (%1, %3), OP)\
                \
        "addl $72, %0                        \n\t"\
        "addl %6, %1                        \n\t"\
        "decl %2                        \n\t"\
        " jnz 1b                        \n\t"\
         \
        : "+r"(temp_ptr), "+r"(dst), "+g"(count)\
        : "r"(dstStride), "r"(2*dstStride), /*"m"(ff_pw_20), "m"(ff_pw_3),*/ "m"(ROUNDER), "g"(4-6*dstStride)\
        : "memory"\
   );\
}\
\
static void OPNAME ## qpel8_mc00_ ## MMX (uint8_t *dst, uint8_t *src, int stride){\
    OPNAME ## pixels8_mmx(dst, src, stride, 8);\
}\
\
static void OPNAME ## qpel8_mc10_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t temp[8];\
    uint8_t * const half= (uint8_t*)temp;\
    put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(half, src, 8, stride, 8);\
    OPNAME ## pixels8_l2_mmx(dst, src, half, stride, stride, 8);\
}\
\
static void OPNAME ## qpel8_mc20_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    OPNAME ## mpeg4_qpel8_h_lowpass_ ## MMX(dst, src, stride, stride, 8);\
}\
\
static void OPNAME ## qpel8_mc30_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t temp[8];\
    uint8_t * const half= (uint8_t*)temp;\
    put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(half, src, 8, stride, 8);\
    OPNAME ## pixels8_l2_mmx(dst, src+1, half, stride, stride, 8);\
}\
\
static void OPNAME ## qpel8_mc01_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t temp[8];\
    uint8_t * const half= (uint8_t*)temp;\
    put ## RND ## mpeg4_qpel8_v_lowpass_ ## MMX(half, src, 8, stride);\
    OPNAME ## pixels8_l2_mmx(dst, src, half, stride, stride, 8);\
}\
\
static void OPNAME ## qpel8_mc02_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    OPNAME ## mpeg4_qpel8_v_lowpass_ ## MMX(dst, src, stride, stride);\
}\
\
static void OPNAME ## qpel8_mc03_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t temp[8];\
    uint8_t * const half= (uint8_t*)temp;\
    put ## RND ## mpeg4_qpel8_v_lowpass_ ## MMX(half, src, 8, stride);\
    OPNAME ## pixels8_l2_mmx(dst, src+stride, half, stride, stride, 8);\
}\
static void OPNAME ## qpel8_mc11_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t half[8 + 9];\
    uint8_t * const halfH= ((uint8_t*)half) + 64;\
    uint8_t * const halfHV= ((uint8_t*)half);\
    put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8, stride, 9);\
    put ## RND ## pixels8_l2_mmx(halfH, src, halfH, 8, stride, 9);\
    put ## RND ## mpeg4_qpel8_v_lowpass_ ## MMX(halfHV, halfH, 8, 8);\
    OPNAME ## pixels8_l2_mmx(dst, halfH, halfHV, stride, 8, 8);\
}\
static void OPNAME ## qpel8_mc31_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t half[8 + 9];\
    uint8_t * const halfH= ((uint8_t*)half) + 64;\
    uint8_t * const halfHV= ((uint8_t*)half);\
    put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8, stride, 9);\
    put ## RND ## pixels8_l2_mmx(halfH, src+1, halfH, 8, stride, 9);\
    put ## RND ## mpeg4_qpel8_v_lowpass_ ## MMX(halfHV, halfH, 8, 8);\
    OPNAME ## pixels8_l2_mmx(dst, halfH, halfHV, stride, 8, 8);\
}\
static void OPNAME ## qpel8_mc13_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t half[8 + 9];\
    uint8_t * const halfH= ((uint8_t*)half) + 64;\
    uint8_t * const halfHV= ((uint8_t*)half);\
    put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8, stride, 9);\
    put ## RND ## pixels8_l2_mmx(halfH, src, halfH, 8, stride, 9);\
    put ## RND ## mpeg4_qpel8_v_lowpass_ ## MMX(halfHV, halfH, 8, 8);\
    OPNAME ## pixels8_l2_mmx(dst, halfH+8, halfHV, stride, 8, 8);\
}\
static void OPNAME ## qpel8_mc33_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t half[8 + 9];\
    uint8_t * const halfH= ((uint8_t*)half) + 64;\
    uint8_t * const halfHV= ((uint8_t*)half);\
    put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8, stride, 9);\
    put ## RND ## pixels8_l2_mmx(halfH, src+1, halfH, 8, stride, 9);\
    put ## RND ## mpeg4_qpel8_v_lowpass_ ## MMX(halfHV, halfH, 8, 8);\
    OPNAME ## pixels8_l2_mmx(dst, halfH+8, halfHV, stride, 8, 8);\
}\
static void OPNAME ## qpel8_mc21_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t half[8 + 9];\
    uint8_t * const halfH= ((uint8_t*)half) + 64;\
    uint8_t * const halfHV= ((uint8_t*)half);\
    put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8, stride, 9);\
    put ## RND ## mpeg4_qpel8_v_lowpass_ ## MMX(halfHV, halfH, 8, 8);\
    OPNAME ## pixels8_l2_mmx(dst, halfH, halfHV, stride, 8, 8);\
}\
static void OPNAME ## qpel8_mc23_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t half[8 + 9];\
    uint8_t * const halfH= ((uint8_t*)half) + 64;\
    uint8_t * const halfHV= ((uint8_t*)half);\
    put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8, stride, 9);\
    put ## RND ## mpeg4_qpel8_v_lowpass_ ## MMX(halfHV, halfH, 8, 8);\
    OPNAME ## pixels8_l2_mmx(dst, halfH+8, halfHV, stride, 8, 8);\
}\
static void OPNAME ## qpel8_mc12_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t half[8 + 9];\
    uint8_t * const halfH= ((uint8_t*)half);\
    put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8, stride, 9);\
    put ## RND ## pixels8_l2_mmx(halfH, src, halfH, 8, stride, 9);\
    OPNAME ## mpeg4_qpel8_v_lowpass_ ## MMX(dst, halfH, stride, 8);\
}\
static void OPNAME ## qpel8_mc32_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t half[8 + 9];\
    uint8_t * const halfH= ((uint8_t*)half);\
    put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8, stride, 9);\
    put ## RND ## pixels8_l2_mmx(halfH, src+1, halfH, 8, stride, 9);\
    OPNAME ## mpeg4_qpel8_v_lowpass_ ## MMX(dst, halfH, stride, 8);\
}\
static void OPNAME ## qpel8_mc22_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t half[9];\
    uint8_t * const halfH= ((uint8_t*)half);\
    put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8, stride, 9);\
    OPNAME ## mpeg4_qpel8_v_lowpass_ ## MMX(dst, halfH, stride, 8);\
}\
static void OPNAME ## qpel16_mc00_ ## MMX (uint8_t *dst, uint8_t *src, int stride){\
    OPNAME ## pixels16_mmx(dst, src, stride, 16);\
}\
\
static void OPNAME ## qpel16_mc10_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t temp[32];\
    uint8_t * const half= (uint8_t*)temp;\
    put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(half, src, 16, stride, 16);\
    OPNAME ## pixels16_l2_mmx(dst, src, half, stride, stride, 16);\
}\
\
static void OPNAME ## qpel16_mc20_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    OPNAME ## mpeg4_qpel16_h_lowpass_ ## MMX(dst, src, stride, stride, 16);\
}\
\
static void OPNAME ## qpel16_mc30_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t temp[32];\
    uint8_t * const half= (uint8_t*)temp;\
    put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(half, src, 16, stride, 16);\
    OPNAME ## pixels16_l2_mmx(dst, src+1, half, stride, stride, 16);\
}\
\
static void OPNAME ## qpel16_mc01_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t temp[32];\
    uint8_t * const half= (uint8_t*)temp;\
    put ## RND ## mpeg4_qpel16_v_lowpass_ ## MMX(half, src, 16, stride);\
    OPNAME ## pixels16_l2_mmx(dst, src, half, stride, stride, 16);\
}\
\
static void OPNAME ## qpel16_mc02_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    OPNAME ## mpeg4_qpel16_v_lowpass_ ## MMX(dst, src, stride, stride);\
}\
\
static void OPNAME ## qpel16_mc03_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t temp[32];\
    uint8_t * const half= (uint8_t*)temp;\
    put ## RND ## mpeg4_qpel16_v_lowpass_ ## MMX(half, src, 16, stride);\
    OPNAME ## pixels16_l2_mmx(dst, src+stride, half, stride, stride, 16);\
}\
static void OPNAME ## qpel16_mc11_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t half[16*2 + 17*2];\
    uint8_t * const halfH= ((uint8_t*)half) + 256;\
    uint8_t * const halfHV= ((uint8_t*)half);\
    put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16, stride, 17);\
    put ## RND ## pixels16_l2_mmx(halfH, src, halfH, 16, stride, 17);\
    put ## RND ## mpeg4_qpel16_v_lowpass_ ## MMX(halfHV, halfH, 16, 16);\
    OPNAME ## pixels16_l2_mmx(dst, halfH, halfHV, stride, 16, 16);\
}\
static void OPNAME ## qpel16_mc31_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t half[16*2 + 17*2];\
    uint8_t * const halfH= ((uint8_t*)half) + 256;\
    uint8_t * const halfHV= ((uint8_t*)half);\
    put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16, stride, 17);\
    put ## RND ## pixels16_l2_mmx(halfH, src+1, halfH, 16, stride, 17);\
    put ## RND ## mpeg4_qpel16_v_lowpass_ ## MMX(halfHV, halfH, 16, 16);\
    OPNAME ## pixels16_l2_mmx(dst, halfH, halfHV, stride, 16, 16);\
}\
static void OPNAME ## qpel16_mc13_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t half[16*2 + 17*2];\
    uint8_t * const halfH= ((uint8_t*)half) + 256;\
    uint8_t * const halfHV= ((uint8_t*)half);\
    put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16, stride, 17);\
    put ## RND ## pixels16_l2_mmx(halfH, src, halfH, 16, stride, 17);\
    put ## RND ## mpeg4_qpel16_v_lowpass_ ## MMX(halfHV, halfH, 16, 16);\
    OPNAME ## pixels16_l2_mmx(dst, halfH+16, halfHV, stride, 16, 16);\
}\
static void OPNAME ## qpel16_mc33_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t half[16*2 + 17*2];\
    uint8_t * const halfH= ((uint8_t*)half) + 256;\
    uint8_t * const halfHV= ((uint8_t*)half);\
    put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16, stride, 17);\
    put ## RND ## pixels16_l2_mmx(halfH, src+1, halfH, 16, stride, 17);\
    put ## RND ## mpeg4_qpel16_v_lowpass_ ## MMX(halfHV, halfH, 16, 16);\
    OPNAME ## pixels16_l2_mmx(dst, halfH+16, halfHV, stride, 16, 16);\
}\
static void OPNAME ## qpel16_mc21_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t half[16*2 + 17*2];\
    uint8_t * const halfH= ((uint8_t*)half) + 256;\
    uint8_t * const halfHV= ((uint8_t*)half);\
    put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16, stride, 17);\
    put ## RND ## mpeg4_qpel16_v_lowpass_ ## MMX(halfHV, halfH, 16, 16);\
    OPNAME ## pixels16_l2_mmx(dst, halfH, halfHV, stride, 16, 16);\
}\
static void OPNAME ## qpel16_mc23_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t half[16*2 + 17*2];\
    uint8_t * const halfH= ((uint8_t*)half) + 256;\
    uint8_t * const halfHV= ((uint8_t*)half);\
    put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16, stride, 17);\
    put ## RND ## mpeg4_qpel16_v_lowpass_ ## MMX(halfHV, halfH, 16, 16);\
    OPNAME ## pixels16_l2_mmx(dst, halfH+16, halfHV, stride, 16, 16);\
}\
static void OPNAME ## qpel16_mc12_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t half[17*2];\
    uint8_t * const halfH= ((uint8_t*)half);\
    put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16, stride, 17);\
    put ## RND ## pixels16_l2_mmx(halfH, src, halfH, 16, stride, 17);\
    OPNAME ## mpeg4_qpel16_v_lowpass_ ## MMX(dst, halfH, stride, 16);\
}\
static void OPNAME ## qpel16_mc32_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t half[17*2];\
    uint8_t * const halfH= ((uint8_t*)half);\
    put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16, stride, 17);\
    put ## RND ## pixels16_l2_mmx(halfH, src+1, halfH, 16, stride, 17);\
    OPNAME ## mpeg4_qpel16_v_lowpass_ ## MMX(dst, halfH, stride, 16);\
}\
static void OPNAME ## qpel16_mc22_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t half[17*2];\
    uint8_t * const halfH= ((uint8_t*)half);\
    put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16, stride, 17);\
    OPNAME ## mpeg4_qpel16_v_lowpass_ ## MMX(dst, halfH, stride, 16);\
}
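
/* Naming note: in the qpelW_mcXY functions generated above, W is the block
 * width (8 or 16) and X/Y are the horizontal/vertical offsets in quarter
 * pixels, so mc00 is the plain copy, mc20/mc02 the half-pel H/V cases and
 * mc22 the 2D half-pel case; the remaining positions average a lowpass
 * result with the source (or with a second lowpass result) through the
 * pixels*_l2_mmx helpers. */
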
#define PUT_OP(a,b,temp, size) "mov" #size " " #a ", " #b "        \n\t"
#define AVG_3DNOW_OP(a,b,temp, size) \
"mov" #size " " #b ", " #temp "        \n\t"\
"pavgusb " #temp ", " #a "        \n\t"\
"mov" #size " " #a ", " #b "        \n\t"
#define AVG_MMX2_OP(a,b,temp, size) \
"mov" #size " " #b ", " #temp "        \n\t"\
"pavgb " #temp ", " #a "        \n\t"\
"mov" #size " " #a ", " #b "        \n\t"
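
/* Note: pavgb (MMX2) and pavgusb (3DNow!) both compute the rounded bytewise
 * average (a + b + 1) >> 1, so AVG_MMX2_OP and AVG_3DNOW_OP are functionally
 * identical and differ only in the instruction set they require. */
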
QPEL_BASE(put_       , ff_pw_16, _       , PUT_OP, PUT_OP)
QPEL_BASE(avg_       , ff_pw_16, _       , AVG_MMX2_OP, AVG_3DNOW_OP)
QPEL_BASE(put_no_rnd_, ff_pw_15, _no_rnd_, PUT_OP, PUT_OP)
QPEL_OP(put_       , ff_pw_16, _       , PUT_OP, 3dnow)
QPEL_OP(avg_       , ff_pw_16, _       , AVG_3DNOW_OP, 3dnow)
QPEL_OP(put_no_rnd_, ff_pw_15, _no_rnd_, PUT_OP, 3dnow)
QPEL_OP(put_       , ff_pw_16, _       , PUT_OP, mmx2)
QPEL_OP(avg_       , ff_pw_16, _       , AVG_MMX2_OP, mmx2)
QPEL_OP(put_no_rnd_, ff_pw_15, _no_rnd_, PUT_OP, mmx2)
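
/* The ROUNDER constant is what each "paddw %6" above adds before the final
 * "psraw $5": ff_pw_16 gives round-to-nearest, (x + 16) >> 5, while
 * ff_pw_15 gives (x + 15) >> 5, which rounds ties down for the _no_rnd_
 * variants (used when MPEG-4 no-rounding prediction is in effect). */
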
#if 0
static void just_return() { return; }
#endif

#define SET_QPEL_FUNC(postfix1, postfix2) \
    c->put_ ## postfix1 = put_ ## postfix2;\
    c->put_no_rnd_ ## postfix1 = put_no_rnd_ ## postfix2;\
    c->avg_ ## postfix1 = avg_ ## postfix2;
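
/* Example expansion: SET_QPEL_FUNC(qpel_pixels_tab[0][ 0], qpel16_mc00_mmx2)
 * fills all three operation variants of one table slot:
 *
 *     c->put_qpel_pixels_tab[0][ 0]        = put_qpel16_mc00_mmx2;
 *     c->put_no_rnd_qpel_pixels_tab[0][ 0] = put_no_rnd_qpel16_mc00_mmx2;
 *     c->avg_qpel_pixels_tab[0][ 0]        = avg_qpel16_mc00_mmx2;
 */
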
/* external functions, from idct_mmx.c */
void ff_mmx_idct(DCTELEM *block);
void ff_mmxext_idct(DCTELEM *block);

/* XXX: these functions should be removed as soon as all IDCTs are
   converted */
static void ff_libmpeg2mmx_idct_put(uint8_t *dest, int line_size, DCTELEM *block)
{
    ff_mmx_idct (block);
    put_pixels_clamped_mmx(block, dest, line_size);
}
static void ff_libmpeg2mmx_idct_add(uint8_t *dest, int line_size, DCTELEM *block)
{
    ff_mmx_idct (block);
    add_pixels_clamped_mmx(block, dest, line_size);
}
static void ff_libmpeg2mmx2_idct_put(uint8_t *dest, int line_size, DCTELEM *block)
{
    ff_mmxext_idct (block);
    put_pixels_clamped_mmx(block, dest, line_size);
}
static void ff_libmpeg2mmx2_idct_add(uint8_t *dest, int line_size, DCTELEM *block)
{
    ff_mmxext_idct (block);
    add_pixels_clamped_mmx(block, dest, line_size);
}

void dsputil_init_mmx(DSPContext* c, AVCodecContext *avctx)
{
    mm_flags = mm_support();

    if (avctx->dsp_mask) {
        if (avctx->dsp_mask & FF_MM_FORCE)
            mm_flags |= (avctx->dsp_mask & 0xffff);
        else
            mm_flags &= ~(avctx->dsp_mask & 0xffff);
    }
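
    /* Illustrative dsp_mask usage (hypothetical caller code, not part of
     * this file): "avctx->dsp_mask = FF_MM_FORCE | MM_MMX;" force-enables
     * the listed flags regardless of what mm_support() detected, while a
     * mask without FF_MM_FORCE, e.g. "avctx->dsp_mask = MM_MMXEXT;",
     * disables the listed flags instead. */
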
#if 0
    fprintf(stderr, "libavcodec: CPU flags:");
    if (mm_flags & MM_MMX)
        fprintf(stderr, " mmx");
    if (mm_flags & MM_MMXEXT)
        fprintf(stderr, " mmxext");
    if (mm_flags & MM_3DNOW)
        fprintf(stderr, " 3dnow");
    if (mm_flags & MM_SSE)
        fprintf(stderr, " sse");
    if (mm_flags & MM_SSE2)
        fprintf(stderr, " sse2");
    fprintf(stderr, "\n");
#endif

    if (mm_flags & MM_MMX) {
        const int dct_algo = avctx->dct_algo;
        const int idct_algo= avctx->idct_algo;

#ifdef CONFIG_ENCODERS
        if(dct_algo==FF_DCT_AUTO || dct_algo==FF_DCT_MMX){
            if(mm_flags & MM_MMXEXT){
                c->fdct = ff_fdct_mmx2;
            }else{
                c->fdct = ff_fdct_mmx;
            }
        }
#endif //CONFIG_ENCODERS

        if(idct_algo==FF_IDCT_AUTO || idct_algo==FF_IDCT_SIMPLEMMX){
            c->idct_put= ff_simple_idct_put_mmx;
            c->idct_add= ff_simple_idct_add_mmx;
            c->idct    = ff_simple_idct_mmx;
            c->idct_permutation_type= FF_SIMPLE_IDCT_PERM;
        }else if(idct_algo==FF_IDCT_LIBMPEG2MMX){
            if(mm_flags & MM_MMXEXT){
                c->idct_put= ff_libmpeg2mmx2_idct_put;
                c->idct_add= ff_libmpeg2mmx2_idct_add;
                c->idct    = ff_mmxext_idct;
            }else{
                c->idct_put= ff_libmpeg2mmx_idct_put;
                c->idct_add= ff_libmpeg2mmx_idct_add;
                c->idct    = ff_mmx_idct;
            }
            c->idct_permutation_type= FF_LIBMPEG2_IDCT_PERM;
        }
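
        /* idct_permutation_type records which coefficient order the chosen
         * IDCT expects, so the generic dsputil code can permute scan tables
         * and quantizer matrices once at init time rather than reordering
         * coefficients per block. */
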
#ifdef CONFIG_ENCODERS
        c->get_pixels = get_pixels_mmx;
        c->diff_pixels = diff_pixels_mmx;
#endif //CONFIG_ENCODERS
        c->put_pixels_clamped = put_pixels_clamped_mmx;
        c->add_pixels_clamped = add_pixels_clamped_mmx;
        c->clear_blocks = clear_blocks_mmx;
#ifdef CONFIG_ENCODERS
        c->pix_sum = pix_sum16_mmx;
#endif //CONFIG_ENCODERS

        c->put_pixels_tab[0][0] = put_pixels16_mmx;
        c->put_pixels_tab[0][1] = put_pixels16_x2_mmx;
        c->put_pixels_tab[0][2] = put_pixels16_y2_mmx;
        c->put_pixels_tab[0][3] = put_pixels16_xy2_mmx;

        c->put_no_rnd_pixels_tab[0][0] = put_pixels16_mmx;
        c->put_no_rnd_pixels_tab[0][1] = put_no_rnd_pixels16_x2_mmx;
        c->put_no_rnd_pixels_tab[0][2] = put_no_rnd_pixels16_y2_mmx;
        c->put_no_rnd_pixels_tab[0][3] = put_no_rnd_pixels16_xy2_mmx;

        c->avg_pixels_tab[0][0] = avg_pixels16_mmx;
        c->avg_pixels_tab[0][1] = avg_pixels16_x2_mmx;
        c->avg_pixels_tab[0][2] = avg_pixels16_y2_mmx;
        c->avg_pixels_tab[0][3] = avg_pixels16_xy2_mmx;

        c->avg_no_rnd_pixels_tab[0][0] = avg_no_rnd_pixels16_mmx;
        c->avg_no_rnd_pixels_tab[0][1] = avg_no_rnd_pixels16_x2_mmx;
        c->avg_no_rnd_pixels_tab[0][2] = avg_no_rnd_pixels16_y2_mmx;
        c->avg_no_rnd_pixels_tab[0][3] = avg_no_rnd_pixels16_xy2_mmx;

        c->put_pixels_tab[1][0] = put_pixels8_mmx;
        c->put_pixels_tab[1][1] = put_pixels8_x2_mmx;
        c->put_pixels_tab[1][2] = put_pixels8_y2_mmx;
        c->put_pixels_tab[1][3] = put_pixels8_xy2_mmx;

        c->put_no_rnd_pixels_tab[1][0] = put_pixels8_mmx;
        c->put_no_rnd_pixels_tab[1][1] = put_no_rnd_pixels8_x2_mmx;
        c->put_no_rnd_pixels_tab[1][2] = put_no_rnd_pixels8_y2_mmx;
        c->put_no_rnd_pixels_tab[1][3] = put_no_rnd_pixels8_xy2_mmx;

        c->avg_pixels_tab[1][0] = avg_pixels8_mmx;
        c->avg_pixels_tab[1][1] = avg_pixels8_x2_mmx;
        c->avg_pixels_tab[1][2] = avg_pixels8_y2_mmx;
        c->avg_pixels_tab[1][3] = avg_pixels8_xy2_mmx;

        c->avg_no_rnd_pixels_tab[1][0] = avg_no_rnd_pixels8_mmx;
        c->avg_no_rnd_pixels_tab[1][1] = avg_no_rnd_pixels8_x2_mmx;
        c->avg_no_rnd_pixels_tab[1][2] = avg_no_rnd_pixels8_y2_mmx;
        c->avg_no_rnd_pixels_tab[1][3] = avg_no_rnd_pixels8_xy2_mmx;

        c->add_bytes= add_bytes_mmx;
#ifdef CONFIG_ENCODERS
        c->diff_bytes= diff_bytes_mmx;

        c->hadamard8_diff[0]= hadamard8_diff16_mmx;
        c->hadamard8_diff[1]= hadamard8_diff_mmx;

        c->pix_norm1 = pix_norm1_mmx;
        c->sse[0] = sse16_mmx;
        c->vsad[4]= vsad_intra16_mmx;

        if(!(avctx->flags & CODEC_FLAG_BITEXACT)){
            c->vsad[0] = vsad16_mmx;
        }
#endif //CONFIG_ENCODERS

        c->h263_v_loop_filter= h263_v_loop_filter_mmx;
        c->h263_h_loop_filter= h263_h_loop_filter_mmx;

        if (mm_flags & MM_MMXEXT) {
            c->put_pixels_tab[0][1] = put_pixels16_x2_mmx2;
            c->put_pixels_tab[0][2] = put_pixels16_y2_mmx2;

            c->avg_pixels_tab[0][0] = avg_pixels16_mmx2;
            c->avg_pixels_tab[0][1] = avg_pixels16_x2_mmx2;
            c->avg_pixels_tab[0][2] = avg_pixels16_y2_mmx2;

            c->put_pixels_tab[1][1] = put_pixels8_x2_mmx2;
            c->put_pixels_tab[1][2] = put_pixels8_y2_mmx2;

            c->avg_pixels_tab[1][0] = avg_pixels8_mmx2;
            c->avg_pixels_tab[1][1] = avg_pixels8_x2_mmx2;
            c->avg_pixels_tab[1][2] = avg_pixels8_y2_mmx2;

#ifdef CONFIG_ENCODERS
            c->hadamard8_diff[0]= hadamard8_diff16_mmx2;
            c->hadamard8_diff[1]= hadamard8_diff_mmx2;
            c->vsad[4]= vsad_intra16_mmx2;
#endif //CONFIG_ENCODERS

            if(!(avctx->flags & CODEC_FLAG_BITEXACT)){
                c->put_no_rnd_pixels_tab[0][1] = put_no_rnd_pixels16_x2_mmx2;
                c->put_no_rnd_pixels_tab[0][2] = put_no_rnd_pixels16_y2_mmx2;
                c->put_no_rnd_pixels_tab[1][1] = put_no_rnd_pixels8_x2_mmx2;
                c->put_no_rnd_pixels_tab[1][2] = put_no_rnd_pixels8_y2_mmx2;
                c->avg_pixels_tab[0][3] = avg_pixels16_xy2_mmx2;
                c->avg_pixels_tab[1][3] = avg_pixels8_xy2_mmx2;
                c->vsad[0] = vsad16_mmx2;
            }

#if 1
            SET_QPEL_FUNC(qpel_pixels_tab[0][ 0], qpel16_mc00_mmx2)
            SET_QPEL_FUNC(qpel_pixels_tab[0][ 1], qpel16_mc10_mmx2)
            SET_QPEL_FUNC(qpel_pixels_tab[0][ 2], qpel16_mc20_mmx2)
            SET_QPEL_FUNC(qpel_pixels_tab[0][ 3], qpel16_mc30_mmx2)
            SET_QPEL_FUNC(qpel_pixels_tab[0][ 4], qpel16_mc01_mmx2)
            SET_QPEL_FUNC(qpel_pixels_tab[0][ 5], qpel16_mc11_mmx2)
            SET_QPEL_FUNC(qpel_pixels_tab[0][ 6], qpel16_mc21_mmx2)
            SET_QPEL_FUNC(qpel_pixels_tab[0][ 7], qpel16_mc31_mmx2)
            SET_QPEL_FUNC(qpel_pixels_tab[0][ 8], qpel16_mc02_mmx2)
            SET_QPEL_FUNC(qpel_pixels_tab[0][ 9], qpel16_mc12_mmx2)
            SET_QPEL_FUNC(qpel_pixels_tab[0][10], qpel16_mc22_mmx2)
            SET_QPEL_FUNC(qpel_pixels_tab[0][11], qpel16_mc32_mmx2)
            SET_QPEL_FUNC(qpel_pixels_tab[0][12], qpel16_mc03_mmx2)
            SET_QPEL_FUNC(qpel_pixels_tab[0][13], qpel16_mc13_mmx2)
            SET_QPEL_FUNC(qpel_pixels_tab[0][14], qpel16_mc23_mmx2)
            SET_QPEL_FUNC(qpel_pixels_tab[0][15], qpel16_mc33_mmx2)
            SET_QPEL_FUNC(qpel_pixels_tab[1][ 0], qpel8_mc00_mmx2)
            SET_QPEL_FUNC(qpel_pixels_tab[1][ 1], qpel8_mc10_mmx2)
            SET_QPEL_FUNC(qpel_pixels_tab[1][ 2], qpel8_mc20_mmx2)
            SET_QPEL_FUNC(qpel_pixels_tab[1][ 3], qpel8_mc30_mmx2)
            SET_QPEL_FUNC(qpel_pixels_tab[1][ 4], qpel8_mc01_mmx2)
            SET_QPEL_FUNC(qpel_pixels_tab[1][ 5], qpel8_mc11_mmx2)
            SET_QPEL_FUNC(qpel_pixels_tab[1][ 6], qpel8_mc21_mmx2)
            SET_QPEL_FUNC(qpel_pixels_tab[1][ 7], qpel8_mc31_mmx2)
            SET_QPEL_FUNC(qpel_pixels_tab[1][ 8], qpel8_mc02_mmx2)
            SET_QPEL_FUNC(qpel_pixels_tab[1][ 9], qpel8_mc12_mmx2)
            SET_QPEL_FUNC(qpel_pixels_tab[1][10], qpel8_mc22_mmx2)
            SET_QPEL_FUNC(qpel_pixels_tab[1][11], qpel8_mc32_mmx2)
            SET_QPEL_FUNC(qpel_pixels_tab[1][12], qpel8_mc03_mmx2)
            SET_QPEL_FUNC(qpel_pixels_tab[1][13], qpel8_mc13_mmx2)
            SET_QPEL_FUNC(qpel_pixels_tab[1][14], qpel8_mc23_mmx2)
            SET_QPEL_FUNC(qpel_pixels_tab[1][15], qpel8_mc33_mmx2)
#endif

#ifdef CONFIG_ENCODERS
            c->sub_hfyu_median_prediction= sub_hfyu_median_prediction_mmx2;
#endif //CONFIG_ENCODERS
        } else if (mm_flags & MM_3DNOW) {
            c->put_pixels_tab[0][1] = put_pixels16_x2_3dnow;
            c->put_pixels_tab[0][2] = put_pixels16_y2_3dnow;

            c->avg_pixels_tab[0][0] = avg_pixels16_3dnow;
            c->avg_pixels_tab[0][1] = avg_pixels16_x2_3dnow;
            c->avg_pixels_tab[0][2] = avg_pixels16_y2_3dnow;

            c->put_pixels_tab[1][1] = put_pixels8_x2_3dnow;
            c->put_pixels_tab[1][2] = put_pixels8_y2_3dnow;

            c->avg_pixels_tab[1][0] = avg_pixels8_3dnow;
            c->avg_pixels_tab[1][1] = avg_pixels8_x2_3dnow;
            c->avg_pixels_tab[1][2] = avg_pixels8_y2_3dnow;

            if(!(avctx->flags & CODEC_FLAG_BITEXACT)){
                c->put_no_rnd_pixels_tab[0][1] = put_no_rnd_pixels16_x2_3dnow;
                c->put_no_rnd_pixels_tab[0][2] = put_no_rnd_pixels16_y2_3dnow;
                c->put_no_rnd_pixels_tab[1][1] = put_no_rnd_pixels8_x2_3dnow;
                c->put_no_rnd_pixels_tab[1][2] = put_no_rnd_pixels8_y2_3dnow;
                c->avg_pixels_tab[0][3] = avg_pixels16_xy2_3dnow;
                c->avg_pixels_tab[1][3] = avg_pixels8_xy2_3dnow;
            }

            SET_QPEL_FUNC(qpel_pixels_tab[0][ 0], qpel16_mc00_3dnow)
            SET_QPEL_FUNC(qpel_pixels_tab[0][ 1], qpel16_mc10_3dnow)
            SET_QPEL_FUNC(qpel_pixels_tab[0][ 2], qpel16_mc20_3dnow)
            SET_QPEL_FUNC(qpel_pixels_tab[0][ 3], qpel16_mc30_3dnow)
            SET_QPEL_FUNC(qpel_pixels_tab[0][ 4], qpel16_mc01_3dnow)
            SET_QPEL_FUNC(qpel_pixels_tab[0][ 5], qpel16_mc11_3dnow)
            SET_QPEL_FUNC(qpel_pixels_tab[0][ 6], qpel16_mc21_3dnow)
            SET_QPEL_FUNC(qpel_pixels_tab[0][ 7], qpel16_mc31_3dnow)
            SET_QPEL_FUNC(qpel_pixels_tab[0][ 8], qpel16_mc02_3dnow)
            SET_QPEL_FUNC(qpel_pixels_tab[0][ 9], qpel16_mc12_3dnow)
            SET_QPEL_FUNC(qpel_pixels_tab[0][10], qpel16_mc22_3dnow)
            SET_QPEL_FUNC(qpel_pixels_tab[0][11], qpel16_mc32_3dnow)
            SET_QPEL_FUNC(qpel_pixels_tab[0][12], qpel16_mc03_3dnow)
            SET_QPEL_FUNC(qpel_pixels_tab[0][13], qpel16_mc13_3dnow)
            SET_QPEL_FUNC(qpel_pixels_tab[0][14], qpel16_mc23_3dnow)
            SET_QPEL_FUNC(qpel_pixels_tab[0][15], qpel16_mc33_3dnow)
            SET_QPEL_FUNC(qpel_pixels_tab[1][ 0], qpel8_mc00_3dnow)
            SET_QPEL_FUNC(qpel_pixels_tab[1][ 1], qpel8_mc10_3dnow)
            SET_QPEL_FUNC(qpel_pixels_tab[1][ 2], qpel8_mc20_3dnow)
            SET_QPEL_FUNC(qpel_pixels_tab[1][ 3], qpel8_mc30_3dnow)
            SET_QPEL_FUNC(qpel_pixels_tab[1][ 4], qpel8_mc01_3dnow)
            SET_QPEL_FUNC(qpel_pixels_tab[1][ 5], qpel8_mc11_3dnow)
            SET_QPEL_FUNC(qpel_pixels_tab[1][ 6], qpel8_mc21_3dnow)
            SET_QPEL_FUNC(qpel_pixels_tab[1][ 7], qpel8_mc31_3dnow)
            SET_QPEL_FUNC(qpel_pixels_tab[1][ 8], qpel8_mc02_3dnow)
            SET_QPEL_FUNC(qpel_pixels_tab[1][ 9], qpel8_mc12_3dnow)
            SET_QPEL_FUNC(qpel_pixels_tab[1][10], qpel8_mc22_3dnow)
            SET_QPEL_FUNC(qpel_pixels_tab[1][11], qpel8_mc32_3dnow)
            SET_QPEL_FUNC(qpel_pixels_tab[1][12], qpel8_mc03_3dnow)
            SET_QPEL_FUNC(qpel_pixels_tab[1][13], qpel8_mc13_3dnow)
            SET_QPEL_FUNC(qpel_pixels_tab[1][14], qpel8_mc23_3dnow)
            SET_QPEL_FUNC(qpel_pixels_tab[1][15], qpel8_mc33_3dnow)
        }
    }

#ifdef CONFIG_ENCODERS
    dsputil_init_pix_mmx(c, avctx);
#endif //CONFIG_ENCODERS
#if 0
    // for speed testing
    get_pixels = just_return;
    put_pixels_clamped = just_return;
    add_pixels_clamped = just_return;

    pix_abs16x16 = just_return;
    pix_abs16x16_x2 = just_return;
    pix_abs16x16_y2 = just_return;
    pix_abs16x16_xy2 = just_return;

    put_pixels_tab[0] = just_return;
    put_pixels_tab[1] = just_return;
    put_pixels_tab[2] = just_return;
    put_pixels_tab[3] = just_return;

    put_no_rnd_pixels_tab[0] = just_return;
    put_no_rnd_pixels_tab[1] = just_return;
    put_no_rnd_pixels_tab[2] = just_return;
    put_no_rnd_pixels_tab[3] = just_return;

    avg_pixels_tab[0] = just_return;
    avg_pixels_tab[1] = just_return;
    avg_pixels_tab[2] = just_return;
    avg_pixels_tab[3] = just_return;

    avg_no_rnd_pixels_tab[0] = just_return;
    avg_no_rnd_pixels_tab[1] = just_return;
    avg_no_rnd_pixels_tab[2] = just_return;
    avg_no_rnd_pixels_tab[3] = just_return;

    //av_fdct = just_return;
    //ff_idct = just_return;
#endif
}