/*
 * MMX optimized DSP utils
 * Copyright (c) 2000, 2001 Fabrice Bellard.
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 *
 * MMX optimization by Nick Kurshev <nickols_k@mail.ru>
 */

#include "../dsputil.h"
#include "../simple_idct.h"

//#undef NDEBUG
//#include <assert.h>

extern const uint8_t ff_h263_loop_filter_strength[32];

int mm_flags; /* multimedia extension flags */

/* pixel operations */
static const uint64_t mm_bone __attribute__ ((aligned(8))) = 0x0101010101010101ULL;
static const uint64_t mm_wone __attribute__ ((aligned(8))) = 0x0001000100010001ULL;
static const uint64_t mm_wtwo __attribute__ ((aligned(8))) = 0x0002000200020002ULL;

static const uint64_t ff_pw_20 __attribute__ ((aligned(8))) = 0x0014001400140014ULL;
static const uint64_t ff_pw_3  __attribute__ ((aligned(8))) = 0x0003000300030003ULL;
static const uint64_t ff_pw_16 __attribute__ ((aligned(8))) = 0x0010001000100010ULL;
static const uint64_t ff_pw_15 __attribute__ ((aligned(8))) = 0x000F000F000F000FULL;

static const uint64_t ff_pb_FC __attribute__ ((aligned(8))) = 0xFCFCFCFCFCFCFCFCULL;

#define JUMPALIGN() __asm __volatile (".balign 8"::)
#define MOVQ_ZERO(regd)  __asm __volatile ("pxor %%" #regd ", %%" #regd ::)

#define MOVQ_WONE(regd) \
    __asm __volatile ( \
    "pcmpeqd %%" #regd ", %%" #regd " \n\t" \
    "psrlw $15, %%" #regd ::)

#define MOVQ_BFE(regd) \
    __asm __volatile ( \
    "pcmpeqd %%" #regd ", %%" #regd " \n\t"\
    "paddb %%" #regd ", %%" #regd " \n\t" ::)

#ifndef PIC
#define MOVQ_BONE(regd)  __asm __volatile ("movq %0, %%" #regd " \n\t" ::"m"(mm_bone))
#define MOVQ_WTWO(regd)  __asm __volatile ("movq %0, %%" #regd " \n\t" ::"m"(mm_wtwo))
#else
// for shared libraries it is better to access the constants this way,
// generating them in registers instead of loading them from memory
// pcmpeqd -> -1
#define MOVQ_BONE(regd) \
    __asm __volatile ( \
    "pcmpeqd %%" #regd ", %%" #regd " \n\t" \
    "psrlw $15, %%" #regd " \n\t" \
    "packuswb %%" #regd ", %%" #regd " \n\t" ::)

#define MOVQ_WTWO(regd) \
    __asm __volatile ( \
    "pcmpeqd %%" #regd ", %%" #regd " \n\t" \
    "psrlw $15, %%" #regd " \n\t" \
    "psllw $1, %%" #regd " \n\t"::)

#endif
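
/* Illustrative scalar model of the PIC-safe constant generation above
 * (a hedged sketch, not part of the original code or of the build):
 * pcmpeqd sets every bit, psrlw $15 leaves 1 in each 16-bit word, and
 * packuswb packs those words into bytes, reproducing mm_bone without a
 * memory load. The helper name is hypothetical. */
#if 0
static uint64_t movq_bone_model(void)
{
    uint64_t r = ~0ULL;                       /* pcmpeqd: all bits set  */
    uint64_t out = 0;
    int i;
    for (i = 0; i < 4; i++) {
        uint16_t w = (uint16_t)(r >> (16 * i)) >> 15;    /* psrlw $15   */
        out |= (uint64_t)(w > 255 ? 255 : w) << (8 * i); /* packuswb    */
    }
    return out | out << 32;                   /* == 0x0101010101010101  */
}
#endif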
// using regr as temporary and for the output result
// first argument is unmodified and second is trashed
// regfe is supposed to contain 0xfefefefefefefefe
#define PAVGB_MMX_NO_RND(rega, regb, regr, regfe) \
    "movq " #rega ", " #regr "        \n\t"\
    "pand " #regb ", " #regr "        \n\t"\
    "pxor " #rega ", " #regb "        \n\t"\
    "pand " #regfe "," #regb "        \n\t"\
    "psrlq $1, " #regb "         \n\t"\
    "paddb " #regb ", " #regr "        \n\t"

#define PAVGB_MMX(rega, regb, regr, regfe) \
    "movq " #rega ", " #regr "        \n\t"\
    "por  " #regb ", " #regr "        \n\t"\
    "pxor " #rega ", " #regb "        \n\t"\
    "pand " #regfe "," #regb "        \n\t"\
    "psrlq $1, " #regb "        \n\t"\
    "psubb " #regb ", " #regr "        \n\t"

// mm6 is supposed to contain 0xfefefefefefefefe
#define PAVGBP_MMX_NO_RND(rega, regb, regr,  regc, regd, regp) \
    "movq " #rega ", " #regr "        \n\t"\
    "movq " #regc ", " #regp "        \n\t"\
    "pand " #regb ", " #regr "        \n\t"\
    "pand " #regd ", " #regp "        \n\t"\
    "pxor " #rega ", " #regb "        \n\t"\
    "pxor " #regc ", " #regd "        \n\t"\
    "pand %%mm6, " #regb "        \n\t"\
    "pand %%mm6, " #regd "        \n\t"\
    "psrlq $1, " #regb "         \n\t"\
    "psrlq $1, " #regd "         \n\t"\
    "paddb " #regb ", " #regr "        \n\t"\
    "paddb " #regd ", " #regp "        \n\t"

#define PAVGBP_MMX(rega, regb, regr, regc, regd, regp) \
    "movq " #rega ", " #regr "        \n\t"\
    "movq " #regc ", " #regp "        \n\t"\
    "por  " #regb ", " #regr "        \n\t"\
    "por  " #regd ", " #regp "        \n\t"\
    "pxor " #rega ", " #regb "        \n\t"\
    "pxor " #regc ", " #regd "        \n\t"\
    "pand %%mm6, " #regb "             \n\t"\
    "pand %%mm6, " #regd "             \n\t"\
    "psrlq $1, " #regd "        \n\t"\
    "psrlq $1, " #regb "        \n\t"\
    "psubb " #regb ", " #regr "        \n\t"\
    "psubb " #regd ", " #regp "        \n\t"

/***********************************/
/* MMX no rounding */
#define DEF(x, y) x ## _no_rnd_ ## y ##_mmx
#define SET_RND  MOVQ_WONE
#define PAVGBP(a, b, c, d, e, f)        PAVGBP_MMX_NO_RND(a, b, c, d, e, f)
#define PAVGB(a, b, c, e)                PAVGB_MMX_NO_RND(a, b, c, e)

#include "dsputil_mmx_rnd.h"

#undef DEF
#undef SET_RND
#undef PAVGBP
#undef PAVGB
/***********************************/
/* MMX rounding */

#define DEF(x, y) x ## _ ## y ##_mmx
#define SET_RND  MOVQ_WTWO
#define PAVGBP(a, b, c, d, e, f)        PAVGBP_MMX(a, b, c, d, e, f)
#define PAVGB(a, b, c, e)                PAVGB_MMX(a, b, c, e)

#include "dsputil_mmx_rnd.h"

#undef DEF
#undef SET_RND
#undef PAVGBP
#undef PAVGB

/***********************************/
/* 3Dnow specific */

#define DEF(x) x ## _3dnow
/* on Athlons PAVGUSB is preferred */
#define PAVGB "pavgusb"

#include "dsputil_mmx_avg.h"

#undef DEF
#undef PAVGB

/***********************************/
/* MMX2 specific */

#define DEF(x) x ## _mmx2

/* introduced only in the MMX2 instruction set */
#define PAVGB "pavgb"

#include "dsputil_mmx_avg.h"

#undef DEF
#undef PAVGB

/***********************************/
/* standard MMX */

#ifdef CONFIG_ENCODERS
static void get_pixels_mmx(DCTELEM *block, const uint8_t *pixels, int line_size)
{
    asm volatile(
        "movl $-128, %%eax        \n\t"
        "pxor %%mm7, %%mm7        \n\t"
        ".balign 16                \n\t"
        "1:                        \n\t"
        "movq (%0), %%mm0        \n\t"
        "movq (%0, %2), %%mm2        \n\t"
        "movq %%mm0, %%mm1        \n\t"
        "movq %%mm2, %%mm3        \n\t"
        "punpcklbw %%mm7, %%mm0        \n\t"
        "punpckhbw %%mm7, %%mm1        \n\t"
        "punpcklbw %%mm7, %%mm2        \n\t"
        "punpckhbw %%mm7, %%mm3        \n\t"
        "movq %%mm0, (%1, %%eax)\n\t"
        "movq %%mm1, 8(%1, %%eax)\n\t"
        "movq %%mm2, 16(%1, %%eax)\n\t"
        "movq %%mm3, 24(%1, %%eax)\n\t"
        "addl %3, %0                \n\t"
        "addl $32, %%eax        \n\t"
        "js 1b                        \n\t"
        : "+r" (pixels)
        : "r" (block+64), "r" (line_size), "r" (line_size*2)
        : "%eax"
    );
}
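
/* Scalar reference for get_pixels_mmx above (a hedged sketch, not part
 * of the original code or of the build): read an 8x8 block of bytes and
 * widen it to DCTELEMs. The helper name is hypothetical. */
#if 0
static void get_pixels_c_model(DCTELEM *block, const uint8_t *pixels, int line_size)
{
    int i, j;
    for (i = 0; i < 8; i++) {
        for (j = 0; j < 8; j++)
            block[8 * i + j] = pixels[j];  /* punpcklbw/punpckhbw widen */
        pixels += line_size;
    }
}
#endif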

static inline void diff_pixels_mmx(DCTELEM *block, const uint8_t *s1, const uint8_t *s2, int stride)
{
    asm volatile(
        "pxor %%mm7, %%mm7        \n\t"
        "movl $-128, %%eax        \n\t"
        ".balign 16                \n\t"
        "1:                        \n\t"
        "movq (%0), %%mm0        \n\t"
        "movq (%1), %%mm2        \n\t"
        "movq %%mm0, %%mm1        \n\t"
        "movq %%mm2, %%mm3        \n\t"
        "punpcklbw %%mm7, %%mm0        \n\t"
        "punpckhbw %%mm7, %%mm1        \n\t"
        "punpcklbw %%mm7, %%mm2        \n\t"
        "punpckhbw %%mm7, %%mm3        \n\t"
        "psubw %%mm2, %%mm0        \n\t"
        "psubw %%mm3, %%mm1        \n\t"
        "movq %%mm0, (%2, %%eax)\n\t"
        "movq %%mm1, 8(%2, %%eax)\n\t"
        "addl %3, %0                \n\t"
        "addl %3, %1                \n\t"
        "addl $16, %%eax        \n\t"
        "jnz 1b                        \n\t"
        : "+r" (s1), "+r" (s2)
        : "r" (block+64), "r" (stride)
        : "%eax"
    );
}
#endif //CONFIG_ENCODERS

void put_pixels_clamped_mmx(const DCTELEM *block, uint8_t *pixels, int line_size)
{
    const DCTELEM *p;
    uint8_t *pix;

    /* read the pixels */
    p = block;
    pix = pixels;
    /* unrolled loop */
    __asm __volatile(
            "movq        %3, %%mm0\n\t"
            "movq        8%3, %%mm1\n\t"
            "movq        16%3, %%mm2\n\t"
            "movq        24%3, %%mm3\n\t"
            "movq        32%3, %%mm4\n\t"
            "movq        40%3, %%mm5\n\t"
            "movq        48%3, %%mm6\n\t"
            "movq        56%3, %%mm7\n\t"
            "packuswb %%mm1, %%mm0\n\t"
            "packuswb %%mm3, %%mm2\n\t"
            "packuswb %%mm5, %%mm4\n\t"
            "packuswb %%mm7, %%mm6\n\t"
            "movq        %%mm0, (%0)\n\t"
            "movq        %%mm2, (%0, %1)\n\t"
            "movq        %%mm4, (%0, %1, 2)\n\t"
            "movq        %%mm6, (%0, %2)\n\t"
            ::"r" (pix), "r" (line_size), "r" (line_size*3), "m"(*p)
            :"memory");
    pix += line_size*4;
    p += 32;

    // if this were an exact copy of the block above, the compiler
    // would generate some very strange code, thus we use an "r"
    // constraint for the second half
    __asm __volatile(
            "movq        (%3), %%mm0\n\t"
            "movq        8(%3), %%mm1\n\t"
            "movq        16(%3), %%mm2\n\t"
            "movq        24(%3), %%mm3\n\t"
            "movq        32(%3), %%mm4\n\t"
            "movq        40(%3), %%mm5\n\t"
            "movq        48(%3), %%mm6\n\t"
            "movq        56(%3), %%mm7\n\t"
            "packuswb %%mm1, %%mm0\n\t"
            "packuswb %%mm3, %%mm2\n\t"
            "packuswb %%mm5, %%mm4\n\t"
            "packuswb %%mm7, %%mm6\n\t"
            "movq        %%mm0, (%0)\n\t"
            "movq        %%mm2, (%0, %1)\n\t"
            "movq        %%mm4, (%0, %1, 2)\n\t"
            "movq        %%mm6, (%0, %2)\n\t"
            ::"r" (pix), "r" (line_size), "r" (line_size*3), "r"(p)
            :"memory");
}
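
/* Scalar model of the packuswb clamping used above (a hedged sketch,
 * not part of the original code or of the build): each 16-bit DCT
 * coefficient is saturated to 0..255 before being stored as a pixel.
 * The helper name is hypothetical. */
#if 0
static inline uint8_t clamp_u8_model(int v)
{
    return v < 0 ? 0 : v > 255 ? 255 : v;   /* packuswb saturation */
}
#endif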

void add_pixels_clamped_mmx(const DCTELEM *block, uint8_t *pixels, int line_size)
{
    const DCTELEM *p;
    uint8_t *pix;
    int i;

    /* read the pixels */
    p = block;
    pix = pixels;
    MOVQ_ZERO(mm7);
    i = 4;
    do {
        __asm __volatile(
                "movq        (%2), %%mm0\n\t"
                "movq        8(%2), %%mm1\n\t"
                "movq        16(%2), %%mm2\n\t"
                "movq        24(%2), %%mm3\n\t"
                "movq        %0, %%mm4\n\t"
                "movq        %1, %%mm6\n\t"
                "movq        %%mm4, %%mm5\n\t"
                "punpcklbw %%mm7, %%mm4\n\t"
                "punpckhbw %%mm7, %%mm5\n\t"
                "paddsw        %%mm4, %%mm0\n\t"
                "paddsw        %%mm5, %%mm1\n\t"
                "movq        %%mm6, %%mm5\n\t"
                "punpcklbw %%mm7, %%mm6\n\t"
                "punpckhbw %%mm7, %%mm5\n\t"
                "paddsw        %%mm6, %%mm2\n\t"
                "paddsw        %%mm5, %%mm3\n\t"
                "packuswb %%mm1, %%mm0\n\t"
                "packuswb %%mm3, %%mm2\n\t"
                "movq        %%mm0, %0\n\t"
                "movq        %%mm2, %1\n\t"
                :"+m"(*pix), "+m"(*(pix+line_size))
                :"r"(p)
                :"memory");
        pix += line_size*2;
        p += 16;
    } while (--i);
}

static void put_pixels8_mmx(uint8_t *block, const uint8_t *pixels, int line_size, int h)
{
    __asm __volatile(
         "lea (%3, %3), %%eax                \n\t"
         ".balign 8                        \n\t"
         "1:                                \n\t"
         "movq (%1), %%mm0                \n\t"
         "movq (%1, %3), %%mm1                \n\t"
         "movq %%mm0, (%2)                \n\t"
         "movq %%mm1, (%2, %3)                \n\t"
         "addl %%eax, %1                \n\t"
         "addl %%eax, %2                       \n\t"
         "movq (%1), %%mm0                \n\t"
         "movq (%1, %3), %%mm1                \n\t"
         "movq %%mm0, (%2)                \n\t"
         "movq %%mm1, (%2, %3)                \n\t"
         "addl %%eax, %1                \n\t"
         "addl %%eax, %2                       \n\t"
         "subl $4, %0                        \n\t"
         "jnz 1b                        \n\t"
         : "+g"(h), "+r" (pixels),  "+r" (block)
         : "r"(line_size)
         : "%eax", "memory"
        );
}

static void put_pixels16_mmx(uint8_t *block, const uint8_t *pixels, int line_size, int h)
{
    __asm __volatile(
         "lea (%3, %3), %%eax                \n\t"
         ".balign 8                        \n\t"
         "1:                                \n\t"
         "movq (%1), %%mm0                \n\t"
         "movq 8(%1), %%mm4                \n\t"
         "movq (%1, %3), %%mm1                \n\t"
         "movq 8(%1, %3), %%mm5                \n\t"
         "movq %%mm0, (%2)                \n\t"
         "movq %%mm4, 8(%2)                \n\t"
         "movq %%mm1, (%2, %3)                \n\t"
         "movq %%mm5, 8(%2, %3)                \n\t"
         "addl %%eax, %1                \n\t"
         "addl %%eax, %2                       \n\t"
         "movq (%1), %%mm0                \n\t"
         "movq 8(%1), %%mm4                \n\t"
         "movq (%1, %3), %%mm1                \n\t"
         "movq 8(%1, %3), %%mm5                \n\t"
         "movq %%mm0, (%2)                \n\t"
         "movq %%mm4, 8(%2)                \n\t"
         "movq %%mm1, (%2, %3)                \n\t"
         "movq %%mm5, 8(%2, %3)                \n\t"
         "addl %%eax, %1                \n\t"
         "addl %%eax, %2                       \n\t"
         "subl $4, %0                        \n\t"
         "jnz 1b                        \n\t"
         : "+g"(h), "+r" (pixels),  "+r" (block)
         : "r"(line_size)
         : "%eax", "memory"
        );
}

static void clear_blocks_mmx(DCTELEM *blocks)
{
    __asm __volatile(
                "pxor %%mm7, %%mm7                \n\t"
                "movl $-128*6, %%eax                \n\t"
                "1:                                \n\t"
                "movq %%mm7, (%0, %%eax)        \n\t"
                "movq %%mm7, 8(%0, %%eax)        \n\t"
                "movq %%mm7, 16(%0, %%eax)        \n\t"
                "movq %%mm7, 24(%0, %%eax)        \n\t"
                "addl $32, %%eax                \n\t"
                " js 1b                                \n\t"
                : : "r" (((int)blocks)+128*6)
                : "%eax"
        );
}

#ifdef CONFIG_ENCODERS
static int pix_sum16_mmx(uint8_t * pix, int line_size){
    const int h=16;
    int sum;
    int index= -line_size*h;

    __asm __volatile(
                "pxor %%mm7, %%mm7                \n\t"
                "pxor %%mm6, %%mm6                \n\t"
                "1:                                \n\t"
                "movq (%2, %1), %%mm0                \n\t"
                "movq (%2, %1), %%mm1                \n\t"
                "movq 8(%2, %1), %%mm2                \n\t"
                "movq 8(%2, %1), %%mm3                \n\t"
                "punpcklbw %%mm7, %%mm0                \n\t"
                "punpckhbw %%mm7, %%mm1                \n\t"
                "punpcklbw %%mm7, %%mm2                \n\t"
                "punpckhbw %%mm7, %%mm3                \n\t"
                "paddw %%mm0, %%mm1                \n\t"
                "paddw %%mm2, %%mm3                \n\t"
                "paddw %%mm1, %%mm3                \n\t"
                "paddw %%mm3, %%mm6                \n\t"
                "addl %3, %1                        \n\t"
                " js 1b                                \n\t"
                "movq %%mm6, %%mm5                \n\t"
                "psrlq $32, %%mm6                \n\t"
                "paddw %%mm5, %%mm6                \n\t"
                "movq %%mm6, %%mm5                \n\t"
                "psrlq $16, %%mm6                \n\t"
                "paddw %%mm5, %%mm6                \n\t"
                "movd %%mm6, %0                        \n\t"
                "andl $0xFFFF, %0                \n\t"
                : "=&r" (sum), "+r" (index)
                : "r" (pix - index), "r" (line_size)
        );

    return sum;
}
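
/* Scalar model of the final horizontal add above (a hedged sketch, not
 * part of the original code or of the build): the psrlq/paddw pair
 * folds the four 16-bit partial sums in mm6 into the low word, which
 * "andl $0xFFFF" then extracts. The helper name is hypothetical. */
#if 0
static int hsum_words_model(uint64_t v)
{
    int sum = 0, i;
    for (i = 0; i < 4; i++)
        sum += (int)((v >> (16 * i)) & 0xFFFF);
    return sum & 0xFFFF;
}
#endif
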
#endif //CONFIG_ENCODERS

static void add_bytes_mmx(uint8_t *dst, uint8_t *src, int w){
    int i=0;
    asm volatile(
        "1:                                \n\t"
        "movq  (%1, %0), %%mm0                \n\t"
        "movq  (%2, %0), %%mm1                \n\t"
        "paddb %%mm0, %%mm1                \n\t"
        "movq %%mm1, (%2, %0)                \n\t"
        "movq 8(%1, %0), %%mm0                \n\t"
        "movq 8(%2, %0), %%mm1                \n\t"
        "paddb %%mm0, %%mm1                \n\t"
        "movq %%mm1, 8(%2, %0)                \n\t"
        "addl $16, %0                        \n\t"
        "cmpl %3, %0                        \n\t"
        " jb 1b                                \n\t"
        : "+r" (i)
        : "r"(src), "r"(dst), "r"(w-15)
    );
    for(; i<w; i++)
        dst[i+0] += src[i+0];
}

#define H263_LOOP_FILTER \
        "pxor %%mm7, %%mm7                \n\t"\
        "movq  %0, %%mm0                \n\t"\
        "movq  %0, %%mm1                \n\t"\
        "movq  %3, %%mm2                \n\t"\
        "movq  %3, %%mm3                \n\t"\
        "punpcklbw %%mm7, %%mm0                \n\t"\
        "punpckhbw %%mm7, %%mm1                \n\t"\
        "punpcklbw %%mm7, %%mm2                \n\t"\
        "punpckhbw %%mm7, %%mm3                \n\t"\
        "psubw %%mm2, %%mm0                \n\t"\
        "psubw %%mm3, %%mm1                \n\t"\
        "movq  %1, %%mm2                \n\t"\
        "movq  %1, %%mm3                \n\t"\
        "movq  %2, %%mm4                \n\t"\
        "movq  %2, %%mm5                \n\t"\
        "punpcklbw %%mm7, %%mm2                \n\t"\
        "punpckhbw %%mm7, %%mm3                \n\t"\
        "punpcklbw %%mm7, %%mm4                \n\t"\
        "punpckhbw %%mm7, %%mm5                \n\t"\
        "psubw %%mm2, %%mm4                \n\t"\
        "psubw %%mm3, %%mm5                \n\t"\
        "psllw $2, %%mm4                \n\t"\
        "psllw $2, %%mm5                \n\t"\
        "paddw %%mm0, %%mm4                \n\t"\
        "paddw %%mm1, %%mm5                \n\t"\
        "pxor %%mm6, %%mm6                \n\t"\
        "pcmpgtw %%mm4, %%mm6                \n\t"\
        "pcmpgtw %%mm5, %%mm7                \n\t"\
        "pxor %%mm6, %%mm4                \n\t"\
        "pxor %%mm7, %%mm5                \n\t"\
        "psubw %%mm6, %%mm4                \n\t"\
        "psubw %%mm7, %%mm5                \n\t"\
        "psrlw $3, %%mm4                \n\t"\
        "psrlw $3, %%mm5                \n\t"\
        "packuswb %%mm5, %%mm4                \n\t"\
        "packsswb %%mm7, %%mm6                \n\t"\
        "pxor %%mm7, %%mm7                \n\t"\
        "movd %4, %%mm2                        \n\t"\
        "punpcklbw %%mm2, %%mm2                \n\t"\
        "punpcklbw %%mm2, %%mm2                \n\t"\
        "punpcklbw %%mm2, %%mm2                \n\t"\
        "psubusb %%mm4, %%mm2                \n\t"\
        "movq %%mm2, %%mm3                \n\t"\
        "psubusb %%mm4, %%mm3                \n\t"\
        "psubb %%mm3, %%mm2                \n\t"\
        "movq %1, %%mm3                        \n\t"\
        "movq %2, %%mm4                        \n\t"\
        "pxor %%mm6, %%mm3                \n\t"\
        "pxor %%mm6, %%mm4                \n\t"\
        "paddusb %%mm2, %%mm3                \n\t"\
        "psubusb %%mm2, %%mm4                \n\t"\
        "pxor %%mm6, %%mm3                \n\t"\
        "pxor %%mm6, %%mm4                \n\t"\
        "paddusb %%mm2, %%mm2                \n\t"\
        "packsswb %%mm1, %%mm0                \n\t"\
        "pcmpgtb %%mm0, %%mm7                \n\t"\
        "pxor %%mm7, %%mm0                \n\t"\
        "psubb %%mm7, %%mm0                \n\t"\
        "movq %%mm0, %%mm1                \n\t"\
        "psubusb %%mm2, %%mm0                \n\t"\
        "psubb %%mm0, %%mm1                \n\t"\
        "pand %5, %%mm1                        \n\t"\
        "psrlw $2, %%mm1                \n\t"\
        "pxor %%mm7, %%mm1                \n\t"\
        "psubb %%mm7, %%mm1                \n\t"\
        "movq %0, %%mm5                        \n\t"\
        "movq %3, %%mm6                        \n\t"\
        "psubb %%mm1, %%mm5                \n\t"\
        "paddb %%mm1, %%mm6                \n\t"

static void h263_v_loop_filter_mmx(uint8_t *src, int stride, int qscale){
    const int strength= ff_h263_loop_filter_strength[qscale];

    asm volatile(

        H263_LOOP_FILTER

        "movq %%mm3, %1                        \n\t"
        "movq %%mm4, %2                        \n\t"
        "movq %%mm5, %0                        \n\t"
        "movq %%mm6, %3                        \n\t"
        : "+m" (*(uint64_t*)(src - 2*stride)),
          "+m" (*(uint64_t*)(src - 1*stride)),
          "+m" (*(uint64_t*)(src + 0*stride)),
          "+m" (*(uint64_t*)(src + 1*stride))
        : "g" (2*strength), "m"(ff_pb_FC)
    );
}

static inline void transpose4x4(uint8_t *dst, uint8_t *src, int dst_stride, int src_stride){
    asm volatile( //FIXME could save 1 instruction if done as 8x4 ...
        "movd  %4, %%mm0                \n\t"
        "movd  %5, %%mm1                \n\t"
        "movd  %6, %%mm2                \n\t"
        "movd  %7, %%mm3                \n\t"
        "punpcklbw %%mm1, %%mm0                \n\t"
        "punpcklbw %%mm3, %%mm2                \n\t"
        "movq %%mm0, %%mm1                \n\t"
        "punpcklwd %%mm2, %%mm0                \n\t"
        "punpckhwd %%mm2, %%mm1                \n\t"
        "movd  %%mm0, %0                \n\t"
        "punpckhdq %%mm0, %%mm0                \n\t"
        "movd  %%mm0, %1                \n\t"
        "movd  %%mm1, %2                \n\t"
        "punpckhdq %%mm1, %%mm1                \n\t"
        "movd  %%mm1, %3                \n\t"

        : "=m" (*(uint32_t*)(dst + 0*dst_stride)),
          "=m" (*(uint32_t*)(dst + 1*dst_stride)),
          "=m" (*(uint32_t*)(dst + 2*dst_stride)),
          "=m" (*(uint32_t*)(dst + 3*dst_stride))
        :  "m" (*(uint32_t*)(src + 0*src_stride)),
           "m" (*(uint32_t*)(src + 1*src_stride)),
           "m" (*(uint32_t*)(src + 2*src_stride)),
           "m" (*(uint32_t*)(src + 3*src_stride))
    );
}
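
/* Scalar reference for transpose4x4 above (a hedged sketch, not part of
 * the original code or of the build). The helper name is hypothetical. */
#if 0
static void transpose4x4_c_model(uint8_t *dst, uint8_t *src, int dst_stride, int src_stride)
{
    int i, j;
    for (i = 0; i < 4; i++)
        for (j = 0; j < 4; j++)
            dst[i * dst_stride + j] = src[j * src_stride + i];
}
#endif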

static void h263_h_loop_filter_mmx(uint8_t *src, int stride, int qscale){
    const int strength= ff_h263_loop_filter_strength[qscale];
    uint64_t temp[4] __attribute__ ((aligned(8)));
    uint8_t *btemp= (uint8_t*)temp;

    src -= 2;

    transpose4x4(btemp  , src           , 8, stride);
    transpose4x4(btemp+4, src + 4*stride, 8, stride);
    asm volatile(
        H263_LOOP_FILTER // 5 3 4 6

        : "+m" (temp[0]),
          "+m" (temp[1]),
          "+m" (temp[2]),
          "+m" (temp[3])
        : "g" (2*strength), "m"(ff_pb_FC)
    );

    asm volatile(
        "movq %%mm5, %%mm1                \n\t"
        "movq %%mm4, %%mm0                \n\t"
        "punpcklbw %%mm3, %%mm5                \n\t"
        "punpcklbw %%mm6, %%mm4                \n\t"
        "punpckhbw %%mm3, %%mm1                \n\t"
        "punpckhbw %%mm6, %%mm0                \n\t"
        "movq %%mm5, %%mm3                \n\t"
        "movq %%mm1, %%mm6                \n\t"
        "punpcklwd %%mm4, %%mm5                \n\t"
        "punpcklwd %%mm0, %%mm1                \n\t"
        "punpckhwd %%mm4, %%mm3                \n\t"
        "punpckhwd %%mm0, %%mm6                \n\t"
        "movd %%mm5, %0                        \n\t"
        "punpckhdq %%mm5, %%mm5                \n\t"
        "movd %%mm5, %1                        \n\t"
        "movd %%mm3, %2                        \n\t"
        "punpckhdq %%mm3, %%mm3                \n\t"
        "movd %%mm3, %3                        \n\t"
        "movd %%mm1, %4                        \n\t"
        "punpckhdq %%mm1, %%mm1                \n\t"
        "movd %%mm1, %5                        \n\t"
        "movd %%mm6, %6                        \n\t"
        "punpckhdq %%mm6, %%mm6                \n\t"
        "movd %%mm6, %7                        \n\t"
        : "=m" (*(uint32_t*)(src + 0*stride)),
          "=m" (*(uint32_t*)(src + 1*stride)),
          "=m" (*(uint32_t*)(src + 2*stride)),
          "=m" (*(uint32_t*)(src + 3*stride)),
          "=m" (*(uint32_t*)(src + 4*stride)),
          "=m" (*(uint32_t*)(src + 5*stride)),
          "=m" (*(uint32_t*)(src + 6*stride)),
          "=m" (*(uint32_t*)(src + 7*stride))
    );
}

#ifdef CONFIG_ENCODERS
static int pix_norm1_mmx(uint8_t *pix, int line_size) {
    int tmp;
  asm volatile (
      "movl $16,%%ecx\n"
      "pxor %%mm0,%%mm0\n"
      "pxor %%mm7,%%mm7\n"
      "1:\n"
      "movq (%0),%%mm2\n"        /* mm2 = pix[0-7] */
      "movq 8(%0),%%mm3\n"        /* mm3 = pix[8-15] */

      "movq %%mm2,%%mm1\n"        /* mm1 = mm2 = pix[0-7] */

      "punpckhbw %%mm0,%%mm1\n"        /* mm1 = [pix4-7] */
      "punpcklbw %%mm0,%%mm2\n"        /* mm2 = [pix0-3] */

      "movq %%mm3,%%mm4\n"        /* mm4 = mm3 = pix[8-15] */
      "punpckhbw %%mm0,%%mm3\n"        /* mm3 = [pix12-15] */
      "punpcklbw %%mm0,%%mm4\n"        /* mm4 = [pix8-11] */

      "pmaddwd %%mm1,%%mm1\n"        /* mm1 = (pix4^2+pix5^2,pix6^2+pix7^2) */
      "pmaddwd %%mm2,%%mm2\n"        /* mm2 = (pix0^2+pix1^2,pix2^2+pix3^2) */

      "pmaddwd %%mm3,%%mm3\n"
      "pmaddwd %%mm4,%%mm4\n"

      "paddd %%mm1,%%mm2\n"        /* mm2 = (pix0^2+pix1^2+pix4^2+pix5^2,
                                          pix2^2+pix3^2+pix6^2+pix7^2) */
      "paddd %%mm3,%%mm4\n"
      "paddd %%mm2,%%mm7\n"

      "addl %2, %0\n"
      "paddd %%mm4,%%mm7\n"
      "dec %%ecx\n"
      "jnz 1b\n"

      "movq %%mm7,%%mm1\n"
      "psrlq $32, %%mm7\n"        /* shift hi dword to lo */
      "paddd %%mm7,%%mm1\n"
      "movd %%mm1,%1\n"
      : "+r" (pix), "=r"(tmp) : "r" (line_size) : "%ecx" );
    return tmp;
}
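
/* Scalar model of the pmaddwd squaring above (a hedged sketch, not part
 * of the original code or of the build): pmaddwd of a register with
 * itself yields a^2 + b^2 for each pair of adjacent 16-bit lanes,
 * accumulated as 32-bit dwords. The helper name is hypothetical. */
#if 0
static int pmaddwd_self_model(int16_t a, int16_t b)
{
    return a * a + b * b;
}
#endif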

static int sse16_mmx(void *v, uint8_t * pix1, uint8_t * pix2, int line_size, int h) {
    int tmp;
  asm volatile (
      "movl %4,%%ecx\n"
      "pxor %%mm0,%%mm0\n"        /* mm0 = 0 */
      "pxor %%mm7,%%mm7\n"        /* mm7 holds the sum */
      "1:\n"
      "movq (%0),%%mm1\n"        /* mm1 = pix1[0-7] */
      "movq (%1),%%mm2\n"        /* mm2 = pix2[0-7] */
      "movq 8(%0),%%mm3\n"        /* mm3 = pix1[8-15] */
      "movq 8(%1),%%mm4\n"        /* mm4 = pix2[8-15] */

      /* todo: mm1-mm2, mm3-mm4 */
      /* algo: subtract mm1 from mm2 with saturation and vice versa */
      /*       OR the results to get absolute difference */
      "movq %%mm1,%%mm5\n"
      "movq %%mm3,%%mm6\n"
      "psubusb %%mm2,%%mm1\n"
      "psubusb %%mm4,%%mm3\n"
      "psubusb %%mm5,%%mm2\n"
      "psubusb %%mm6,%%mm4\n"

      "por %%mm1,%%mm2\n"
      "por %%mm3,%%mm4\n"

      /* now convert to 16-bit vectors so we can square them */
      "movq %%mm2,%%mm1\n"
      "movq %%mm4,%%mm3\n"

      "punpckhbw %%mm0,%%mm2\n"
      "punpckhbw %%mm0,%%mm4\n"
      "punpcklbw %%mm0,%%mm1\n"        /* mm1 now spread over (mm1,mm2) */
      "punpcklbw %%mm0,%%mm3\n"        /* mm4 now spread over (mm3,mm4) */

      "pmaddwd %%mm2,%%mm2\n"
      "pmaddwd %%mm4,%%mm4\n"
      "pmaddwd %%mm1,%%mm1\n"
      "pmaddwd %%mm3,%%mm3\n"

      "addl %3,%0\n"
      "addl %3,%1\n"

      "paddd %%mm2,%%mm1\n"
      "paddd %%mm4,%%mm3\n"
      "paddd %%mm1,%%mm7\n"
      "paddd %%mm3,%%mm7\n"

      "decl %%ecx\n"
      "jnz 1b\n"

      "movq %%mm7,%%mm1\n"
      "psrlq $32, %%mm7\n"        /* shift hi dword to lo */
      "paddd %%mm7,%%mm1\n"
      "movd %%mm1,%2\n"
      : "+r" (pix1), "+r" (pix2), "=r"(tmp)
      : "r" (line_size) , "m" (h)
      : "%ecx");
    return tmp;
}
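
/* Scalar model of the saturated-subtract absolute difference used above
 * (a hedged sketch, not part of the original code or of the build): for
 * unsigned bytes, |a - b| == sat_sub(a, b) | sat_sub(b, a). The helper
 * name is hypothetical. */
#if 0
static inline uint8_t absdiff_u8_model(uint8_t a, uint8_t b)
{
    uint8_t d1 = a > b ? a - b : 0;   /* psubusb %%mm2, %%mm1 */
    uint8_t d2 = b > a ? b - a : 0;   /* psubusb %%mm5, %%mm2 */
    return d1 | d2;                   /* por                  */
}
#endif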

static int vsad_intra16_mmx(void *v, uint8_t * pix, uint8_t * dummy, int line_size, int h) {
    int tmp;

    assert( (((int)pix) & 7) == 0);
    assert((line_size &7) ==0);

#define SUM(in0, in1, out0, out1) \
      "movq (%0), %%mm2\n"\
      "movq 8(%0), %%mm3\n"\
      "addl %2,%0\n"\
      "movq %%mm2, " #out0 "\n"\
      "movq %%mm3, " #out1 "\n"\
      "psubusb " #in0 ", %%mm2\n"\
      "psubusb " #in1 ", %%mm3\n"\
      "psubusb " #out0 ", " #in0 "\n"\
      "psubusb " #out1 ", " #in1 "\n"\
      "por %%mm2, " #in0 "\n"\
      "por %%mm3, " #in1 "\n"\
      "movq " #in0 ", %%mm2\n"\
      "movq " #in1 ", %%mm3\n"\
      "punpcklbw %%mm7, " #in0 "\n"\
      "punpcklbw %%mm7, " #in1 "\n"\
      "punpckhbw %%mm7, %%mm2\n"\
      "punpckhbw %%mm7, %%mm3\n"\
      "paddw " #in1 ", " #in0 "\n"\
      "paddw %%mm3, %%mm2\n"\
      "paddw %%mm2, " #in0 "\n"\
      "paddw " #in0 ", %%mm6\n"

  asm volatile (
      "movl %3,%%ecx\n"
      "pxor %%mm6,%%mm6\n"
      "pxor %%mm7,%%mm7\n"
      "movq (%0),%%mm0\n"
      "movq 8(%0),%%mm1\n"
      "addl %2,%0\n"
      "subl $2, %%ecx\n"
      SUM(%%mm0, %%mm1, %%mm4, %%mm5)
      "1:\n"

      SUM(%%mm4, %%mm5, %%mm0, %%mm1)

      SUM(%%mm0, %%mm1, %%mm4, %%mm5)

      "subl $2, %%ecx\n"
      "jnz 1b\n"

      "movq %%mm6,%%mm0\n"
      "psrlq $32, %%mm6\n"
      "paddw %%mm6,%%mm0\n"
      "movq %%mm0,%%mm6\n"
      "psrlq $16, %%mm0\n"
      "paddw %%mm6,%%mm0\n"
      "movd %%mm0,%1\n"
      : "+r" (pix), "=r"(tmp)
      : "r" (line_size) , "m" (h)
      : "%ecx");
    return tmp & 0xFFFF;
}
#undef SUM

static int vsad_intra16_mmx2(void *v, uint8_t * pix, uint8_t * dummy, int line_size, int h) {
    int tmp;

    assert( (((int)pix) & 7) == 0);
    assert((line_size &7) ==0);

#define SUM(in0, in1, out0, out1) \
      "movq (%0), " #out0 "\n"\
      "movq 8(%0), " #out1 "\n"\
      "addl %2,%0\n"\
      "psadbw " #out0 ", " #in0 "\n"\
      "psadbw " #out1 ", " #in1 "\n"\
      "paddw " #in1 ", " #in0 "\n"\
      "paddw " #in0 ", %%mm6\n"

  asm volatile (
      "movl %3,%%ecx\n"
      "pxor %%mm6,%%mm6\n"
      "pxor %%mm7,%%mm7\n"
      "movq (%0),%%mm0\n"
      "movq 8(%0),%%mm1\n"
      "addl %2,%0\n"
      "subl $2, %%ecx\n"
      SUM(%%mm0, %%mm1, %%mm4, %%mm5)
      "1:\n"

      SUM(%%mm4, %%mm5, %%mm0, %%mm1)

      SUM(%%mm0, %%mm1, %%mm4, %%mm5)

      "subl $2, %%ecx\n"
      "jnz 1b\n"

      "movd %%mm6,%1\n"
      : "+r" (pix), "=r"(tmp)
      : "r" (line_size) , "m" (h)
      : "%ecx");
    return tmp;
}
#undef SUM
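
/* Scalar model of the psadbw step in the MMX2 variant above (a hedged
 * sketch, not part of the original code or of the build): one psadbw
 * sums the absolute byte differences of two 8-byte groups into a single
 * word. The helper name is hypothetical. */
#if 0
static int psadbw_model(const uint8_t *a, const uint8_t *b)
{
    int i, sum = 0;
    for (i = 0; i < 8; i++)
        sum += a[i] > b[i] ? a[i] - b[i] : b[i] - a[i];
    return sum;
}
#endif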

static int vsad16_mmx(void *v, uint8_t * pix1, uint8_t * pix2, int line_size, int h) {
    int tmp;

    assert( (((int)pix1) & 7) == 0);
    assert( (((int)pix2) & 7) == 0);
    assert((line_size &7) ==0);

#define SUM(in0, in1, out0, out1) \
      "movq (%0),%%mm2\n"\
      "movq (%1)," #out0 "\n"\
      "movq 8(%0),%%mm3\n"\
      "movq 8(%1)," #out1 "\n"\
      "addl %3,%0\n"\
      "addl %3,%1\n"\
      "psubb " #out0 ", %%mm2\n"\
      "psubb " #out1 ", %%mm3\n"\
      "pxor %%mm7, %%mm2\n"\
      "pxor %%mm7, %%mm3\n"\
      "movq %%mm2, " #out0 "\n"\
      "movq %%mm3, " #out1 "\n"\
      "psubusb " #in0 ", %%mm2\n"\
      "psubusb " #in1 ", %%mm3\n"\
      "psubusb " #out0 ", " #in0 "\n"\
      "psubusb " #out1 ", " #in1 "\n"\
      "por %%mm2, " #in0 "\n"\
      "por %%mm3, " #in1 "\n"\
      "movq " #in0 ", %%mm2\n"\
      "movq " #in1 ", %%mm3\n"\
      "punpcklbw %%mm7, " #in0 "\n"\
      "punpcklbw %%mm7, " #in1 "\n"\
      "punpckhbw %%mm7, %%mm2\n"\
      "punpckhbw %%mm7, %%mm3\n"\
      "paddw " #in1 ", " #in0 "\n"\
      "paddw %%mm3, %%mm2\n"\
      "paddw %%mm2, " #in0 "\n"\
      "paddw " #in0 ", %%mm6\n"

  asm volatile (
      "movl %4,%%ecx\n"
      "pxor %%mm6,%%mm6\n"
      "pcmpeqw %%mm7,%%mm7\n"
      "psllw $15, %%mm7\n"
      "packsswb %%mm7, %%mm7\n"
      "movq (%0),%%mm0\n"
      "movq (%1),%%mm2\n"
      "movq 8(%0),%%mm1\n"
      "movq 8(%1),%%mm3\n"
      "addl %3,%0\n"
      "addl %3,%1\n"
      "subl $2, %%ecx\n"
      "psubb %%mm2, %%mm0\n"
      "psubb %%mm3, %%mm1\n"
      "pxor %%mm7, %%mm0\n"
      "pxor %%mm7, %%mm1\n"
      SUM(%%mm0, %%mm1, %%mm4, %%mm5)
      "1:\n"

      SUM(%%mm4, %%mm5, %%mm0, %%mm1)

      SUM(%%mm0, %%mm1, %%mm4, %%mm5)

      "subl $2, %%ecx\n"
      "jnz 1b\n"

      "movq %%mm6,%%mm0\n"
      "psrlq $32, %%mm6\n"
      "paddw %%mm6,%%mm0\n"
      "movq %%mm0,%%mm6\n"
      "psrlq $16, %%mm0\n"
      "paddw %%mm6,%%mm0\n"
      "movd %%mm0,%2\n"
      : "+r" (pix1), "+r" (pix2), "=r"(tmp)
      : "r" (line_size) , "m" (h)
      : "%ecx");
    return tmp & 0x7FFF;
}
#undef SUM
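
/* Scalar model of the sign-bias trick above (a hedged sketch, not part
 * of the original code or of the build): mm7 is built as 0x80 in every
 * byte, and xoring the signed difference with it re-centres the value
 * so the unsigned saturated-subtract absolute difference still works.
 * The helper name is hypothetical. */
#if 0
static inline uint8_t biased_diff_model(uint8_t a, uint8_t b)
{
    return (uint8_t)(((a - b) & 0xFF) ^ 0x80);   /* psubb, then pxor mm7 */
}
#endif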

static int vsad16_mmx2(void *v, uint8_t * pix1, uint8_t * pix2, int line_size, int h) {
    int tmp;

    assert( (((int)pix1) & 7) == 0);
    assert( (((int)pix2) & 7) == 0);
    assert((line_size &7) ==0);

#define SUM(in0, in1, out0, out1) \
      "movq (%0)," #out0 "\n"\
      "movq (%1),%%mm2\n"\
      "movq 8(%0)," #out1 "\n"\
      "movq 8(%1),%%mm3\n"\
      "addl %3,%0\n"\
      "addl %3,%1\n"\
      "psubb %%mm2, " #out0 "\n"\
      "psubb %%mm3, " #out1 "\n"\
      "pxor %%mm7, " #out0 "\n"\
      "pxor %%mm7, " #out1 "\n"\
      "psadbw " #out0 ", " #in0 "\n"\
      "psadbw " #out1 ", " #in1 "\n"\
      "paddw " #in1 ", " #in0 "\n"\
      "paddw " #in0 ", %%mm6\n"

  asm volatile (
      "movl %4,%%ecx\n"
      "pxor %%mm6,%%mm6\n"
      "pcmpeqw %%mm7,%%mm7\n"
      "psllw $15, %%mm7\n"
      "packsswb %%mm7, %%mm7\n"
      "movq (%0),%%mm0\n"
      "movq (%1),%%mm2\n"
      "movq 8(%0),%%mm1\n"
      "movq 8(%1),%%mm3\n"
      "addl %3,%0\n"
      "addl %3,%1\n"
      "subl $2, %%ecx\n"
      "psubb %%mm2, %%mm0\n"
      "psubb %%mm3, %%mm1\n"
      "pxor %%mm7, %%mm0\n"
      "pxor %%mm7, %%mm1\n"
      SUM(%%mm0, %%mm1, %%mm4, %%mm5)
      "1:\n"

      SUM(%%mm4, %%mm5, %%mm0, %%mm1)

      SUM(%%mm0, %%mm1, %%mm4, %%mm5)

      "subl $2, %%ecx\n"
      "jnz 1b\n"

      "movd %%mm6,%2\n"
      : "+r" (pix1), "+r" (pix2), "=r"(tmp)
      : "r" (line_size) , "m" (h)
      : "%ecx");
    return tmp;
}
#undef SUM

static void diff_bytes_mmx(uint8_t *dst, uint8_t *src1, uint8_t *src2, int w){
    int i=0;
    asm volatile(
        "1:                                \n\t"
        "movq  (%2, %0), %%mm0                \n\t"
        "movq  (%1, %0), %%mm1                \n\t"
        "psubb %%mm0, %%mm1                \n\t"
        "movq %%mm1, (%3, %0)                \n\t"
        "movq 8(%2, %0), %%mm0                \n\t"
        "movq 8(%1, %0), %%mm1                \n\t"
        "psubb %%mm0, %%mm1                \n\t"
        "movq %%mm1, 8(%3, %0)                \n\t"
        "addl $16, %0                        \n\t"
        "cmpl %4, %0                        \n\t"
        " jb 1b                                \n\t"
        : "+r" (i)
        : "r"(src1), "r"(src2), "r"(dst), "r"(w-15)
    );
    for(; i<w; i++)
        dst[i+0] = src1[i+0]-src2[i+0];
}

static void sub_hfyu_median_prediction_mmx2(uint8_t *dst, uint8_t *src1, uint8_t *src2, int w, int *left, int *left_top){
    int i=0;
    uint8_t l, lt;

    asm volatile(
        "1:                                \n\t"
        "movq  -1(%1, %0), %%mm0        \n\t" // LT
        "movq  (%1, %0), %%mm1                \n\t" // T
        "movq  -1(%2, %0), %%mm2        \n\t" // L
        "movq  (%2, %0), %%mm3                \n\t" // X
        "movq %%mm2, %%mm4                \n\t" // L
        "psubb %%mm0, %%mm2                \n\t"
        "paddb %%mm1, %%mm2                \n\t" // L + T - LT
        "movq %%mm4, %%mm5                \n\t" // L
        "pmaxub %%mm1, %%mm4                \n\t" // max(T, L)
        "pminub %%mm5, %%mm1                \n\t" // min(T, L)
        "pminub %%mm2, %%mm4                \n\t"
        "pmaxub %%mm1, %%mm4                \n\t"
        "psubb %%mm4, %%mm3                \n\t" // dst - pred
        "movq %%mm3, (%3, %0)                \n\t"
        "addl $8, %0                        \n\t"
        "cmpl %4, %0                        \n\t"
        " jb 1b                                \n\t"
        : "+r" (i)
        : "r"(src1), "r"(src2), "r"(dst), "r"(w)
    );

    l= *left;
    lt= *left_top;

    dst[0]= src2[0] - mid_pred(l, src1[0], (l + src1[0] - lt)&0xFF);

    *left_top= src1[w-1];
    *left    = src2[w-1];
}
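
/* Scalar model of the median prediction above (a hedged sketch, not
 * part of the original code or of the build): the pmaxub/pminub
 * sequence evaluates the median of L, T and L + T - LT, i.e. it clips
 * the gradient predictor between the smaller and the larger of left
 * and top. The helper name is hypothetical. */
#if 0
static inline int median3_model(int a, int b, int c)
{
    int mn = a < b ? a : b;                  /* pminub */
    int mx = a < b ? b : a;                  /* pmaxub */
    return c < mn ? mn : c > mx ? mx : c;    /* median */
}
#endif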

#define LBUTTERFLY2(a1,b1,a2,b2)\
    "paddw " #b1 ", " #a1 "                \n\t"\
    "paddw " #b2 ", " #a2 "                \n\t"\
    "paddw " #b1 ", " #b1 "                \n\t"\
    "paddw " #b2 ", " #b2 "                \n\t"\
    "psubw " #a1 ", " #b1 "                \n\t"\
    "psubw " #a2 ", " #b2 "                \n\t"

#define HADAMARD48\
        LBUTTERFLY2(%%mm0, %%mm1, %%mm2, %%mm3)\
        LBUTTERFLY2(%%mm4, %%mm5, %%mm6, %%mm7)\
        LBUTTERFLY2(%%mm0, %%mm2, %%mm1, %%mm3)\
        LBUTTERFLY2(%%mm4, %%mm6, %%mm5, %%mm7)\
        LBUTTERFLY2(%%mm0, %%mm4, %%mm1, %%mm5)\
        LBUTTERFLY2(%%mm2, %%mm6, %%mm3, %%mm7)\

#define MMABS(a,z)\
    "pxor " #z ", " #z "                \n\t"\
    "pcmpgtw " #a ", " #z "                \n\t"\
    "pxor " #z ", " #a "                \n\t"\
    "psubw " #z ", " #a "                \n\t"

#define MMABS_SUM(a,z, sum)\
    "pxor " #z ", " #z "                \n\t"\
    "pcmpgtw " #a ", " #z "                \n\t"\
    "pxor " #z ", " #a "                \n\t"\
    "psubw " #z ", " #a "                \n\t"\
    "paddusw " #a ", " #sum "                \n\t"

#define MMABS_MMX2(a,z)\
    "pxor " #z ", " #z "                \n\t"\
    "psubw " #a ", " #z "                \n\t"\
    "pmaxsw " #z ", " #a "                \n\t"

#define MMABS_SUM_MMX2(a,z, sum)\
    "pxor " #z ", " #z "                \n\t"\
    "psubw " #a ", " #z "                \n\t"\
    "pmaxsw " #z ", " #a "                \n\t"\
    "paddusw " #a ", " #sum "                \n\t"

#define SBUTTERFLY(a,b,t,n)\
    "movq " #a ", " #t "                \n\t" /* abcd */\
    "punpckl" #n " " #b ", " #a "        \n\t" /* aebf */\
    "punpckh" #n " " #b ", " #t "        \n\t" /* cgdh */\

#define TRANSPOSE4(a,b,c,d,t)\
    SBUTTERFLY(a,b,t,wd) /* a=aebf t=cgdh */\
    SBUTTERFLY(c,d,b,wd) /* c=imjn b=kolp */\
    SBUTTERFLY(a,c,d,dq) /* a=aeim d=bfjn */\
    SBUTTERFLY(t,b,c,dq) /* t=cgko c=dhlp */

#define LOAD4(o, a, b, c, d)\
        "movq "#o"(%1), " #a "                \n\t"\
        "movq "#o"+16(%1), " #b "        \n\t"\
        "movq "#o"+32(%1), " #c "        \n\t"\
        "movq "#o"+48(%1), " #d "        \n\t"

#define STORE4(o, a, b, c, d)\
        "movq "#a", "#o"(%1)                \n\t"\
        "movq "#b", "#o"+16(%1)                \n\t"\
        "movq "#c", "#o"+32(%1)                \n\t"\
        "movq "#d", "#o"+48(%1)                \n\t"\

static int hadamard8_diff_mmx(void *s, uint8_t *src1, uint8_t *src2, int stride, int h){
    uint64_t temp[16] __align8;
    int sum=0;

    assert(h==8);

    diff_pixels_mmx((DCTELEM*)temp, src1, src2, stride);

    asm volatile(
        LOAD4(0 , %%mm0, %%mm1, %%mm2, %%mm3)
        LOAD4(64, %%mm4, %%mm5, %%mm6, %%mm7)

        HADAMARD48

        "movq %%mm7, 112(%1)                \n\t"

        TRANSPOSE4(%%mm0, %%mm1, %%mm2, %%mm3, %%mm7)
        STORE4(0 , %%mm0, %%mm3, %%mm7, %%mm2)

        "movq 112(%1), %%mm7                 \n\t"
        TRANSPOSE4(%%mm4, %%mm5, %%mm6, %%mm7, %%mm0)
        STORE4(64, %%mm4, %%mm7, %%mm0, %%mm6)

        LOAD4(8 , %%mm0, %%mm1, %%mm2, %%mm3)
        LOAD4(72, %%mm4, %%mm5, %%mm6, %%mm7)

        HADAMARD48

        "movq %%mm7, 120(%1)                \n\t"

        TRANSPOSE4(%%mm0, %%mm1, %%mm2, %%mm3, %%mm7)
        STORE4(8 , %%mm0, %%mm3, %%mm7, %%mm2)

        "movq 120(%1), %%mm7                 \n\t"
        TRANSPOSE4(%%mm4, %%mm5, %%mm6, %%mm7, %%mm0)
        "movq %%mm7, %%mm5                \n\t"//FIXME remove
        "movq %%mm6, %%mm7                \n\t"
        "movq %%mm0, %%mm6                \n\t"
//        STORE4(72, %%mm4, %%mm7, %%mm0, %%mm6) //FIXME remove

        LOAD4(64, %%mm0, %%mm1, %%mm2, %%mm3)
//        LOAD4(72, %%mm4, %%mm5, %%mm6, %%mm7)

        HADAMARD48
        "movq %%mm7, 64(%1)                \n\t"
        MMABS(%%mm0, %%mm7)
        MMABS_SUM(%%mm1, %%mm7, %%mm0)
        MMABS_SUM(%%mm2, %%mm7, %%mm0)
        MMABS_SUM(%%mm3, %%mm7, %%mm0)
        MMABS_SUM(%%mm4, %%mm7, %%mm0)
        MMABS_SUM(%%mm5, %%mm7, %%mm0)
        MMABS_SUM(%%mm6, %%mm7, %%mm0)
        "movq 64(%1), %%mm1                \n\t"
        MMABS_SUM(%%mm1, %%mm7, %%mm0)
        "movq %%mm0, 64(%1)                \n\t"

        LOAD4(0 , %%mm0, %%mm1, %%mm2, %%mm3)
        LOAD4(8 , %%mm4, %%mm5, %%mm6, %%mm7)

        HADAMARD48
        "movq %%mm7, (%1)                \n\t"
        MMABS(%%mm0, %%mm7)
        MMABS_SUM(%%mm1, %%mm7, %%mm0)
        MMABS_SUM(%%mm2, %%mm7, %%mm0)
        MMABS_SUM(%%mm3, %%mm7, %%mm0)
        MMABS_SUM(%%mm4, %%mm7, %%mm0)
        MMABS_SUM(%%mm5, %%mm7, %%mm0)
        MMABS_SUM(%%mm6, %%mm7, %%mm0)
        "movq (%1), %%mm1                \n\t"
        MMABS_SUM(%%mm1, %%mm7, %%mm0)
        "movq 64(%1), %%mm1                \n\t"
        MMABS_SUM(%%mm1, %%mm7, %%mm0)

        "movq %%mm0, %%mm1                \n\t"
        "psrlq $32, %%mm0                \n\t"
        "paddusw %%mm1, %%mm0                \n\t"
        "movq %%mm0, %%mm1                \n\t"
        "psrlq $16, %%mm0                \n\t"
        "paddusw %%mm1, %%mm0                \n\t"
        "movd %%mm0, %0                        \n\t"

        : "=r" (sum)
        : "r"(temp)
    );
    return sum&0xFFFF;
}

static int hadamard8_diff_mmx2(void *s, uint8_t *src1, uint8_t *src2, int stride, int h){
    uint64_t temp[16] __align8;
    int sum=0;

    assert(h==8);

    diff_pixels_mmx((DCTELEM*)temp, src1, src2, stride);

    asm volatile(
        LOAD4(0 , %%mm0, %%mm1, %%mm2, %%mm3)
        LOAD4(64, %%mm4, %%mm5, %%mm6, %%mm7)

        HADAMARD48

        "movq %%mm7, 112(%1)                \n\t"

        TRANSPOSE4(%%mm0, %%mm1, %%mm2, %%mm3, %%mm7)
        STORE4(0 , %%mm0, %%mm3, %%mm7, %%mm2)

        "movq 112(%1), %%mm7                 \n\t"
        TRANSPOSE4(%%mm4, %%mm5, %%mm6, %%mm7, %%mm0)
        STORE4(64, %%mm4, %%mm7, %%mm0, %%mm6)

        LOAD4(8 , %%mm0, %%mm1, %%mm2, %%mm3)
        LOAD4(72, %%mm4, %%mm5, %%mm6, %%mm7)

        HADAMARD48

        "movq %%mm7, 120(%1)                \n\t"

        TRANSPOSE4(%%mm0, %%mm1, %%mm2, %%mm3, %%mm7)
        STORE4(8 , %%mm0, %%mm3, %%mm7, %%mm2)

        "movq 120(%1), %%mm7                 \n\t"
        TRANSPOSE4(%%mm4, %%mm5, %%mm6, %%mm7, %%mm0)
        "movq %%mm7, %%mm5                \n\t"//FIXME remove
        "movq %%mm6, %%mm7                \n\t"
        "movq %%mm0, %%mm6                \n\t"
//        STORE4(72, %%mm4, %%mm7, %%mm0, %%mm6) //FIXME remove

        LOAD4(64, %%mm0, %%mm1, %%mm2, %%mm3)
//        LOAD4(72, %%mm4, %%mm5, %%mm6, %%mm7)

        HADAMARD48
        "movq %%mm7, 64(%1)                \n\t"
        MMABS_MMX2(%%mm0, %%mm7)
        MMABS_SUM_MMX2(%%mm1, %%mm7, %%mm0)
        MMABS_SUM_MMX2(%%mm2, %%mm7, %%mm0)
        MMABS_SUM_MMX2(%%mm3, %%mm7, %%mm0)
        MMABS_SUM_MMX2(%%mm4, %%mm7, %%mm0)
        MMABS_SUM_MMX2(%%mm5, %%mm7, %%mm0)
        MMABS_SUM_MMX2(%%mm6, %%mm7, %%mm0)
        "movq 64(%1), %%mm1                \n\t"
        MMABS_SUM_MMX2(%%mm1, %%mm7, %%mm0)
        "movq %%mm0, 64(%1)                \n\t"

        LOAD4(0 , %%mm0, %%mm1, %%mm2, %%mm3)
        LOAD4(8 , %%mm4, %%mm5, %%mm6, %%mm7)

        HADAMARD48
        "movq %%mm7, (%1)                \n\t"
        MMABS_MMX2(%%mm0, %%mm7)
        MMABS_SUM_MMX2(%%mm1, %%mm7, %%mm0)
        MMABS_SUM_MMX2(%%mm2, %%mm7, %%mm0)
        MMABS_SUM_MMX2(%%mm3, %%mm7, %%mm0)
        MMABS_SUM_MMX2(%%mm4, %%mm7, %%mm0)
        MMABS_SUM_MMX2(%%mm5, %%mm7, %%mm0)
        MMABS_SUM_MMX2(%%mm6, %%mm7, %%mm0)
        "movq (%1), %%mm1                \n\t"
        MMABS_SUM_MMX2(%%mm1, %%mm7, %%mm0)
        "movq 64(%1), %%mm1                \n\t"
        MMABS_SUM_MMX2(%%mm1, %%mm7, %%mm0)

        "movq %%mm0, %%mm1                \n\t"
        "psrlq $32, %%mm0                \n\t"
        "paddusw %%mm1, %%mm0                \n\t"
        "movq %%mm0, %%mm1                \n\t"
        "psrlq $16, %%mm0                \n\t"
        "paddusw %%mm1, %%mm0                \n\t"
        "movd %%mm0, %0                        \n\t"

        : "=r" (sum)
        : "r"(temp)
    );
    return sum&0xFFFF;
}

WARPER8_16_SQ(hadamard8_diff_mmx, hadamard8_diff16_mmx)
WARPER8_16_SQ(hadamard8_diff_mmx2, hadamard8_diff16_mmx2)
#endif //CONFIG_ENCODERS

#define put_no_rnd_pixels8_mmx(a,b,c,d) put_pixels8_mmx(a,b,c,d)
#define put_no_rnd_pixels16_mmx(a,b,c,d) put_pixels16_mmx(a,b,c,d)

#define QPEL_V_LOW(m3,m4,m5,m6, pw_20, pw_3, rnd, in0, in1, in2, in7, out, OP)\
        "paddw " #m4 ", " #m3 "                \n\t" /* x1 */\
        "movq "MANGLE(ff_pw_20)", %%mm4                \n\t" /* 20 */\
        "pmullw " #m3 ", %%mm4                \n\t" /* 20x1 */\
        "movq "#in7", " #m3 "                \n\t" /* d */\
        "movq "#in0", %%mm5                \n\t" /* D */\
        "paddw " #m3 ", %%mm5                \n\t" /* x4 */\
        "psubw %%mm5, %%mm4                \n\t" /* 20x1 - x4 */\
        "movq "#in1", %%mm5                \n\t" /* C */\
        "movq "#in2", %%mm6                \n\t" /* B */\
        "paddw " #m6 ", %%mm5                \n\t" /* x3 */\
        "paddw " #m5 ", %%mm6                \n\t" /* x2 */\
        "paddw %%mm6, %%mm6                \n\t" /* 2x2 */\
        "psubw %%mm6, %%mm5                \n\t" /* -2x2 + x3 */\
        "pmullw "MANGLE(ff_pw_3)", %%mm5        \n\t" /* -6x2 + 3x3 */\
        "paddw " #rnd ", %%mm4                \n\t" /* 20x1 - x4 + rnd */\
        "paddw %%mm4, %%mm5                \n\t" /* 20x1 - 6x2 + 3x3 - x4 */\
        "psraw $5, %%mm5                \n\t"\
        "packuswb %%mm5, %%mm5                \n\t"\
        OP(%%mm5, out, %%mm7, d)
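
/* Scalar model of one QPEL_V_LOW output sample (a hedged sketch, not
 * part of the original code or of the build): with x1..x4 the symmetric
 * tap-pair sums noted in the comments above, the lowpass filter
 * computes (20*x1 - 6*x2 + 3*x3 - x4 + rnd) >> 5, saturated to a byte.
 * The helper name is hypothetical. */
#if 0
static inline uint8_t qpel_lowpass_model(int x1, int x2, int x3, int x4, int rnd)
{
    int v = (20 * x1 - 6 * x2 + 3 * x3 - x4 + rnd) >> 5;
    return v < 0 ? 0 : v > 255 ? 255 : v;   /* packuswb */
}
#endif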
1317
#define QPEL_BASE(OPNAME, ROUNDER, RND, OP_MMX2, OP_3DNOW)\
1318
static void OPNAME ## mpeg4_qpel16_h_lowpass_mmx2(uint8_t *dst, uint8_t *src, int dstStride, int srcStride, int h){\
1319
    uint64_t temp;\
1320
\
1321
    asm volatile(\
1322
        "pxor %%mm7, %%mm7                \n\t"\
1323
        "1:                                \n\t"\
1324
        "movq  (%0), %%mm0                \n\t" /* ABCDEFGH */\
1325
        "movq %%mm0, %%mm1                \n\t" /* ABCDEFGH */\
1326
        "movq %%mm0, %%mm2                \n\t" /* ABCDEFGH */\
1327
        "punpcklbw %%mm7, %%mm0                \n\t" /* 0A0B0C0D */\
1328
        "punpckhbw %%mm7, %%mm1                \n\t" /* 0E0F0G0H */\
1329
        "pshufw $0x90, %%mm0, %%mm5        \n\t" /* 0A0A0B0C */\
1330
        "pshufw $0x41, %%mm0, %%mm6        \n\t" /* 0B0A0A0B */\
1331
        "movq %%mm2, %%mm3                \n\t" /* ABCDEFGH */\
1332
        "movq %%mm2, %%mm4                \n\t" /* ABCDEFGH */\
1333
        "psllq $8, %%mm2                \n\t" /* 0ABCDEFG */\
1334
        "psllq $16, %%mm3                \n\t" /* 00ABCDEF */\
1335
        "psllq $24, %%mm4                \n\t" /* 000ABCDE */\
1336
        "punpckhbw %%mm7, %%mm2                \n\t" /* 0D0E0F0G */\
1337
        "punpckhbw %%mm7, %%mm3                \n\t" /* 0C0D0E0F */\
1338
        "punpckhbw %%mm7, %%mm4                \n\t" /* 0B0C0D0E */\
1339
        "paddw %%mm3, %%mm5                \n\t" /* b */\
1340
        "paddw %%mm2, %%mm6                \n\t" /* c */\
1341
        "paddw %%mm5, %%mm5                \n\t" /* 2b */\
1342
        "psubw %%mm5, %%mm6                \n\t" /* c - 2b */\
1343
        "pshufw $0x06, %%mm0, %%mm5        \n\t" /* 0C0B0A0A */\
1344
        "pmullw "MANGLE(ff_pw_3)", %%mm6                \n\t" /* 3c - 6b */\
1345
        "paddw %%mm4, %%mm0                \n\t" /* a */\
1346
        "paddw %%mm1, %%mm5                \n\t" /* d */\
1347
        "pmullw "MANGLE(ff_pw_20)", %%mm0                \n\t" /* 20a */\
1348
        "psubw %%mm5, %%mm0                \n\t" /* 20a - d */\
1349
        "paddw %6, %%mm6                \n\t"\
1350
        "paddw %%mm6, %%mm0                \n\t" /* 20a - 6b + 3c - d */\
1351
        "psraw $5, %%mm0                \n\t"\
1352
        "movq %%mm0, %5                        \n\t"\
1353
        /* mm1=EFGH, mm2=DEFG, mm3=CDEF, mm4=BCDE, mm7=0 */\
1354
        \
1355
        "movq 5(%0), %%mm0                \n\t" /* FGHIJKLM */\
1356
        "movq %%mm0, %%mm5                \n\t" /* FGHIJKLM */\
1357
        "movq %%mm0, %%mm6                \n\t" /* FGHIJKLM */\
1358
        "psrlq $8, %%mm0                \n\t" /* GHIJKLM0 */\
1359
        "psrlq $16, %%mm5                \n\t" /* HIJKLM00 */\
1360
        "punpcklbw %%mm7, %%mm0                \n\t" /* 0G0H0I0J */\
1361
        "punpcklbw %%mm7, %%mm5                \n\t" /* 0H0I0J0K */\
1362
        "paddw %%mm0, %%mm2                \n\t" /* b */\
1363
        "paddw %%mm5, %%mm3                \n\t" /* c */\
1364
        "paddw %%mm2, %%mm2                \n\t" /* 2b */\
1365
        "psubw %%mm2, %%mm3                \n\t" /* c - 2b */\
1366
        "movq %%mm6, %%mm2                \n\t" /* FGHIJKLM */\
1367
        "psrlq $24, %%mm6                \n\t" /* IJKLM000 */\
1368
        "punpcklbw %%mm7, %%mm2                \n\t" /* 0F0G0H0I */\
1369
        "punpcklbw %%mm7, %%mm6                \n\t" /* 0I0J0K0L */\
1370
        "pmullw "MANGLE(ff_pw_3)", %%mm3                \n\t" /* 3c - 6b */\
1371
        "paddw %%mm2, %%mm1                \n\t" /* a */\
1372
        "paddw %%mm6, %%mm4                \n\t" /* d */\
1373
        "pmullw "MANGLE(ff_pw_20)", %%mm1                \n\t" /* 20a */\
1374
        "psubw %%mm4, %%mm3                \n\t" /* - 6b +3c - d */\
1375
        "paddw %6, %%mm1                \n\t"\
1376
        "paddw %%mm1, %%mm3                \n\t" /* 20a - 6b +3c - d */\
1377
        "psraw $5, %%mm3                \n\t"\
1378
        "movq %5, %%mm1                        \n\t"\
1379
        "packuswb %%mm3, %%mm1                \n\t"\
1380
        OP_MMX2(%%mm1, (%1),%%mm4, q)\
1381
        /* mm0= GHIJ, mm2=FGHI, mm5=HIJK, mm6=IJKL, mm7=0 */\
1382
        \
1383
        "movq 9(%0), %%mm1                \n\t" /* JKLMNOPQ */\
1384
        "movq %%mm1, %%mm4                \n\t" /* JKLMNOPQ */\
1385
        "movq %%mm1, %%mm3                \n\t" /* JKLMNOPQ */\
1386
        "psrlq $8, %%mm1                \n\t" /* KLMNOPQ0 */\
1387
        "psrlq $16, %%mm4                \n\t" /* LMNOPQ00 */\
1388
        "punpcklbw %%mm7, %%mm1                \n\t" /* 0K0L0M0N */\
1389
        "punpcklbw %%mm7, %%mm4                \n\t" /* 0L0M0N0O */\
1390
        "paddw %%mm1, %%mm5                \n\t" /* b */\
1391
        "paddw %%mm4, %%mm0                \n\t" /* c */\
1392
        "paddw %%mm5, %%mm5                \n\t" /* 2b */\
1393
        "psubw %%mm5, %%mm0                \n\t" /* c - 2b */\
1394
        "movq %%mm3, %%mm5                \n\t" /* JKLMNOPQ */\
1395
        "psrlq $24, %%mm3                \n\t" /* MNOPQ000 */\
1396
        "pmullw "MANGLE(ff_pw_3)", %%mm0                \n\t" /* 3c - 6b */\
1397
        "punpcklbw %%mm7, %%mm3                \n\t" /* 0M0N0O0P */\
1398
        "paddw %%mm3, %%mm2                \n\t" /* d */\
1399
        "psubw %%mm2, %%mm0                \n\t" /* -6b + 3c - d */\
1400
        "movq %%mm5, %%mm2                \n\t" /* JKLMNOPQ */\
1401
        "punpcklbw %%mm7, %%mm2                \n\t" /* 0J0K0L0M */\
1402
        "punpckhbw %%mm7, %%mm5                \n\t" /* 0N0O0P0Q */\
1403
        "paddw %%mm2, %%mm6                \n\t" /* a */\
1404
        "pmullw "MANGLE(ff_pw_20)", %%mm6                \n\t" /* 20a */\
1405
        "paddw %6, %%mm0                \n\t"\
1406
        "paddw %%mm6, %%mm0                \n\t" /* 20a - 6b + 3c - d */\
1407
        "psraw $5, %%mm0                \n\t"\
1408
        /* mm1=KLMN, mm2=JKLM, mm3=MNOP, mm4=LMNO, mm5=NOPQ mm7=0 */\
1409
        \
1410
        "paddw %%mm5, %%mm3                \n\t" /* a */\
1411
        "pshufw $0xF9, %%mm5, %%mm6        \n\t" /* 0O0P0Q0Q */\
1412
        "paddw %%mm4, %%mm6                \n\t" /* b */\
1413
        "pshufw $0xBE, %%mm5, %%mm4        \n\t" /* 0P0Q0Q0P */\
1414
        "pshufw $0x6F, %%mm5, %%mm5        \n\t" /* 0Q0Q0P0O */\
1415
        "paddw %%mm1, %%mm4                \n\t" /* c */\
1416
        "paddw %%mm2, %%mm5                \n\t" /* d */\
1417
        "paddw %%mm6, %%mm6                \n\t" /* 2b */\
1418
        "psubw %%mm6, %%mm4                \n\t" /* c - 2b */\
1419
        "pmullw "MANGLE(ff_pw_20)", %%mm3                \n\t" /* 20a */\
1420
        "pmullw "MANGLE(ff_pw_3)", %%mm4                \n\t" /* 3c - 6b */\
1421
        "psubw %%mm5, %%mm3                \n\t" /* -6b + 3c - d */\
1422
        "paddw %6, %%mm4                \n\t"\
1423
        "paddw %%mm3, %%mm4                \n\t" /* 20a - 6b + 3c - d */\
1424
        "psraw $5, %%mm4                \n\t"\
1425
        "packuswb %%mm4, %%mm0                \n\t"\
1426
        OP_MMX2(%%mm0, 8(%1), %%mm4, q)\
1427
        \
1428
        "addl %3, %0                        \n\t"\
1429
        "addl %4, %1                        \n\t"\
1430
        "decl %2                        \n\t"\
1431
        " jnz 1b                                \n\t"\
1432
        : "+a"(src), "+c"(dst), "+m"(h)\
1433
        : "d"(srcStride), "S"(dstStride), /*"m"(ff_pw_20), "m"(ff_pw_3),*/ "m"(temp), "m"(ROUNDER)\
1434
        : "memory"\
1435
    );\
1436
}\
1437
\
static void OPNAME ## mpeg4_qpel16_h_lowpass_3dnow(uint8_t *dst, uint8_t *src, int dstStride, int srcStride, int h){\
    int i;\
    int16_t temp[16];\
    /* quick HACK, XXX FIXME MUST be optimized */\
    for(i=0; i<h; i++)\
    {\
        temp[ 0]= (src[ 0]+src[ 1])*20 - (src[ 0]+src[ 2])*6 + (src[ 1]+src[ 3])*3 - (src[ 2]+src[ 4]);\
        temp[ 1]= (src[ 1]+src[ 2])*20 - (src[ 0]+src[ 3])*6 + (src[ 0]+src[ 4])*3 - (src[ 1]+src[ 5]);\
        temp[ 2]= (src[ 2]+src[ 3])*20 - (src[ 1]+src[ 4])*6 + (src[ 0]+src[ 5])*3 - (src[ 0]+src[ 6]);\
        temp[ 3]= (src[ 3]+src[ 4])*20 - (src[ 2]+src[ 5])*6 + (src[ 1]+src[ 6])*3 - (src[ 0]+src[ 7]);\
        temp[ 4]= (src[ 4]+src[ 5])*20 - (src[ 3]+src[ 6])*6 + (src[ 2]+src[ 7])*3 - (src[ 1]+src[ 8]);\
        temp[ 5]= (src[ 5]+src[ 6])*20 - (src[ 4]+src[ 7])*6 + (src[ 3]+src[ 8])*3 - (src[ 2]+src[ 9]);\
        temp[ 6]= (src[ 6]+src[ 7])*20 - (src[ 5]+src[ 8])*6 + (src[ 4]+src[ 9])*3 - (src[ 3]+src[10]);\
        temp[ 7]= (src[ 7]+src[ 8])*20 - (src[ 6]+src[ 9])*6 + (src[ 5]+src[10])*3 - (src[ 4]+src[11]);\
        temp[ 8]= (src[ 8]+src[ 9])*20 - (src[ 7]+src[10])*6 + (src[ 6]+src[11])*3 - (src[ 5]+src[12]);\
        temp[ 9]= (src[ 9]+src[10])*20 - (src[ 8]+src[11])*6 + (src[ 7]+src[12])*3 - (src[ 6]+src[13]);\
        temp[10]= (src[10]+src[11])*20 - (src[ 9]+src[12])*6 + (src[ 8]+src[13])*3 - (src[ 7]+src[14]);\
        temp[11]= (src[11]+src[12])*20 - (src[10]+src[13])*6 + (src[ 9]+src[14])*3 - (src[ 8]+src[15]);\
        temp[12]= (src[12]+src[13])*20 - (src[11]+src[14])*6 + (src[10]+src[15])*3 - (src[ 9]+src[16]);\
        temp[13]= (src[13]+src[14])*20 - (src[12]+src[15])*6 + (src[11]+src[16])*3 - (src[10]+src[16]);\
        temp[14]= (src[14]+src[15])*20 - (src[13]+src[16])*6 + (src[12]+src[16])*3 - (src[11]+src[15]);\
        temp[15]= (src[15]+src[16])*20 - (src[14]+src[16])*6 + (src[13]+src[15])*3 - (src[12]+src[14]);\
        asm volatile(\
            "movq (%0), %%mm0                \n\t"\
            "movq 8(%0), %%mm1                \n\t"\
            "paddw %2, %%mm0                \n\t"\
            "paddw %2, %%mm1                \n\t"\
            "psraw $5, %%mm0                \n\t"\
            "psraw $5, %%mm1                \n\t"\
            "packuswb %%mm1, %%mm0        \n\t"\
            OP_3DNOW(%%mm0, (%1), %%mm1, q)\
            "movq 16(%0), %%mm0                \n\t"\
            "movq 24(%0), %%mm1                \n\t"\
            "paddw %2, %%mm0                \n\t"\
            "paddw %2, %%mm1                \n\t"\
            "psraw $5, %%mm0                \n\t"\
            "psraw $5, %%mm1                \n\t"\
            "packuswb %%mm1, %%mm0        \n\t"\
            OP_3DNOW(%%mm0, 8(%1), %%mm1, q)\
            :: "r"(temp), "r"(dst), "m"(ROUNDER)\
            : "memory"\
        );\
        dst+=dstStride;\
        src+=srcStride;\
    }\
}\
\
static void OPNAME ## mpeg4_qpel8_h_lowpass_mmx2(uint8_t *dst, uint8_t *src, int dstStride, int srcStride, int h){\
    uint64_t temp;\
\
    asm volatile(\
        "pxor %%mm7, %%mm7                \n\t"\
        "1:                                \n\t"\
        "movq  (%0), %%mm0                \n\t" /* ABCDEFGH */\
        "movq %%mm0, %%mm1                \n\t" /* ABCDEFGH */\
        "movq %%mm0, %%mm2                \n\t" /* ABCDEFGH */\
        "punpcklbw %%mm7, %%mm0                \n\t" /* 0A0B0C0D */\
        "punpckhbw %%mm7, %%mm1                \n\t" /* 0E0F0G0H */\
        "pshufw $0x90, %%mm0, %%mm5        \n\t" /* 0A0A0B0C */\
        "pshufw $0x41, %%mm0, %%mm6        \n\t" /* 0B0A0A0B */\
        "movq %%mm2, %%mm3                \n\t" /* ABCDEFGH */\
        "movq %%mm2, %%mm4                \n\t" /* ABCDEFGH */\
        "psllq $8, %%mm2                \n\t" /* 0ABCDEFG */\
        "psllq $16, %%mm3                \n\t" /* 00ABCDEF */\
        "psllq $24, %%mm4                \n\t" /* 000ABCDE */\
        "punpckhbw %%mm7, %%mm2                \n\t" /* 0D0E0F0G */\
        "punpckhbw %%mm7, %%mm3                \n\t" /* 0C0D0E0F */\
        "punpckhbw %%mm7, %%mm4                \n\t" /* 0B0C0D0E */\
        "paddw %%mm3, %%mm5                \n\t" /* b */\
        "paddw %%mm2, %%mm6                \n\t" /* c */\
        "paddw %%mm5, %%mm5                \n\t" /* 2b */\
        "psubw %%mm5, %%mm6                \n\t" /* c - 2b */\
        "pshufw $0x06, %%mm0, %%mm5        \n\t" /* 0C0B0A0A */\
        "pmullw "MANGLE(ff_pw_3)", %%mm6                \n\t" /* 3c - 6b */\
        "paddw %%mm4, %%mm0                \n\t" /* a */\
        "paddw %%mm1, %%mm5                \n\t" /* d */\
        "pmullw "MANGLE(ff_pw_20)", %%mm0                \n\t" /* 20a */\
        "psubw %%mm5, %%mm0                \n\t" /* 20a - d */\
        "paddw %6, %%mm6                \n\t"\
        "paddw %%mm6, %%mm0                \n\t" /* 20a - 6b + 3c - d */\
        "psraw $5, %%mm0                \n\t"\
        /* mm1=EFGH, mm2=DEFG, mm3=CDEF, mm4=BCDE, mm7=0 */\
        \
        "movd 5(%0), %%mm5                \n\t" /* FGHI */\
        "punpcklbw %%mm7, %%mm5                \n\t" /* 0F0G0H0I */\
        "pshufw $0xF9, %%mm5, %%mm6        \n\t" /* 0G0H0I0I */\
        "paddw %%mm5, %%mm1                \n\t" /* a */\
        "paddw %%mm6, %%mm2                \n\t" /* b */\
        "pshufw $0xBE, %%mm5, %%mm6        \n\t" /* 0H0I0I0H */\
        "pshufw $0x6F, %%mm5, %%mm5        \n\t" /* 0I0I0H0G */\
        "paddw %%mm6, %%mm3                \n\t" /* c */\
        "paddw %%mm5, %%mm4                \n\t" /* d */\
        "paddw %%mm2, %%mm2                \n\t" /* 2b */\
        "psubw %%mm2, %%mm3                \n\t" /* c - 2b */\
        "pmullw "MANGLE(ff_pw_20)", %%mm1                \n\t" /* 20a */\
        "pmullw "MANGLE(ff_pw_3)", %%mm3                \n\t" /* 3c - 6b */\
        "psubw %%mm4, %%mm3                \n\t" /* -6b + 3c - d */\
        "paddw %6, %%mm1                \n\t"\
        "paddw %%mm1, %%mm3                \n\t" /* 20a - 6b + 3c - d */\
        "psraw $5, %%mm3                \n\t"\
        "packuswb %%mm3, %%mm0                \n\t"\
        OP_MMX2(%%mm0, (%1), %%mm4, q)\
        \
        "addl %3, %0                        \n\t"\
        "addl %4, %1                        \n\t"\
        "decl %2                        \n\t"\
        " jnz 1b                        \n\t"\
        : "+a"(src), "+c"(dst), "+m"(h)\
        : "S"(srcStride), "D"(dstStride), /*"m"(ff_pw_20), "m"(ff_pw_3),*/ "m"(temp), "m"(ROUNDER)\
        : "memory"\
    );\
}\
\
static void OPNAME ## mpeg4_qpel8_h_lowpass_3dnow(uint8_t *dst, uint8_t *src, int dstStride, int srcStride, int h){\
    int i;\
    int16_t temp[8];\
    /* quick HACK, XXX FIXME MUST be optimized */\
    for(i=0; i<h; i++)\
    {\
        temp[ 0]= (src[ 0]+src[ 1])*20 - (src[ 0]+src[ 2])*6 + (src[ 1]+src[ 3])*3 - (src[ 2]+src[ 4]);\
        temp[ 1]= (src[ 1]+src[ 2])*20 - (src[ 0]+src[ 3])*6 + (src[ 0]+src[ 4])*3 - (src[ 1]+src[ 5]);\
        temp[ 2]= (src[ 2]+src[ 3])*20 - (src[ 1]+src[ 4])*6 + (src[ 0]+src[ 5])*3 - (src[ 0]+src[ 6]);\
        temp[ 3]= (src[ 3]+src[ 4])*20 - (src[ 2]+src[ 5])*6 + (src[ 1]+src[ 6])*3 - (src[ 0]+src[ 7]);\
        temp[ 4]= (src[ 4]+src[ 5])*20 - (src[ 3]+src[ 6])*6 + (src[ 2]+src[ 7])*3 - (src[ 1]+src[ 8]);\
        temp[ 5]= (src[ 5]+src[ 6])*20 - (src[ 4]+src[ 7])*6 + (src[ 3]+src[ 8])*3 - (src[ 2]+src[ 8]);\
        temp[ 6]= (src[ 6]+src[ 7])*20 - (src[ 5]+src[ 8])*6 + (src[ 4]+src[ 8])*3 - (src[ 3]+src[ 7]);\
        temp[ 7]= (src[ 7]+src[ 8])*20 - (src[ 6]+src[ 8])*6 + (src[ 5]+src[ 7])*3 - (src[ 4]+src[ 6]);\
        asm volatile(\
            "movq (%0), %%mm0                \n\t"\
            "movq 8(%0), %%mm1                \n\t"\
            "paddw %2, %%mm0                \n\t"\
            "paddw %2, %%mm1                \n\t"\
            "psraw $5, %%mm0                \n\t"\
            "psraw $5, %%mm1                \n\t"\
            "packuswb %%mm1, %%mm0        \n\t"\
            OP_3DNOW(%%mm0, (%1), %%mm1, q)\
            :: "r"(temp), "r"(dst), "m"(ROUNDER)\
            :"memory"\
        );\
        dst+=dstStride;\
        src+=srcStride;\
    }\
}

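/*
 * The MMX2/3DNow! kernels above and the vertical pass below all evaluate
 * the same MPEG-4 quarter-pel lowpass filter.  With a, b, c and d being
 * the mirrored tap-pair sums spelled out in the scalar fallbacks, each
 * output pixel is
 *
 *     out = clip_uint8((20*a - 6*b + 3*c - d + rounder) >> 5)
 *
 * i.e. the 8-tap kernel (-1, 3, -6, 20, 20, -6, 3, -1) scaled by 1/32;
 * rounder is ff_pw_16 for the rounding variants and ff_pw_15 for the
 * no_rnd ones (see the QPEL_BASE/QPEL_OP instantiations below).
 */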
#define QPEL_OP(OPNAME, ROUNDER, RND, OP, MMX)\
\
static void OPNAME ## mpeg4_qpel16_v_lowpass_ ## MMX(uint8_t *dst, uint8_t *src, int dstStride, int srcStride){\
    uint64_t temp[17*4];\
    uint64_t *temp_ptr= temp;\
    int count= 17;\
\
    /*FIXME unroll */\
    asm volatile(\
        "pxor %%mm7, %%mm7                \n\t"\
        "1:                                \n\t"\
        "movq (%0), %%mm0                \n\t"\
        "movq (%0), %%mm1                \n\t"\
        "movq 8(%0), %%mm2                \n\t"\
        "movq 8(%0), %%mm3                \n\t"\
        "punpcklbw %%mm7, %%mm0                \n\t"\
        "punpckhbw %%mm7, %%mm1                \n\t"\
        "punpcklbw %%mm7, %%mm2                \n\t"\
        "punpckhbw %%mm7, %%mm3                \n\t"\
        "movq %%mm0, (%1)                \n\t"\
        "movq %%mm1, 17*8(%1)                \n\t"\
        "movq %%mm2, 2*17*8(%1)                \n\t"\
        "movq %%mm3, 3*17*8(%1)                \n\t"\
        "addl $8, %1                        \n\t"\
        "addl %3, %0                        \n\t"\
        "decl %2                        \n\t"\
        " jnz 1b                        \n\t"\
        : "+r" (src), "+r" (temp_ptr), "+r"(count)\
        : "r" (srcStride)\
        : "memory"\
    );\
    \
    temp_ptr= temp;\
    count=4;\
    \
/*FIXME reorder for speed */\
    asm volatile(\
        /*"pxor %%mm7, %%mm7                \n\t"*/\
        "1:                                \n\t"\
        "movq (%0), %%mm0                \n\t"\
        "movq 8(%0), %%mm1                \n\t"\
        "movq 16(%0), %%mm2                \n\t"\
        "movq 24(%0), %%mm3                \n\t"\
        QPEL_V_LOW(%%mm0, %%mm1, %%mm2, %%mm3, %5, %6, %5, 16(%0),  8(%0),   (%0), 32(%0), (%1), OP)\
        QPEL_V_LOW(%%mm1, %%mm2, %%mm3, %%mm0, %5, %6, %5,  8(%0),   (%0),   (%0), 40(%0), (%1, %3), OP)\
        "addl %4, %1                        \n\t"\
        QPEL_V_LOW(%%mm2, %%mm3, %%mm0, %%mm1, %5, %6, %5,   (%0),   (%0),  8(%0), 48(%0), (%1), OP)\
        \
        QPEL_V_LOW(%%mm3, %%mm0, %%mm1, %%mm2, %5, %6, %5,   (%0),  8(%0), 16(%0), 56(%0), (%1, %3), OP)\
        "addl %4, %1                        \n\t"\
        QPEL_V_LOW(%%mm0, %%mm1, %%mm2, %%mm3, %5, %6, %5,  8(%0), 16(%0), 24(%0), 64(%0), (%1), OP)\
        QPEL_V_LOW(%%mm1, %%mm2, %%mm3, %%mm0, %5, %6, %5, 16(%0), 24(%0), 32(%0), 72(%0), (%1, %3), OP)\
        "addl %4, %1                        \n\t"\
        QPEL_V_LOW(%%mm2, %%mm3, %%mm0, %%mm1, %5, %6, %5, 24(%0), 32(%0), 40(%0), 80(%0), (%1), OP)\
        QPEL_V_LOW(%%mm3, %%mm0, %%mm1, %%mm2, %5, %6, %5, 32(%0), 40(%0), 48(%0), 88(%0), (%1, %3), OP)\
        "addl %4, %1                        \n\t"\
        QPEL_V_LOW(%%mm0, %%mm1, %%mm2, %%mm3, %5, %6, %5, 40(%0), 48(%0), 56(%0), 96(%0), (%1), OP)\
        QPEL_V_LOW(%%mm1, %%mm2, %%mm3, %%mm0, %5, %6, %5, 48(%0), 56(%0), 64(%0),104(%0), (%1, %3), OP)\
        "addl %4, %1                        \n\t"\
        QPEL_V_LOW(%%mm2, %%mm3, %%mm0, %%mm1, %5, %6, %5, 56(%0), 64(%0), 72(%0),112(%0), (%1), OP)\
        QPEL_V_LOW(%%mm3, %%mm0, %%mm1, %%mm2, %5, %6, %5, 64(%0), 72(%0), 80(%0),120(%0), (%1, %3), OP)\
        "addl %4, %1                        \n\t"\
        QPEL_V_LOW(%%mm0, %%mm1, %%mm2, %%mm3, %5, %6, %5, 72(%0), 80(%0), 88(%0),128(%0), (%1), OP)\
        \
        QPEL_V_LOW(%%mm1, %%mm2, %%mm3, %%mm0, %5, %6, %5, 80(%0), 88(%0), 96(%0),128(%0), (%1, %3), OP)\
        "addl %4, %1                        \n\t"\
        QPEL_V_LOW(%%mm2, %%mm3, %%mm0, %%mm1, %5, %6, %5, 88(%0), 96(%0),104(%0),120(%0), (%1), OP)\
        QPEL_V_LOW(%%mm3, %%mm0, %%mm1, %%mm2, %5, %6, %5, 96(%0),104(%0),112(%0),112(%0), (%1, %3), OP)\
        \
        "addl $136, %0                        \n\t"\
        "addl %6, %1                        \n\t"\
        "decl %2                        \n\t"\
        " jnz 1b                        \n\t"\
        \
        : "+r"(temp_ptr), "+r"(dst), "+g"(count)\
        : "r"(dstStride), "r"(2*dstStride), /*"m"(ff_pw_20), "m"(ff_pw_3),*/ "m"(ROUNDER), "g"(4-14*dstStride)\
        :"memory"\
    );\
}\
\
static void OPNAME ## mpeg4_qpel8_v_lowpass_ ## MMX(uint8_t *dst, uint8_t *src, int dstStride, int srcStride){\
    uint64_t temp[9*4];\
    uint64_t *temp_ptr= temp;\
    int count= 9;\
\
    /*FIXME unroll */\
    asm volatile(\
        "pxor %%mm7, %%mm7                \n\t"\
        "1:                                \n\t"\
        "movq (%0), %%mm0                \n\t"\
        "movq (%0), %%mm1                \n\t"\
        "punpcklbw %%mm7, %%mm0                \n\t"\
        "punpckhbw %%mm7, %%mm1                \n\t"\
        "movq %%mm0, (%1)                \n\t"\
        "movq %%mm1, 9*8(%1)                \n\t"\
        "addl $8, %1                        \n\t"\
        "addl %3, %0                        \n\t"\
        "decl %2                        \n\t"\
        " jnz 1b                        \n\t"\
        : "+r" (src), "+r" (temp_ptr), "+r"(count)\
        : "r" (srcStride)\
        : "memory"\
    );\
    \
    temp_ptr= temp;\
    count=2;\
    \
/*FIXME reorder for speed */\
    asm volatile(\
        /*"pxor %%mm7, %%mm7                \n\t"*/\
        "1:                                \n\t"\
        "movq (%0), %%mm0                \n\t"\
        "movq 8(%0), %%mm1                \n\t"\
        "movq 16(%0), %%mm2                \n\t"\
        "movq 24(%0), %%mm3                \n\t"\
        QPEL_V_LOW(%%mm0, %%mm1, %%mm2, %%mm3, %5, %6, %5, 16(%0),  8(%0),   (%0), 32(%0), (%1), OP)\
        QPEL_V_LOW(%%mm1, %%mm2, %%mm3, %%mm0, %5, %6, %5,  8(%0),   (%0),   (%0), 40(%0), (%1, %3), OP)\
        "addl %4, %1                        \n\t"\
        QPEL_V_LOW(%%mm2, %%mm3, %%mm0, %%mm1, %5, %6, %5,   (%0),   (%0),  8(%0), 48(%0), (%1), OP)\
        \
        QPEL_V_LOW(%%mm3, %%mm0, %%mm1, %%mm2, %5, %6, %5,   (%0),  8(%0), 16(%0), 56(%0), (%1, %3), OP)\
        "addl %4, %1                        \n\t"\
        QPEL_V_LOW(%%mm0, %%mm1, %%mm2, %%mm3, %5, %6, %5,  8(%0), 16(%0), 24(%0), 64(%0), (%1), OP)\
        \
        QPEL_V_LOW(%%mm1, %%mm2, %%mm3, %%mm0, %5, %6, %5, 16(%0), 24(%0), 32(%0), 64(%0), (%1, %3), OP)\
        "addl %4, %1                        \n\t"\
        QPEL_V_LOW(%%mm2, %%mm3, %%mm0, %%mm1, %5, %6, %5, 24(%0), 32(%0), 40(%0), 56(%0), (%1), OP)\
        QPEL_V_LOW(%%mm3, %%mm0, %%mm1, %%mm2, %5, %6, %5, 32(%0), 40(%0), 48(%0), 48(%0), (%1, %3), OP)\
        \
        "addl $72, %0                        \n\t"\
        "addl %6, %1                        \n\t"\
        "decl %2                        \n\t"\
        " jnz 1b                        \n\t"\
        \
        : "+r"(temp_ptr), "+r"(dst), "+g"(count)\
        : "r"(dstStride), "r"(2*dstStride), /*"m"(ff_pw_20), "m"(ff_pw_3),*/ "m"(ROUNDER), "g"(4-6*dstStride)\
        : "memory"\
    );\
}\
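\
    /* Note: both v_lowpass functions above work in two passes: the first\
       asm block unpacks the source bytes to 16-bit words and stores them\
       as contiguous 4-pixel-wide column strips in temp; the second runs\
       QPEL_V_LOW down each strip, so the vertical filter only ever reads\
       temp sequentially. */\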
\
static void OPNAME ## qpel8_mc00_ ## MMX (uint8_t *dst, uint8_t *src, int stride){\
    OPNAME ## pixels8_mmx(dst, src, stride, 8);\
}\
\
static void OPNAME ## qpel8_mc10_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t temp[8];\
    uint8_t * const half= (uint8_t*)temp;\
    put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(half, src, 8, stride, 8);\
    OPNAME ## pixels8_l2_mmx(dst, src, half, stride, stride, 8);\
}\
\
static void OPNAME ## qpel8_mc20_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    OPNAME ## mpeg4_qpel8_h_lowpass_ ## MMX(dst, src, stride, stride, 8);\
}\
\
static void OPNAME ## qpel8_mc30_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t temp[8];\
    uint8_t * const half= (uint8_t*)temp;\
    put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(half, src, 8, stride, 8);\
    OPNAME ## pixels8_l2_mmx(dst, src+1, half, stride, stride, 8);\
}\
\
static void OPNAME ## qpel8_mc01_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t temp[8];\
    uint8_t * const half= (uint8_t*)temp;\
    put ## RND ## mpeg4_qpel8_v_lowpass_ ## MMX(half, src, 8, stride);\
    OPNAME ## pixels8_l2_mmx(dst, src, half, stride, stride, 8);\
}\
\
static void OPNAME ## qpel8_mc02_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    OPNAME ## mpeg4_qpel8_v_lowpass_ ## MMX(dst, src, stride, stride);\
}\
\
static void OPNAME ## qpel8_mc03_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t temp[8];\
    uint8_t * const half= (uint8_t*)temp;\
    put ## RND ## mpeg4_qpel8_v_lowpass_ ## MMX(half, src, 8, stride);\
    OPNAME ## pixels8_l2_mmx(dst, src+stride, half, stride, stride, 8);\
}\
static void OPNAME ## qpel8_mc11_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t half[8 + 9];\
    uint8_t * const halfH= ((uint8_t*)half) + 64;\
    uint8_t * const halfHV= ((uint8_t*)half);\
    put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8, stride, 9);\
    put ## RND ## pixels8_l2_mmx(halfH, src, halfH, 8, stride, 9);\
    put ## RND ## mpeg4_qpel8_v_lowpass_ ## MMX(halfHV, halfH, 8, 8);\
    OPNAME ## pixels8_l2_mmx(dst, halfH, halfHV, stride, 8, 8);\
}\
static void OPNAME ## qpel8_mc31_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t half[8 + 9];\
    uint8_t * const halfH= ((uint8_t*)half) + 64;\
    uint8_t * const halfHV= ((uint8_t*)half);\
    put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8, stride, 9);\
    put ## RND ## pixels8_l2_mmx(halfH, src+1, halfH, 8, stride, 9);\
    put ## RND ## mpeg4_qpel8_v_lowpass_ ## MMX(halfHV, halfH, 8, 8);\
    OPNAME ## pixels8_l2_mmx(dst, halfH, halfHV, stride, 8, 8);\
}\
static void OPNAME ## qpel8_mc13_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t half[8 + 9];\
    uint8_t * const halfH= ((uint8_t*)half) + 64;\
    uint8_t * const halfHV= ((uint8_t*)half);\
    put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8, stride, 9);\
    put ## RND ## pixels8_l2_mmx(halfH, src, halfH, 8, stride, 9);\
    put ## RND ## mpeg4_qpel8_v_lowpass_ ## MMX(halfHV, halfH, 8, 8);\
    OPNAME ## pixels8_l2_mmx(dst, halfH+8, halfHV, stride, 8, 8);\
}\
static void OPNAME ## qpel8_mc33_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t half[8 + 9];\
    uint8_t * const halfH= ((uint8_t*)half) + 64;\
    uint8_t * const halfHV= ((uint8_t*)half);\
    put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8, stride, 9);\
    put ## RND ## pixels8_l2_mmx(halfH, src+1, halfH, 8, stride, 9);\
    put ## RND ## mpeg4_qpel8_v_lowpass_ ## MMX(halfHV, halfH, 8, 8);\
    OPNAME ## pixels8_l2_mmx(dst, halfH+8, halfHV, stride, 8, 8);\
}\
static void OPNAME ## qpel8_mc21_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t half[8 + 9];\
    uint8_t * const halfH= ((uint8_t*)half) + 64;\
    uint8_t * const halfHV= ((uint8_t*)half);\
    put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8, stride, 9);\
    put ## RND ## mpeg4_qpel8_v_lowpass_ ## MMX(halfHV, halfH, 8, 8);\
    OPNAME ## pixels8_l2_mmx(dst, halfH, halfHV, stride, 8, 8);\
}\
static void OPNAME ## qpel8_mc23_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t half[8 + 9];\
    uint8_t * const halfH= ((uint8_t*)half) + 64;\
    uint8_t * const halfHV= ((uint8_t*)half);\
    put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8, stride, 9);\
    put ## RND ## mpeg4_qpel8_v_lowpass_ ## MMX(halfHV, halfH, 8, 8);\
    OPNAME ## pixels8_l2_mmx(dst, halfH+8, halfHV, stride, 8, 8);\
}\
static void OPNAME ## qpel8_mc12_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t half[8 + 9];\
    uint8_t * const halfH= ((uint8_t*)half);\
    put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8, stride, 9);\
    put ## RND ## pixels8_l2_mmx(halfH, src, halfH, 8, stride, 9);\
    OPNAME ## mpeg4_qpel8_v_lowpass_ ## MMX(dst, halfH, stride, 8);\
}\
static void OPNAME ## qpel8_mc32_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t half[8 + 9];\
    uint8_t * const halfH= ((uint8_t*)half);\
    put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8, stride, 9);\
    put ## RND ## pixels8_l2_mmx(halfH, src+1, halfH, 8, stride, 9);\
    OPNAME ## mpeg4_qpel8_v_lowpass_ ## MMX(dst, halfH, stride, 8);\
}\
static void OPNAME ## qpel8_mc22_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t half[9];\
    uint8_t * const halfH= ((uint8_t*)half);\
    put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8, stride, 9);\
    OPNAME ## mpeg4_qpel8_v_lowpass_ ## MMX(dst, halfH, stride, 8);\
}\
static void OPNAME ## qpel16_mc00_ ## MMX (uint8_t *dst, uint8_t *src, int stride){\
    OPNAME ## pixels16_mmx(dst, src, stride, 16);\
}\
\
static void OPNAME ## qpel16_mc10_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t temp[32];\
    uint8_t * const half= (uint8_t*)temp;\
    put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(half, src, 16, stride, 16);\
    OPNAME ## pixels16_l2_mmx(dst, src, half, stride, stride, 16);\
}\
\
static void OPNAME ## qpel16_mc20_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    OPNAME ## mpeg4_qpel16_h_lowpass_ ## MMX(dst, src, stride, stride, 16);\
}\
\
static void OPNAME ## qpel16_mc30_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t temp[32];\
    uint8_t * const half= (uint8_t*)temp;\
    put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(half, src, 16, stride, 16);\
    OPNAME ## pixels16_l2_mmx(dst, src+1, half, stride, stride, 16);\
}\
\
static void OPNAME ## qpel16_mc01_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t temp[32];\
    uint8_t * const half= (uint8_t*)temp;\
    put ## RND ## mpeg4_qpel16_v_lowpass_ ## MMX(half, src, 16, stride);\
    OPNAME ## pixels16_l2_mmx(dst, src, half, stride, stride, 16);\
}\
\
static void OPNAME ## qpel16_mc02_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    OPNAME ## mpeg4_qpel16_v_lowpass_ ## MMX(dst, src, stride, stride);\
}\
\
static void OPNAME ## qpel16_mc03_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t temp[32];\
    uint8_t * const half= (uint8_t*)temp;\
    put ## RND ## mpeg4_qpel16_v_lowpass_ ## MMX(half, src, 16, stride);\
    OPNAME ## pixels16_l2_mmx(dst, src+stride, half, stride, stride, 16);\
}\
static void OPNAME ## qpel16_mc11_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t half[16*2 + 17*2];\
    uint8_t * const halfH= ((uint8_t*)half) + 256;\
    uint8_t * const halfHV= ((uint8_t*)half);\
    put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16, stride, 17);\
    put ## RND ## pixels16_l2_mmx(halfH, src, halfH, 16, stride, 17);\
    put ## RND ## mpeg4_qpel16_v_lowpass_ ## MMX(halfHV, halfH, 16, 16);\
    OPNAME ## pixels16_l2_mmx(dst, halfH, halfHV, stride, 16, 16);\
}\
static void OPNAME ## qpel16_mc31_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t half[16*2 + 17*2];\
    uint8_t * const halfH= ((uint8_t*)half) + 256;\
    uint8_t * const halfHV= ((uint8_t*)half);\
    put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16, stride, 17);\
    put ## RND ## pixels16_l2_mmx(halfH, src+1, halfH, 16, stride, 17);\
    put ## RND ## mpeg4_qpel16_v_lowpass_ ## MMX(halfHV, halfH, 16, 16);\
    OPNAME ## pixels16_l2_mmx(dst, halfH, halfHV, stride, 16, 16);\
}\
static void OPNAME ## qpel16_mc13_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t half[16*2 + 17*2];\
    uint8_t * const halfH= ((uint8_t*)half) + 256;\
    uint8_t * const halfHV= ((uint8_t*)half);\
    put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16, stride, 17);\
    put ## RND ## pixels16_l2_mmx(halfH, src, halfH, 16, stride, 17);\
    put ## RND ## mpeg4_qpel16_v_lowpass_ ## MMX(halfHV, halfH, 16, 16);\
    OPNAME ## pixels16_l2_mmx(dst, halfH+16, halfHV, stride, 16, 16);\
}\
static void OPNAME ## qpel16_mc33_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t half[16*2 + 17*2];\
    uint8_t * const halfH= ((uint8_t*)half) + 256;\
    uint8_t * const halfHV= ((uint8_t*)half);\
    put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16, stride, 17);\
    put ## RND ## pixels16_l2_mmx(halfH, src+1, halfH, 16, stride, 17);\
    put ## RND ## mpeg4_qpel16_v_lowpass_ ## MMX(halfHV, halfH, 16, 16);\
    OPNAME ## pixels16_l2_mmx(dst, halfH+16, halfHV, stride, 16, 16);\
}\
static void OPNAME ## qpel16_mc21_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t half[16*2 + 17*2];\
    uint8_t * const halfH= ((uint8_t*)half) + 256;\
    uint8_t * const halfHV= ((uint8_t*)half);\
    put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16, stride, 17);\
    put ## RND ## mpeg4_qpel16_v_lowpass_ ## MMX(halfHV, halfH, 16, 16);\
    OPNAME ## pixels16_l2_mmx(dst, halfH, halfHV, stride, 16, 16);\
}\
static void OPNAME ## qpel16_mc23_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t half[16*2 + 17*2];\
    uint8_t * const halfH= ((uint8_t*)half) + 256;\
    uint8_t * const halfHV= ((uint8_t*)half);\
    put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16, stride, 17);\
    put ## RND ## mpeg4_qpel16_v_lowpass_ ## MMX(halfHV, halfH, 16, 16);\
    OPNAME ## pixels16_l2_mmx(dst, halfH+16, halfHV, stride, 16, 16);\
}\
static void OPNAME ## qpel16_mc12_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t half[17*2];\
    uint8_t * const halfH= ((uint8_t*)half);\
    put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16, stride, 17);\
    put ## RND ## pixels16_l2_mmx(halfH, src, halfH, 16, stride, 17);\
    OPNAME ## mpeg4_qpel16_v_lowpass_ ## MMX(dst, halfH, stride, 16);\
}\
static void OPNAME ## qpel16_mc32_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t half[17*2];\
    uint8_t * const halfH= ((uint8_t*)half);\
    put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16, stride, 17);\
    put ## RND ## pixels16_l2_mmx(halfH, src+1, halfH, 16, stride, 17);\
    OPNAME ## mpeg4_qpel16_v_lowpass_ ## MMX(dst, halfH, stride, 16);\
}\
static void OPNAME ## qpel16_mc22_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t half[17*2];\
    uint8_t * const halfH= ((uint8_t*)half);\
    put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16, stride, 17);\
    OPNAME ## mpeg4_qpel16_v_lowpass_ ## MMX(dst, halfH, stride, 16);\
}
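
/* Naming: qpelN_mcXY computes the prediction for quarter-pel position
 * (X/4, Y/4) of an NxN block, X horizontal, Y vertical.  Every position
 * is built from at most three primitives: the h_lowpass filter, the
 * v_lowpass filter and pixels_l2 averaging of two planes; e.g. mc10
 * averages the source with the horizontally filtered plane, and mc11
 * averages a source-averaged halfH with the h+v filtered halfHV.
 */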
#define PUT_OP(a,b,temp, size) "mov" #size " " #a ", " #b "        \n\t"
#define AVG_3DNOW_OP(a,b,temp, size) \
"mov" #size " " #b ", " #temp "        \n\t"\
"pavgusb " #temp ", " #a "        \n\t"\
"mov" #size " " #a ", " #b "        \n\t"
#define AVG_MMX2_OP(a,b,temp, size) \
"mov" #size " " #b ", " #temp "        \n\t"\
"pavgb " #temp ", " #a "        \n\t"\
"mov" #size " " #a ", " #b "        \n\t"
QPEL_BASE(put_       , ff_pw_16, _       , PUT_OP, PUT_OP)
QPEL_BASE(avg_       , ff_pw_16, _       , AVG_MMX2_OP, AVG_3DNOW_OP)
QPEL_BASE(put_no_rnd_, ff_pw_15, _no_rnd_, PUT_OP, PUT_OP)
QPEL_OP(put_       , ff_pw_16, _       , PUT_OP, 3dnow)
QPEL_OP(avg_       , ff_pw_16, _       , AVG_3DNOW_OP, 3dnow)
QPEL_OP(put_no_rnd_, ff_pw_15, _no_rnd_, PUT_OP, 3dnow)
QPEL_OP(put_       , ff_pw_16, _       , PUT_OP, mmx2)
QPEL_OP(avg_       , ff_pw_16, _       , AVG_MMX2_OP, mmx2)
QPEL_OP(put_no_rnd_, ff_pw_15, _no_rnd_, PUT_OP, mmx2)

#if 0
static void just_return() { return; }
#endif

#define SET_QPEL_FUNC(postfix1, postfix2) \
    c->put_ ## postfix1 = put_ ## postfix2;\
    c->put_no_rnd_ ## postfix1 = put_no_rnd_ ## postfix2;\
    c->avg_ ## postfix1 = avg_ ## postfix2;
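
/* One SET_QPEL_FUNC invocation wires all three operation variants
   (put_, put_no_rnd_, avg_) of a given qpel position into the context. */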
/* external functions, from idct_mmx.c */
void ff_mmx_idct(DCTELEM *block);
void ff_mmxext_idct(DCTELEM *block);

/* XXX: those functions should be suppressed ASAP when all IDCTs are
   converted */
static void ff_libmpeg2mmx_idct_put(uint8_t *dest, int line_size, DCTELEM *block)
{
    ff_mmx_idct (block);
    put_pixels_clamped_mmx(block, dest, line_size);
}
static void ff_libmpeg2mmx_idct_add(uint8_t *dest, int line_size, DCTELEM *block)
{
    ff_mmx_idct (block);
    add_pixels_clamped_mmx(block, dest, line_size);
}
static void ff_libmpeg2mmx2_idct_put(uint8_t *dest, int line_size, DCTELEM *block)
{
    ff_mmxext_idct (block);
    put_pixels_clamped_mmx(block, dest, line_size);
}
static void ff_libmpeg2mmx2_idct_add(uint8_t *dest, int line_size, DCTELEM *block)
{
    ff_mmxext_idct (block);
    add_pixels_clamped_mmx(block, dest, line_size);
}
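
/* The _put wrappers overwrite the destination with the clamped IDCT
   result (intra blocks), while the _add wrappers sum the result onto the
   existing prediction (inter blocks). */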
void dsputil_init_mmx(DSPContext* c, AVCodecContext *avctx)
{
    mm_flags = mm_support();

    if (avctx->dsp_mask) {
        if (avctx->dsp_mask & FF_MM_FORCE)
            mm_flags |= (avctx->dsp_mask & 0xffff);
        else
            mm_flags &= ~(avctx->dsp_mask & 0xffff);
    }
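
    /* dsp_mask overrides CPU detection: with FF_MM_FORCE set, the low 16
       bits are OR-ed into mm_flags (force-enable); otherwise they are
       cleared from it (force-disable).  E.g. dsp_mask = FF_MM_FORCE |
       MM_MMX forces the MMX code paths on. */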
#if 0
    fprintf(stderr, "libavcodec: CPU flags:");
    if (mm_flags & MM_MMX)
        fprintf(stderr, " mmx");
    if (mm_flags & MM_MMXEXT)
        fprintf(stderr, " mmxext");
    if (mm_flags & MM_3DNOW)
        fprintf(stderr, " 3dnow");
    if (mm_flags & MM_SSE)
        fprintf(stderr, " sse");
    if (mm_flags & MM_SSE2)
        fprintf(stderr, " sse2");
    fprintf(stderr, "\n");
#endif

    if (mm_flags & MM_MMX) {
        const int dct_algo = avctx->dct_algo;
        const int idct_algo= avctx->idct_algo;

#ifdef CONFIG_ENCODERS
        if(dct_algo==FF_DCT_AUTO || dct_algo==FF_DCT_MMX){
            if(mm_flags & MM_MMXEXT){
                c->fdct = ff_fdct_mmx2;
            }else{
                c->fdct = ff_fdct_mmx;
            }
        }
#endif //CONFIG_ENCODERS

        if(idct_algo==FF_IDCT_AUTO || idct_algo==FF_IDCT_SIMPLEMMX){
            c->idct_put= ff_simple_idct_put_mmx;
            c->idct_add= ff_simple_idct_add_mmx;
            c->idct    = ff_simple_idct_mmx;
            c->idct_permutation_type= FF_SIMPLE_IDCT_PERM;
        }else if(idct_algo==FF_IDCT_LIBMPEG2MMX){
            if(mm_flags & MM_MMXEXT){
                c->idct_put= ff_libmpeg2mmx2_idct_put;
                c->idct_add= ff_libmpeg2mmx2_idct_add;
                c->idct    = ff_mmxext_idct;
            }else{
                c->idct_put= ff_libmpeg2mmx_idct_put;
                c->idct_add= ff_libmpeg2mmx_idct_add;
                c->idct    = ff_mmx_idct;
            }
            c->idct_permutation_type= FF_LIBMPEG2_IDCT_PERM;
        }
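
        /* idct_permutation_type tells the generic code how input
           coefficients must be reordered, since the simple MMX and
           libmpeg2 IDCTs expect different coefficient layouts. */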
2059
        
2060
#ifdef CONFIG_ENCODERS
        c->get_pixels = get_pixels_mmx;
        c->diff_pixels = diff_pixels_mmx;
#endif //CONFIG_ENCODERS
        c->put_pixels_clamped = put_pixels_clamped_mmx;
        c->add_pixels_clamped = add_pixels_clamped_mmx;
        c->clear_blocks = clear_blocks_mmx;
#ifdef CONFIG_ENCODERS
        c->pix_sum = pix_sum16_mmx;
#endif //CONFIG_ENCODERS

        c->put_pixels_tab[0][0] = put_pixels16_mmx;
        c->put_pixels_tab[0][1] = put_pixels16_x2_mmx;
        c->put_pixels_tab[0][2] = put_pixels16_y2_mmx;
        c->put_pixels_tab[0][3] = put_pixels16_xy2_mmx;

        c->put_no_rnd_pixels_tab[0][0] = put_pixels16_mmx;
        c->put_no_rnd_pixels_tab[0][1] = put_no_rnd_pixels16_x2_mmx;
        c->put_no_rnd_pixels_tab[0][2] = put_no_rnd_pixels16_y2_mmx;
        c->put_no_rnd_pixels_tab[0][3] = put_no_rnd_pixels16_xy2_mmx;

        c->avg_pixels_tab[0][0] = avg_pixels16_mmx;
        c->avg_pixels_tab[0][1] = avg_pixels16_x2_mmx;
        c->avg_pixels_tab[0][2] = avg_pixels16_y2_mmx;
        c->avg_pixels_tab[0][3] = avg_pixels16_xy2_mmx;

        c->avg_no_rnd_pixels_tab[0][0] = avg_no_rnd_pixels16_mmx;
        c->avg_no_rnd_pixels_tab[0][1] = avg_no_rnd_pixels16_x2_mmx;
        c->avg_no_rnd_pixels_tab[0][2] = avg_no_rnd_pixels16_y2_mmx;
        c->avg_no_rnd_pixels_tab[0][3] = avg_no_rnd_pixels16_xy2_mmx;

        c->put_pixels_tab[1][0] = put_pixels8_mmx;
        c->put_pixels_tab[1][1] = put_pixels8_x2_mmx;
        c->put_pixels_tab[1][2] = put_pixels8_y2_mmx;
        c->put_pixels_tab[1][3] = put_pixels8_xy2_mmx;

        c->put_no_rnd_pixels_tab[1][0] = put_pixels8_mmx;
        c->put_no_rnd_pixels_tab[1][1] = put_no_rnd_pixels8_x2_mmx;
        c->put_no_rnd_pixels_tab[1][2] = put_no_rnd_pixels8_y2_mmx;
        c->put_no_rnd_pixels_tab[1][3] = put_no_rnd_pixels8_xy2_mmx;

        c->avg_pixels_tab[1][0] = avg_pixels8_mmx;
        c->avg_pixels_tab[1][1] = avg_pixels8_x2_mmx;
        c->avg_pixels_tab[1][2] = avg_pixels8_y2_mmx;
        c->avg_pixels_tab[1][3] = avg_pixels8_xy2_mmx;

        c->avg_no_rnd_pixels_tab[1][0] = avg_no_rnd_pixels8_mmx;
        c->avg_no_rnd_pixels_tab[1][1] = avg_no_rnd_pixels8_x2_mmx;
        c->avg_no_rnd_pixels_tab[1][2] = avg_no_rnd_pixels8_y2_mmx;
        c->avg_no_rnd_pixels_tab[1][3] = avg_no_rnd_pixels8_xy2_mmx;

        c->add_bytes= add_bytes_mmx;
#ifdef CONFIG_ENCODERS
        c->diff_bytes= diff_bytes_mmx;

        c->hadamard8_diff[0]= hadamard8_diff16_mmx;
        c->hadamard8_diff[1]= hadamard8_diff_mmx;

        c->pix_norm1 = pix_norm1_mmx;
        c->sse[0] = sse16_mmx;
        c->vsad[4]= vsad_intra16_mmx;

        if(!(avctx->flags & CODEC_FLAG_BITEXACT)){
            c->vsad[0] = vsad16_mmx;
        }
#endif //CONFIG_ENCODERS

        c->h263_v_loop_filter= h263_v_loop_filter_mmx;
        c->h263_h_loop_filter= h263_h_loop_filter_mmx;

        if (mm_flags & MM_MMXEXT) {
            c->put_pixels_tab[0][1] = put_pixels16_x2_mmx2;
            c->put_pixels_tab[0][2] = put_pixels16_y2_mmx2;

            c->avg_pixels_tab[0][0] = avg_pixels16_mmx2;
            c->avg_pixels_tab[0][1] = avg_pixels16_x2_mmx2;
            c->avg_pixels_tab[0][2] = avg_pixels16_y2_mmx2;

            c->put_pixels_tab[1][1] = put_pixels8_x2_mmx2;
            c->put_pixels_tab[1][2] = put_pixels8_y2_mmx2;

            c->avg_pixels_tab[1][0] = avg_pixels8_mmx2;
            c->avg_pixels_tab[1][1] = avg_pixels8_x2_mmx2;
            c->avg_pixels_tab[1][2] = avg_pixels8_y2_mmx2;

#ifdef CONFIG_ENCODERS
            c->hadamard8_diff[0]= hadamard8_diff16_mmx2;
            c->hadamard8_diff[1]= hadamard8_diff_mmx2;
            c->vsad[4]= vsad_intra16_mmx2;
#endif //CONFIG_ENCODERS

            if(!(avctx->flags & CODEC_FLAG_BITEXACT)){
                c->put_no_rnd_pixels_tab[0][1] = put_no_rnd_pixels16_x2_mmx2;
                c->put_no_rnd_pixels_tab[0][2] = put_no_rnd_pixels16_y2_mmx2;
                c->put_no_rnd_pixels_tab[1][1] = put_no_rnd_pixels8_x2_mmx2;
                c->put_no_rnd_pixels_tab[1][2] = put_no_rnd_pixels8_y2_mmx2;
                c->avg_pixels_tab[0][3] = avg_pixels16_xy2_mmx2;
                c->avg_pixels_tab[1][3] = avg_pixels8_xy2_mmx2;
                c->vsad[0] = vsad16_mmx2;
            }
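
            /* These x2/y2/xy2 variants round differently from the C
               reference code, so they are only installed when the caller
               did not request CODEC_FLAG_BITEXACT output. */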

#if 1
            SET_QPEL_FUNC(qpel_pixels_tab[0][ 0], qpel16_mc00_mmx2)
            SET_QPEL_FUNC(qpel_pixels_tab[0][ 1], qpel16_mc10_mmx2)
            SET_QPEL_FUNC(qpel_pixels_tab[0][ 2], qpel16_mc20_mmx2)
            SET_QPEL_FUNC(qpel_pixels_tab[0][ 3], qpel16_mc30_mmx2)
            SET_QPEL_FUNC(qpel_pixels_tab[0][ 4], qpel16_mc01_mmx2)
            SET_QPEL_FUNC(qpel_pixels_tab[0][ 5], qpel16_mc11_mmx2)
            SET_QPEL_FUNC(qpel_pixels_tab[0][ 6], qpel16_mc21_mmx2)
            SET_QPEL_FUNC(qpel_pixels_tab[0][ 7], qpel16_mc31_mmx2)
            SET_QPEL_FUNC(qpel_pixels_tab[0][ 8], qpel16_mc02_mmx2)
            SET_QPEL_FUNC(qpel_pixels_tab[0][ 9], qpel16_mc12_mmx2)
            SET_QPEL_FUNC(qpel_pixels_tab[0][10], qpel16_mc22_mmx2)
            SET_QPEL_FUNC(qpel_pixels_tab[0][11], qpel16_mc32_mmx2)
            SET_QPEL_FUNC(qpel_pixels_tab[0][12], qpel16_mc03_mmx2)
            SET_QPEL_FUNC(qpel_pixels_tab[0][13], qpel16_mc13_mmx2)
            SET_QPEL_FUNC(qpel_pixels_tab[0][14], qpel16_mc23_mmx2)
            SET_QPEL_FUNC(qpel_pixels_tab[0][15], qpel16_mc33_mmx2)
            SET_QPEL_FUNC(qpel_pixels_tab[1][ 0], qpel8_mc00_mmx2)
            SET_QPEL_FUNC(qpel_pixels_tab[1][ 1], qpel8_mc10_mmx2)
            SET_QPEL_FUNC(qpel_pixels_tab[1][ 2], qpel8_mc20_mmx2)
            SET_QPEL_FUNC(qpel_pixels_tab[1][ 3], qpel8_mc30_mmx2)
            SET_QPEL_FUNC(qpel_pixels_tab[1][ 4], qpel8_mc01_mmx2)
            SET_QPEL_FUNC(qpel_pixels_tab[1][ 5], qpel8_mc11_mmx2)
            SET_QPEL_FUNC(qpel_pixels_tab[1][ 6], qpel8_mc21_mmx2)
            SET_QPEL_FUNC(qpel_pixels_tab[1][ 7], qpel8_mc31_mmx2)
            SET_QPEL_FUNC(qpel_pixels_tab[1][ 8], qpel8_mc02_mmx2)
            SET_QPEL_FUNC(qpel_pixels_tab[1][ 9], qpel8_mc12_mmx2)
            SET_QPEL_FUNC(qpel_pixels_tab[1][10], qpel8_mc22_mmx2)
            SET_QPEL_FUNC(qpel_pixels_tab[1][11], qpel8_mc32_mmx2)
            SET_QPEL_FUNC(qpel_pixels_tab[1][12], qpel8_mc03_mmx2)
            SET_QPEL_FUNC(qpel_pixels_tab[1][13], qpel8_mc13_mmx2)
            SET_QPEL_FUNC(qpel_pixels_tab[1][14], qpel8_mc23_mmx2)
            SET_QPEL_FUNC(qpel_pixels_tab[1][15], qpel8_mc33_mmx2)
#endif

#ifdef CONFIG_ENCODERS
            c->sub_hfyu_median_prediction= sub_hfyu_median_prediction_mmx2;
#endif //CONFIG_ENCODERS
        } else if (mm_flags & MM_3DNOW) {
            c->put_pixels_tab[0][1] = put_pixels16_x2_3dnow;
            c->put_pixels_tab[0][2] = put_pixels16_y2_3dnow;

            c->avg_pixels_tab[0][0] = avg_pixels16_3dnow;
            c->avg_pixels_tab[0][1] = avg_pixels16_x2_3dnow;
            c->avg_pixels_tab[0][2] = avg_pixels16_y2_3dnow;

            c->put_pixels_tab[1][1] = put_pixels8_x2_3dnow;
            c->put_pixels_tab[1][2] = put_pixels8_y2_3dnow;

            c->avg_pixels_tab[1][0] = avg_pixels8_3dnow;
            c->avg_pixels_tab[1][1] = avg_pixels8_x2_3dnow;
            c->avg_pixels_tab[1][2] = avg_pixels8_y2_3dnow;

            if(!(avctx->flags & CODEC_FLAG_BITEXACT)){
                c->put_no_rnd_pixels_tab[0][1] = put_no_rnd_pixels16_x2_3dnow;
                c->put_no_rnd_pixels_tab[0][2] = put_no_rnd_pixels16_y2_3dnow;
                c->put_no_rnd_pixels_tab[1][1] = put_no_rnd_pixels8_x2_3dnow;
                c->put_no_rnd_pixels_tab[1][2] = put_no_rnd_pixels8_y2_3dnow;
                c->avg_pixels_tab[0][3] = avg_pixels16_xy2_3dnow;
                c->avg_pixels_tab[1][3] = avg_pixels8_xy2_3dnow;
            }

            SET_QPEL_FUNC(qpel_pixels_tab[0][ 0], qpel16_mc00_3dnow)
            SET_QPEL_FUNC(qpel_pixels_tab[0][ 1], qpel16_mc10_3dnow)
            SET_QPEL_FUNC(qpel_pixels_tab[0][ 2], qpel16_mc20_3dnow)
            SET_QPEL_FUNC(qpel_pixels_tab[0][ 3], qpel16_mc30_3dnow)
            SET_QPEL_FUNC(qpel_pixels_tab[0][ 4], qpel16_mc01_3dnow)
            SET_QPEL_FUNC(qpel_pixels_tab[0][ 5], qpel16_mc11_3dnow)
            SET_QPEL_FUNC(qpel_pixels_tab[0][ 6], qpel16_mc21_3dnow)
            SET_QPEL_FUNC(qpel_pixels_tab[0][ 7], qpel16_mc31_3dnow)
            SET_QPEL_FUNC(qpel_pixels_tab[0][ 8], qpel16_mc02_3dnow)
            SET_QPEL_FUNC(qpel_pixels_tab[0][ 9], qpel16_mc12_3dnow)
            SET_QPEL_FUNC(qpel_pixels_tab[0][10], qpel16_mc22_3dnow)
            SET_QPEL_FUNC(qpel_pixels_tab[0][11], qpel16_mc32_3dnow)
            SET_QPEL_FUNC(qpel_pixels_tab[0][12], qpel16_mc03_3dnow)
            SET_QPEL_FUNC(qpel_pixels_tab[0][13], qpel16_mc13_3dnow)
            SET_QPEL_FUNC(qpel_pixels_tab[0][14], qpel16_mc23_3dnow)
            SET_QPEL_FUNC(qpel_pixels_tab[0][15], qpel16_mc33_3dnow)
            SET_QPEL_FUNC(qpel_pixels_tab[1][ 0], qpel8_mc00_3dnow)
            SET_QPEL_FUNC(qpel_pixels_tab[1][ 1], qpel8_mc10_3dnow)
            SET_QPEL_FUNC(qpel_pixels_tab[1][ 2], qpel8_mc20_3dnow)
            SET_QPEL_FUNC(qpel_pixels_tab[1][ 3], qpel8_mc30_3dnow)
            SET_QPEL_FUNC(qpel_pixels_tab[1][ 4], qpel8_mc01_3dnow)
            SET_QPEL_FUNC(qpel_pixels_tab[1][ 5], qpel8_mc11_3dnow)
            SET_QPEL_FUNC(qpel_pixels_tab[1][ 6], qpel8_mc21_3dnow)
            SET_QPEL_FUNC(qpel_pixels_tab[1][ 7], qpel8_mc31_3dnow)
            SET_QPEL_FUNC(qpel_pixels_tab[1][ 8], qpel8_mc02_3dnow)
            SET_QPEL_FUNC(qpel_pixels_tab[1][ 9], qpel8_mc12_3dnow)
            SET_QPEL_FUNC(qpel_pixels_tab[1][10], qpel8_mc22_3dnow)
            SET_QPEL_FUNC(qpel_pixels_tab[1][11], qpel8_mc32_3dnow)
            SET_QPEL_FUNC(qpel_pixels_tab[1][12], qpel8_mc03_3dnow)
            SET_QPEL_FUNC(qpel_pixels_tab[1][13], qpel8_mc13_3dnow)
            SET_QPEL_FUNC(qpel_pixels_tab[1][14], qpel8_mc23_3dnow)
            SET_QPEL_FUNC(qpel_pixels_tab[1][15], qpel8_mc33_3dnow)
        }
    }

#ifdef CONFIG_ENCODERS
    dsputil_init_pix_mmx(c, avctx);
#endif //CONFIG_ENCODERS
#if 0
    // for speed testing
    get_pixels = just_return;
    put_pixels_clamped = just_return;
    add_pixels_clamped = just_return;

    pix_abs16x16 = just_return;
    pix_abs16x16_x2 = just_return;
    pix_abs16x16_y2 = just_return;
    pix_abs16x16_xy2 = just_return;

    put_pixels_tab[0] = just_return;
    put_pixels_tab[1] = just_return;
    put_pixels_tab[2] = just_return;
    put_pixels_tab[3] = just_return;

    put_no_rnd_pixels_tab[0] = just_return;
    put_no_rnd_pixels_tab[1] = just_return;
    put_no_rnd_pixels_tab[2] = just_return;
    put_no_rnd_pixels_tab[3] = just_return;

    avg_pixels_tab[0] = just_return;
    avg_pixels_tab[1] = just_return;
    avg_pixels_tab[2] = just_return;
    avg_pixels_tab[3] = just_return;

    avg_no_rnd_pixels_tab[0] = just_return;
    avg_no_rnd_pixels_tab[1] = just_return;
    avg_no_rnd_pixels_tab[2] = just_return;
    avg_no_rnd_pixels_tab[3] = just_return;

    //av_fdct = just_return;
    //ff_idct = just_return;
#endif
}