ffmpeg / libavcodec / i386 / dsputil_mmx.c @ 79396ac6

/*
 * MMX optimized DSP utils
 * Copyright (c) 2000, 2001 Fabrice Bellard.
 * Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 *
 * MMX optimization by Nick Kurshev <nickols_k@mail.ru>
 */

#include "../dsputil.h"
#include "../simple_idct.h"
#include "../mpegvideo.h"
#include "mmx.h"

//#undef NDEBUG
//#include <assert.h>

extern const uint8_t ff_h263_loop_filter_strength[32];

int mm_flags; /* multimedia extension flags */

/* pixel operations */
static const uint64_t mm_bone attribute_used __attribute__ ((aligned(8))) = 0x0101010101010101ULL;
static const uint64_t mm_wone attribute_used __attribute__ ((aligned(8))) = 0x0001000100010001ULL;
static const uint64_t mm_wtwo attribute_used __attribute__ ((aligned(8))) = 0x0002000200020002ULL;

static const uint64_t ff_pw_20 attribute_used __attribute__ ((aligned(8))) = 0x0014001400140014ULL;
static const uint64_t ff_pw_3  attribute_used __attribute__ ((aligned(8))) = 0x0003000300030003ULL;
static const uint64_t ff_pw_4  attribute_used __attribute__ ((aligned(8))) = 0x0004000400040004ULL;
static const uint64_t ff_pw_5  attribute_used __attribute__ ((aligned(8))) = 0x0005000500050005ULL;
static const uint64_t ff_pw_16 attribute_used __attribute__ ((aligned(8))) = 0x0010001000100010ULL;
static const uint64_t ff_pw_32 attribute_used __attribute__ ((aligned(8))) = 0x0020002000200020ULL;
static const uint64_t ff_pw_64 attribute_used __attribute__ ((aligned(8))) = 0x0040004000400040ULL;
static const uint64_t ff_pw_15 attribute_used __attribute__ ((aligned(8))) = 0x000F000F000F000FULL;

static const uint64_t ff_pb_3F attribute_used __attribute__ ((aligned(8))) = 0x3F3F3F3F3F3F3F3FULL;
static const uint64_t ff_pb_FC attribute_used __attribute__ ((aligned(8))) = 0xFCFCFCFCFCFCFCFCULL;

#define JUMPALIGN() __asm __volatile (".balign 8"::)
#define MOVQ_ZERO(regd)  __asm __volatile ("pxor %%" #regd ", %%" #regd ::)

#define MOVQ_WONE(regd) \
    __asm __volatile ( \
    "pcmpeqd %%" #regd ", %%" #regd " \n\t" \
    "psrlw $15, %%" #regd ::)

#define MOVQ_BFE(regd) \
    __asm __volatile ( \
    "pcmpeqd %%" #regd ", %%" #regd " \n\t"\
    "paddb %%" #regd ", %%" #regd " \n\t" ::)

#ifndef PIC
#define MOVQ_BONE(regd)  __asm __volatile ("movq %0, %%" #regd " \n\t" ::"m"(mm_bone))
#define MOVQ_WTWO(regd)  __asm __volatile ("movq %0, %%" #regd " \n\t" ::"m"(mm_wtwo))
#else
// for shared libraries it is better to build the constants this way
// pcmpeqd -> -1
#define MOVQ_BONE(regd) \
    __asm __volatile ( \
    "pcmpeqd %%" #regd ", %%" #regd " \n\t" \
    "psrlw $15, %%" #regd " \n\t" \
    "packuswb %%" #regd ", %%" #regd " \n\t" ::)

#define MOVQ_WTWO(regd) \
    __asm __volatile ( \
    "pcmpeqd %%" #regd ", %%" #regd " \n\t" \
    "psrlw $15, %%" #regd " \n\t" \
    "psllw $1, %%" #regd " \n\t"::)

#endif
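
/* In PIC builds the constants are synthesized in registers instead of being
 * loaded from memory, so no absolute address needs a relocation.  The
 * instruction sequences above should leave:
 *   pcmpeqd r,r               -> 0xFFFFFFFFFFFFFFFF
 *   + psrlw $15               -> 0x0001000100010001  (MOVQ_WONE)
 *   + packuswb                -> 0x0101010101010101  (MOVQ_BONE)
 *   + psllw $1 (after psrlw)  -> 0x0002000200020002  (MOVQ_WTWO)
 */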

// using regr as temporary and for the output result
// first argument is unmodified and second is trashed
// regfe is supposed to contain 0xfefefefefefefefe
#define PAVGB_MMX_NO_RND(rega, regb, regr, regfe) \
    "movq " #rega ", " #regr "        \n\t"\
    "pand " #regb ", " #regr "        \n\t"\
    "pxor " #rega ", " #regb "        \n\t"\
    "pand " #regfe "," #regb "        \n\t"\
    "psrlq $1, " #regb "              \n\t"\
    "paddb " #regb ", " #regr "       \n\t"

#define PAVGB_MMX(rega, regb, regr, regfe) \
    "movq " #rega ", " #regr "        \n\t"\
    "por  " #regb ", " #regr "        \n\t"\
    "pxor " #rega ", " #regb "        \n\t"\
    "pand " #regfe "," #regb "        \n\t"\
    "psrlq $1, " #regb "              \n\t"\
    "psubb " #regb ", " #regr "       \n\t"

// mm6 is supposed to contain 0xfefefefefefefefe
#define PAVGBP_MMX_NO_RND(rega, regb, regr,  regc, regd, regp) \
    "movq " #rega ", " #regr "        \n\t"\
    "movq " #regc ", " #regp "        \n\t"\
    "pand " #regb ", " #regr "        \n\t"\
    "pand " #regd ", " #regp "        \n\t"\
    "pxor " #rega ", " #regb "        \n\t"\
    "pxor " #regc ", " #regd "        \n\t"\
    "pand %%mm6, " #regb "            \n\t"\
    "pand %%mm6, " #regd "            \n\t"\
    "psrlq $1, " #regb "              \n\t"\
    "psrlq $1, " #regd "              \n\t"\
    "paddb " #regb ", " #regr "       \n\t"\
    "paddb " #regd ", " #regp "       \n\t"

#define PAVGBP_MMX(rega, regb, regr, regc, regd, regp) \
    "movq " #rega ", " #regr "        \n\t"\
    "movq " #regc ", " #regp "        \n\t"\
    "por  " #regb ", " #regr "        \n\t"\
    "por  " #regd ", " #regp "        \n\t"\
    "pxor " #rega ", " #regb "        \n\t"\
    "pxor " #regc ", " #regd "        \n\t"\
    "pand %%mm6, " #regb "            \n\t"\
    "pand %%mm6, " #regd "            \n\t"\
    "psrlq $1, " #regd "              \n\t"\
    "psrlq $1, " #regb "              \n\t"\
    "psubb " #regb ", " #regr "       \n\t"\
    "psubb " #regd ", " #regp "       \n\t"

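/* The PAVGB*_MMX macros emulate byte-wise averaging without the MMX2 pavgb
 * instruction, using the identities (for unsigned bytes a, b):
 *     (a + b) >> 1     == (a & b) + (((a ^ b) & 0xFE) >> 1)   no rounding
 *     (a + b + 1) >> 1 == (a | b) - (((a ^ b) & 0xFE) >> 1)   rounding
 * Scalar sketch (names are only for illustration, the code is not used):
 */
#if 0
static inline uint8_t avg2_no_rnd(uint8_t a, uint8_t b){
    return (a & b) + (((a ^ b) & 0xFE) >> 1);
}
static inline uint8_t avg2_rnd(uint8_t a, uint8_t b){
    return (a | b) - (((a ^ b) & 0xFE) >> 1);
}
#endif
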
/***********************************/
/* MMX no rounding */
#define DEF(x, y) x ## _no_rnd_ ## y ##_mmx
#define SET_RND  MOVQ_WONE
#define PAVGBP(a, b, c, d, e, f)        PAVGBP_MMX_NO_RND(a, b, c, d, e, f)
#define PAVGB(a, b, c, e)               PAVGB_MMX_NO_RND(a, b, c, e)

#include "dsputil_mmx_rnd.h"

#undef DEF
#undef SET_RND
#undef PAVGBP
#undef PAVGB
/***********************************/
/* MMX rounding */

#define DEF(x, y) x ## _ ## y ##_mmx
#define SET_RND  MOVQ_WTWO
#define PAVGBP(a, b, c, d, e, f)        PAVGBP_MMX(a, b, c, d, e, f)
#define PAVGB(a, b, c, e)               PAVGB_MMX(a, b, c, e)

#include "dsputil_mmx_rnd.h"

#undef DEF
#undef SET_RND
#undef PAVGBP
#undef PAVGB

/***********************************/
/* 3Dnow specific */

#define DEF(x) x ## _3dnow
/* for Athlons, PAVGUSB is preferred */
#define PAVGB "pavgusb"

#include "dsputil_mmx_avg.h"

#undef DEF
#undef PAVGB

/***********************************/
/* MMX2 specific */

#define DEF(x) x ## _mmx2

/* Introduced only in MMX2 set */
#define PAVGB "pavgb"

#include "dsputil_mmx_avg.h"

#undef DEF
#undef PAVGB

/***********************************/
/* standard MMX */

#ifdef CONFIG_ENCODERS
static void get_pixels_mmx(DCTELEM *block, const uint8_t *pixels, int line_size)
{
    asm volatile(
        "mov $-128, %%"REG_a"           \n\t"
        "pxor %%mm7, %%mm7              \n\t"
        ".balign 16                     \n\t"
        "1:                             \n\t"
        "movq (%0), %%mm0               \n\t"
        "movq (%0, %2), %%mm2           \n\t"
        "movq %%mm0, %%mm1              \n\t"
        "movq %%mm2, %%mm3              \n\t"
        "punpcklbw %%mm7, %%mm0         \n\t"
        "punpckhbw %%mm7, %%mm1         \n\t"
        "punpcklbw %%mm7, %%mm2         \n\t"
        "punpckhbw %%mm7, %%mm3         \n\t"
        "movq %%mm0, (%1, %%"REG_a")    \n\t"
        "movq %%mm1, 8(%1, %%"REG_a")   \n\t"
        "movq %%mm2, 16(%1, %%"REG_a")  \n\t"
        "movq %%mm3, 24(%1, %%"REG_a")  \n\t"
        "add %3, %0                     \n\t"
        "add $32, %%"REG_a"             \n\t"
        "js 1b                          \n\t"
        : "+r" (pixels)
        : "r" (block+64), "r" ((long)line_size), "r" ((long)line_size*2)
        : "%"REG_a
    );
}

static inline void diff_pixels_mmx(DCTELEM *block, const uint8_t *s1, const uint8_t *s2, int stride)
{
    asm volatile(
        "pxor %%mm7, %%mm7              \n\t"
        "mov $-128, %%"REG_a"           \n\t"
        ".balign 16                     \n\t"
        "1:                             \n\t"
        "movq (%0), %%mm0               \n\t"
        "movq (%1), %%mm2               \n\t"
        "movq %%mm0, %%mm1              \n\t"
        "movq %%mm2, %%mm3              \n\t"
        "punpcklbw %%mm7, %%mm0         \n\t"
        "punpckhbw %%mm7, %%mm1         \n\t"
        "punpcklbw %%mm7, %%mm2         \n\t"
        "punpckhbw %%mm7, %%mm3         \n\t"
        "psubw %%mm2, %%mm0             \n\t"
        "psubw %%mm3, %%mm1             \n\t"
        "movq %%mm0, (%2, %%"REG_a")    \n\t"
        "movq %%mm1, 8(%2, %%"REG_a")   \n\t"
        "add %3, %0                     \n\t"
        "add %3, %1                     \n\t"
        "add $16, %%"REG_a"             \n\t"
        "jnz 1b                         \n\t"
        : "+r" (s1), "+r" (s2)
        : "r" (block+64), "r" ((long)stride)
        : "%"REG_a
    );
}
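
/* Scalar sketches of the two encoder helpers above (illustrative only, the
 * *_c_ref names are not part of the codebase):
 *   get_pixels:  widen an 8x8 block of bytes to 16-bit DCT coefficients
 *   diff_pixels: store the 8x8 difference s1 - s2 as 16-bit values
 */
#if 0
static void get_pixels_c_ref(DCTELEM *block, const uint8_t *pixels, int line_size){
    int i, j;
    for(i=0; i<8; i++){
        for(j=0; j<8; j++)
            block[i*8 + j] = pixels[j];
        pixels += line_size;
    }
}
static void diff_pixels_c_ref(DCTELEM *block, const uint8_t *s1, const uint8_t *s2, int stride){
    int i, j;
    for(i=0; i<8; i++){
        for(j=0; j<8; j++)
            block[i*8 + j] = s1[j] - s2[j];
        s1 += stride;
        s2 += stride;
    }
}
#endif
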
#endif //CONFIG_ENCODERS

void put_pixels_clamped_mmx(const DCTELEM *block, uint8_t *pixels, int line_size)
{
    const DCTELEM *p;
    uint8_t *pix;

    /* read the pixels */
    p = block;
    pix = pixels;
    /* unrolled loop */
        __asm __volatile(
                "movq        %3, %%mm0          \n\t"
                "movq        8%3, %%mm1         \n\t"
                "movq        16%3, %%mm2        \n\t"
                "movq        24%3, %%mm3        \n\t"
                "movq        32%3, %%mm4        \n\t"
                "movq        40%3, %%mm5        \n\t"
                "movq        48%3, %%mm6        \n\t"
                "movq        56%3, %%mm7        \n\t"
                "packuswb %%mm1, %%mm0          \n\t"
                "packuswb %%mm3, %%mm2          \n\t"
                "packuswb %%mm5, %%mm4          \n\t"
                "packuswb %%mm7, %%mm6          \n\t"
                "movq        %%mm0, (%0)        \n\t"
                "movq        %%mm2, (%0, %1)    \n\t"
                "movq        %%mm4, (%0, %1, 2) \n\t"
                "movq        %%mm6, (%0, %2)    \n\t"
                ::"r" (pix), "r" ((long)line_size), "r" ((long)line_size*3), "m"(*p)
                :"memory");
        pix += line_size*4;
        p += 32;

    // if this were an exact copy of the code above, the compiler
    // would generate some very strange code
    // thus using "r"
    __asm __volatile(
            "movq        (%3), %%mm0            \n\t"
            "movq        8(%3), %%mm1           \n\t"
            "movq        16(%3), %%mm2          \n\t"
            "movq        24(%3), %%mm3          \n\t"
            "movq        32(%3), %%mm4          \n\t"
            "movq        40(%3), %%mm5          \n\t"
            "movq        48(%3), %%mm6          \n\t"
            "movq        56(%3), %%mm7          \n\t"
            "packuswb %%mm1, %%mm0              \n\t"
            "packuswb %%mm3, %%mm2              \n\t"
            "packuswb %%mm5, %%mm4              \n\t"
            "packuswb %%mm7, %%mm6              \n\t"
            "movq        %%mm0, (%0)            \n\t"
            "movq        %%mm2, (%0, %1)        \n\t"
            "movq        %%mm4, (%0, %1, 2)     \n\t"
            "movq        %%mm6, (%0, %2)        \n\t"
            ::"r" (pix), "r" ((long)line_size), "r" ((long)line_size*3), "r"(p)
            :"memory");
}

static const unsigned char __align8 vector128[8] =
  { 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80 };

void put_signed_pixels_clamped_mmx(const DCTELEM *block, uint8_t *pixels, int line_size)
{
    int i;

    movq_m2r(*vector128, mm1);
    for (i = 0; i < 8; i++) {
        movq_m2r(*(block), mm0);
        packsswb_m2r(*(block + 4), mm0);
        block += 8;
        paddb_r2r(mm1, mm0);
        movq_r2m(mm0, *pixels);
        pixels += line_size;
    }
}

void add_pixels_clamped_mmx(const DCTELEM *block, uint8_t *pixels, int line_size)
{
    const DCTELEM *p;
    uint8_t *pix;
    int i;

    /* read the pixels */
    p = block;
    pix = pixels;
    MOVQ_ZERO(mm7);
    i = 4;
    do {
        __asm __volatile(
                "movq        (%2), %%mm0        \n\t"
                "movq        8(%2), %%mm1       \n\t"
                "movq        16(%2), %%mm2      \n\t"
                "movq        24(%2), %%mm3      \n\t"
                "movq        %0, %%mm4          \n\t"
                "movq        %1, %%mm6          \n\t"
                "movq        %%mm4, %%mm5       \n\t"
                "punpcklbw %%mm7, %%mm4         \n\t"
                "punpckhbw %%mm7, %%mm5         \n\t"
                "paddsw      %%mm4, %%mm0       \n\t"
                "paddsw      %%mm5, %%mm1       \n\t"
                "movq        %%mm6, %%mm5       \n\t"
                "punpcklbw %%mm7, %%mm6         \n\t"
                "punpckhbw %%mm7, %%mm5         \n\t"
                "paddsw      %%mm6, %%mm2       \n\t"
                "paddsw      %%mm5, %%mm3       \n\t"
                "packuswb %%mm1, %%mm0          \n\t"
                "packuswb %%mm3, %%mm2          \n\t"
                "movq        %%mm0, %0          \n\t"
                "movq        %%mm2, %1          \n\t"
                :"+m"(*pix), "+m"(*(pix+line_size))
                :"r"(p)
                :"memory");
        pix += line_size*2;
        p += 16;
    } while (--i);
}

static void put_pixels4_mmx(uint8_t *block, const uint8_t *pixels, int line_size, int h)
{
    __asm __volatile(
         "lea (%3, %3), %%"REG_a"       \n\t"
         ".balign 8                     \n\t"
         "1:                            \n\t"
         "movd (%1), %%mm0              \n\t"
         "movd (%1, %3), %%mm1          \n\t"
         "movd %%mm0, (%2)              \n\t"
         "movd %%mm1, (%2, %3)          \n\t"
         "add %%"REG_a", %1             \n\t"
         "add %%"REG_a", %2             \n\t"
         "movd (%1), %%mm0              \n\t"
         "movd (%1, %3), %%mm1          \n\t"
         "movd %%mm0, (%2)              \n\t"
         "movd %%mm1, (%2, %3)          \n\t"
         "add %%"REG_a", %1             \n\t"
         "add %%"REG_a", %2             \n\t"
         "subl $4, %0                   \n\t"
         "jnz 1b                        \n\t"
         : "+g"(h), "+r" (pixels),  "+r" (block)
         : "r"((long)line_size)
         : "%"REG_a, "memory"
        );
}

static void put_pixels8_mmx(uint8_t *block, const uint8_t *pixels, int line_size, int h)
{
    __asm __volatile(
         "lea (%3, %3), %%"REG_a"       \n\t"
         ".balign 8                     \n\t"
         "1:                            \n\t"
         "movq (%1), %%mm0              \n\t"
         "movq (%1, %3), %%mm1          \n\t"
         "movq %%mm0, (%2)              \n\t"
         "movq %%mm1, (%2, %3)          \n\t"
         "add %%"REG_a", %1             \n\t"
         "add %%"REG_a", %2             \n\t"
         "movq (%1), %%mm0              \n\t"
         "movq (%1, %3), %%mm1          \n\t"
         "movq %%mm0, (%2)              \n\t"
         "movq %%mm1, (%2, %3)          \n\t"
         "add %%"REG_a", %1             \n\t"
         "add %%"REG_a", %2             \n\t"
         "subl $4, %0                   \n\t"
         "jnz 1b                        \n\t"
         : "+g"(h), "+r" (pixels),  "+r" (block)
         : "r"((long)line_size)
         : "%"REG_a, "memory"
        );
}

static void put_pixels16_mmx(uint8_t *block, const uint8_t *pixels, int line_size, int h)
{
    __asm __volatile(
         "lea (%3, %3), %%"REG_a"       \n\t"
         ".balign 8                     \n\t"
         "1:                            \n\t"
         "movq (%1), %%mm0              \n\t"
         "movq 8(%1), %%mm4             \n\t"
         "movq (%1, %3), %%mm1          \n\t"
         "movq 8(%1, %3), %%mm5         \n\t"
         "movq %%mm0, (%2)              \n\t"
         "movq %%mm4, 8(%2)             \n\t"
         "movq %%mm1, (%2, %3)          \n\t"
         "movq %%mm5, 8(%2, %3)         \n\t"
         "add %%"REG_a", %1             \n\t"
         "add %%"REG_a", %2             \n\t"
         "movq (%1), %%mm0              \n\t"
         "movq 8(%1), %%mm4             \n\t"
         "movq (%1, %3), %%mm1          \n\t"
         "movq 8(%1, %3), %%mm5         \n\t"
         "movq %%mm0, (%2)              \n\t"
         "movq %%mm4, 8(%2)             \n\t"
         "movq %%mm1, (%2, %3)          \n\t"
         "movq %%mm5, 8(%2, %3)         \n\t"
         "add %%"REG_a", %1             \n\t"
         "add %%"REG_a", %2             \n\t"
         "subl $4, %0                   \n\t"
         "jnz 1b                        \n\t"
         : "+g"(h), "+r" (pixels),  "+r" (block)
         : "r"((long)line_size)
         : "%"REG_a, "memory"
        );
}

static void clear_blocks_mmx(DCTELEM *blocks)
{
    __asm __volatile(
                "pxor %%mm7, %%mm7              \n\t"
                "mov $-128*6, %%"REG_a"         \n\t"
                "1:                             \n\t"
                "movq %%mm7, (%0, %%"REG_a")    \n\t"
                "movq %%mm7, 8(%0, %%"REG_a")   \n\t"
                "movq %%mm7, 16(%0, %%"REG_a")  \n\t"
                "movq %%mm7, 24(%0, %%"REG_a")  \n\t"
                "add $32, %%"REG_a"             \n\t"
                " js 1b                         \n\t"
                : : "r" (((uint8_t *)blocks)+128*6)
                : "%"REG_a
        );
}

#ifdef CONFIG_ENCODERS
static int pix_sum16_mmx(uint8_t * pix, int line_size){
    const int h=16;
    int sum;
    long index= -line_size*h;

    __asm __volatile(
                "pxor %%mm7, %%mm7              \n\t"
                "pxor %%mm6, %%mm6              \n\t"
                "1:                             \n\t"
                "movq (%2, %1), %%mm0           \n\t"
                "movq (%2, %1), %%mm1           \n\t"
                "movq 8(%2, %1), %%mm2          \n\t"
                "movq 8(%2, %1), %%mm3          \n\t"
                "punpcklbw %%mm7, %%mm0         \n\t"
                "punpckhbw %%mm7, %%mm1         \n\t"
                "punpcklbw %%mm7, %%mm2         \n\t"
                "punpckhbw %%mm7, %%mm3         \n\t"
                "paddw %%mm0, %%mm1             \n\t"
                "paddw %%mm2, %%mm3             \n\t"
                "paddw %%mm1, %%mm3             \n\t"
                "paddw %%mm3, %%mm6             \n\t"
                "add %3, %1                     \n\t"
                " js 1b                         \n\t"
                "movq %%mm6, %%mm5              \n\t"
                "psrlq $32, %%mm6               \n\t"
                "paddw %%mm5, %%mm6             \n\t"
                "movq %%mm6, %%mm5              \n\t"
                "psrlq $16, %%mm6               \n\t"
                "paddw %%mm5, %%mm6             \n\t"
                "movd %%mm6, %0                 \n\t"
                "andl $0xFFFF, %0               \n\t"
                : "=&r" (sum), "+r" (index)
                : "r" (pix - index), "r" ((long)line_size)
        );

        return sum;
}
#endif //CONFIG_ENCODERS

static void add_bytes_mmx(uint8_t *dst, uint8_t *src, int w){
    long i=0;
    asm volatile(
        "1:                             \n\t"
        "movq  (%1, %0), %%mm0          \n\t"
        "movq  (%2, %0), %%mm1          \n\t"
        "paddb %%mm0, %%mm1             \n\t"
        "movq %%mm1, (%2, %0)           \n\t"
        "movq 8(%1, %0), %%mm0          \n\t"
        "movq 8(%2, %0), %%mm1          \n\t"
        "paddb %%mm0, %%mm1             \n\t"
        "movq %%mm1, 8(%2, %0)          \n\t"
        "add $16, %0                    \n\t"
        "cmp %3, %0                     \n\t"
        " jb 1b                         \n\t"
        : "+r" (i)
        : "r"(src), "r"(dst), "r"((long)w-15)
    );
    for(; i<w; i++)
        dst[i+0] += src[i+0];
}

#define H263_LOOP_FILTER \
        "pxor %%mm7, %%mm7              \n\t"\
        "movq  %0, %%mm0                \n\t"\
        "movq  %0, %%mm1                \n\t"\
        "movq  %3, %%mm2                \n\t"\
        "movq  %3, %%mm3                \n\t"\
        "punpcklbw %%mm7, %%mm0         \n\t"\
        "punpckhbw %%mm7, %%mm1         \n\t"\
        "punpcklbw %%mm7, %%mm2         \n\t"\
        "punpckhbw %%mm7, %%mm3         \n\t"\
        "psubw %%mm2, %%mm0             \n\t"\
        "psubw %%mm3, %%mm1             \n\t"\
        "movq  %1, %%mm2                \n\t"\
        "movq  %1, %%mm3                \n\t"\
        "movq  %2, %%mm4                \n\t"\
        "movq  %2, %%mm5                \n\t"\
        "punpcklbw %%mm7, %%mm2         \n\t"\
        "punpckhbw %%mm7, %%mm3         \n\t"\
        "punpcklbw %%mm7, %%mm4         \n\t"\
        "punpckhbw %%mm7, %%mm5         \n\t"\
        "psubw %%mm2, %%mm4             \n\t"\
        "psubw %%mm3, %%mm5             \n\t"\
        "psllw $2, %%mm4                \n\t"\
        "psllw $2, %%mm5                \n\t"\
        "paddw %%mm0, %%mm4             \n\t"\
        "paddw %%mm1, %%mm5             \n\t"\
        "pxor %%mm6, %%mm6              \n\t"\
        "pcmpgtw %%mm4, %%mm6           \n\t"\
        "pcmpgtw %%mm5, %%mm7           \n\t"\
        "pxor %%mm6, %%mm4              \n\t"\
        "pxor %%mm7, %%mm5              \n\t"\
        "psubw %%mm6, %%mm4             \n\t"\
        "psubw %%mm7, %%mm5             \n\t"\
        "psrlw $3, %%mm4                \n\t"\
        "psrlw $3, %%mm5                \n\t"\
        "packuswb %%mm5, %%mm4          \n\t"\
        "packsswb %%mm7, %%mm6          \n\t"\
        "pxor %%mm7, %%mm7              \n\t"\
        "movd %4, %%mm2                 \n\t"\
        "punpcklbw %%mm2, %%mm2         \n\t"\
        "punpcklbw %%mm2, %%mm2         \n\t"\
        "punpcklbw %%mm2, %%mm2         \n\t"\
        "psubusb %%mm4, %%mm2           \n\t"\
        "movq %%mm2, %%mm3              \n\t"\
        "psubusb %%mm4, %%mm3           \n\t"\
        "psubb %%mm3, %%mm2             \n\t"\
        "movq %1, %%mm3                 \n\t"\
        "movq %2, %%mm4                 \n\t"\
        "pxor %%mm6, %%mm3              \n\t"\
        "pxor %%mm6, %%mm4              \n\t"\
        "paddusb %%mm2, %%mm3           \n\t"\
        "psubusb %%mm2, %%mm4           \n\t"\
        "pxor %%mm6, %%mm3              \n\t"\
        "pxor %%mm6, %%mm4              \n\t"\
        "paddusb %%mm2, %%mm2           \n\t"\
        "packsswb %%mm1, %%mm0          \n\t"\
        "pcmpgtb %%mm0, %%mm7           \n\t"\
        "pxor %%mm7, %%mm0              \n\t"\
        "psubb %%mm7, %%mm0             \n\t"\
        "movq %%mm0, %%mm1              \n\t"\
        "psubusb %%mm2, %%mm0           \n\t"\
        "psubb %%mm0, %%mm1             \n\t"\
        "pand %5, %%mm1                 \n\t"\
        "psrlw $2, %%mm1                \n\t"\
        "pxor %%mm7, %%mm1              \n\t"\
        "psubb %%mm7, %%mm1             \n\t"\
        "movq %0, %%mm5                 \n\t"\
        "movq %3, %%mm6                 \n\t"\
        "psubb %%mm1, %%mm5             \n\t"\
        "paddb %%mm1, %%mm6             \n\t"

static void h263_v_loop_filter_mmx(uint8_t *src, int stride, int qscale){
    const int strength= ff_h263_loop_filter_strength[qscale];

    asm volatile(

        H263_LOOP_FILTER

        "movq %%mm3, %1                 \n\t"
        "movq %%mm4, %2                 \n\t"
        "movq %%mm5, %0                 \n\t"
        "movq %%mm6, %3                 \n\t"
        : "+m" (*(uint64_t*)(src - 2*stride)),
          "+m" (*(uint64_t*)(src - 1*stride)),
          "+m" (*(uint64_t*)(src + 0*stride)),
          "+m" (*(uint64_t*)(src + 1*stride))
        : "g" (2*strength), "m"(ff_pb_FC)
    );
}
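
/* The H263_LOOP_FILTER macro used above implements the H.263 Annex J
 * deblocking filter on 8 columns across one block edge.  Per column, with
 * A, B on one side of the edge and C, D on the other, it computes roughly
 *     d  = (A - 4*B + 4*C - D) / 8
 *     d1 = |d|               if |d| <  strength
 *          2*strength - |d|  if |d| <  2*strength
 *          0                 otherwise          (with the sign of d)
 * then B += d1 and C -= d1 (saturated), and a smaller correction derived
 * from A - D and d1 is applied to the outer pixels A and D.
 */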

static inline void transpose4x4(uint8_t *dst, uint8_t *src, int dst_stride, int src_stride){
    asm volatile( //FIXME could save 1 instruction if done as 8x4 ...
        "movd  %4, %%mm0                \n\t"
        "movd  %5, %%mm1                \n\t"
        "movd  %6, %%mm2                \n\t"
        "movd  %7, %%mm3                \n\t"
        "punpcklbw %%mm1, %%mm0         \n\t"
        "punpcklbw %%mm3, %%mm2         \n\t"
        "movq %%mm0, %%mm1              \n\t"
        "punpcklwd %%mm2, %%mm0         \n\t"
        "punpckhwd %%mm2, %%mm1         \n\t"
        "movd  %%mm0, %0                \n\t"
        "punpckhdq %%mm0, %%mm0         \n\t"
        "movd  %%mm0, %1                \n\t"
        "movd  %%mm1, %2                \n\t"
        "punpckhdq %%mm1, %%mm1         \n\t"
        "movd  %%mm1, %3                \n\t"

        : "=m" (*(uint32_t*)(dst + 0*dst_stride)),
          "=m" (*(uint32_t*)(dst + 1*dst_stride)),
          "=m" (*(uint32_t*)(dst + 2*dst_stride)),
          "=m" (*(uint32_t*)(dst + 3*dst_stride))
        :  "m" (*(uint32_t*)(src + 0*src_stride)),
           "m" (*(uint32_t*)(src + 1*src_stride)),
           "m" (*(uint32_t*)(src + 2*src_stride)),
           "m" (*(uint32_t*)(src + 3*src_stride))
    );
}
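
/* Scalar equivalent of transpose4x4() (illustrative only):
 *     for(i=0; i<4; i++)
 *         for(j=0; j<4; j++)
 *             dst[i*dst_stride + j] = src[j*src_stride + i];
 */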

static void h263_h_loop_filter_mmx(uint8_t *src, int stride, int qscale){
    const int strength= ff_h263_loop_filter_strength[qscale];
    uint64_t temp[4] __attribute__ ((aligned(8)));
    uint8_t *btemp= (uint8_t*)temp;

    src -= 2;

    transpose4x4(btemp  , src           , 8, stride);
    transpose4x4(btemp+4, src + 4*stride, 8, stride);
    asm volatile(
        H263_LOOP_FILTER // 5 3 4 6

        : "+m" (temp[0]),
          "+m" (temp[1]),
          "+m" (temp[2]),
          "+m" (temp[3])
        : "g" (2*strength), "m"(ff_pb_FC)
    );

    asm volatile(
        "movq %%mm5, %%mm1              \n\t"
        "movq %%mm4, %%mm0              \n\t"
        "punpcklbw %%mm3, %%mm5         \n\t"
        "punpcklbw %%mm6, %%mm4         \n\t"
        "punpckhbw %%mm3, %%mm1         \n\t"
        "punpckhbw %%mm6, %%mm0         \n\t"
        "movq %%mm5, %%mm3              \n\t"
        "movq %%mm1, %%mm6              \n\t"
        "punpcklwd %%mm4, %%mm5         \n\t"
        "punpcklwd %%mm0, %%mm1         \n\t"
        "punpckhwd %%mm4, %%mm3         \n\t"
        "punpckhwd %%mm0, %%mm6         \n\t"
        "movd %%mm5, (%0)               \n\t"
        "punpckhdq %%mm5, %%mm5         \n\t"
        "movd %%mm5, (%0,%2)            \n\t"
        "movd %%mm3, (%0,%2,2)          \n\t"
        "punpckhdq %%mm3, %%mm3         \n\t"
        "movd %%mm3, (%0,%3)            \n\t"
        "movd %%mm1, (%1)               \n\t"
        "punpckhdq %%mm1, %%mm1         \n\t"
        "movd %%mm1, (%1,%2)            \n\t"
        "movd %%mm6, (%1,%2,2)          \n\t"
        "punpckhdq %%mm6, %%mm6         \n\t"
        "movd %%mm6, (%1,%3)            \n\t"
        :: "r" (src),
           "r" (src + 4*stride),
           "r" ((long)   stride ),
           "r" ((long)(3*stride))
    );
}

#ifdef CONFIG_ENCODERS
static int pix_norm1_mmx(uint8_t *pix, int line_size) {
    int tmp;
  asm volatile (
      "movl $16,%%ecx\n"
      "pxor %%mm0,%%mm0\n"
      "pxor %%mm7,%%mm7\n"
      "1:\n"
      "movq (%0),%%mm2\n"       /* mm2 = pix[0-7] */
      "movq 8(%0),%%mm3\n"      /* mm3 = pix[8-15] */

      "movq %%mm2,%%mm1\n"      /* mm1 = mm2 = pix[0-7] */

      "punpckhbw %%mm0,%%mm1\n" /* mm1 = [pix4-7] */
      "punpcklbw %%mm0,%%mm2\n" /* mm2 = [pix0-3] */

      "movq %%mm3,%%mm4\n"      /* mm4 = mm3 = pix[8-15] */
      "punpckhbw %%mm0,%%mm3\n" /* mm3 = [pix12-15] */
      "punpcklbw %%mm0,%%mm4\n" /* mm4 = [pix8-11] */

      "pmaddwd %%mm1,%%mm1\n"   /* mm1 = (pix4^2+pix5^2,pix6^2+pix7^2) */
      "pmaddwd %%mm2,%%mm2\n"   /* mm2 = (pix0^2+pix1^2,pix2^2+pix3^2) */

      "pmaddwd %%mm3,%%mm3\n"
      "pmaddwd %%mm4,%%mm4\n"

      "paddd %%mm1,%%mm2\n"     /* mm2 = (pix0^2+pix1^2+pix4^2+pix5^2,
                                          pix2^2+pix3^2+pix6^2+pix7^2) */
      "paddd %%mm3,%%mm4\n"
      "paddd %%mm2,%%mm7\n"

      "add %2, %0\n"
      "paddd %%mm4,%%mm7\n"
      "dec %%ecx\n"
      "jnz 1b\n"

      "movq %%mm7,%%mm1\n"
      "psrlq $32, %%mm7\n"      /* shift hi dword to lo */
      "paddd %%mm7,%%mm1\n"
      "movd %%mm1,%1\n"
      : "+r" (pix), "=r"(tmp) : "r" ((long)line_size) : "%ecx" );
    return tmp;
}

static int sse8_mmx(void *v, uint8_t * pix1, uint8_t * pix2, int line_size, int h) {
    int tmp;
  asm volatile (
      "movl %4,%%ecx\n"
      "pxor %%mm0,%%mm0\n"      /* mm0 = 0 */
      "pxor %%mm7,%%mm7\n"      /* mm7 holds the sum */
      "1:\n"
      "movq (%0),%%mm1\n"       /* mm1 = pix1[0-7] */
      "movq (%1),%%mm2\n"       /* mm2 = pix2[0-7] */

      "movq %%mm1,%%mm5\n"
      "psubusb %%mm2,%%mm1\n"
      "psubusb %%mm5,%%mm2\n"

      "por %%mm1,%%mm2\n"

      "movq %%mm2,%%mm1\n"

      "punpckhbw %%mm0,%%mm2\n"
      "punpcklbw %%mm0,%%mm1\n" /* mm1 now spread over (mm1,mm2) */

      "pmaddwd %%mm2,%%mm2\n"
      "pmaddwd %%mm1,%%mm1\n"

      "add %3,%0\n"
      "add %3,%1\n"

      "paddd %%mm2,%%mm1\n"
      "paddd %%mm1,%%mm7\n"

      "decl %%ecx\n"
      "jnz 1b\n"

      "movq %%mm7,%%mm1\n"
      "psrlq $32, %%mm7\n"      /* shift hi dword to lo */
      "paddd %%mm7,%%mm1\n"
      "movd %%mm1,%2\n"
      : "+r" (pix1), "+r" (pix2), "=r"(tmp)
      : "r" ((long)line_size) , "m" (h)
      : "%ecx");
    return tmp;
}

static int sse16_mmx(void *v, uint8_t * pix1, uint8_t * pix2, int line_size, int h) {
    int tmp;
  asm volatile (
      "movl %4,%%ecx\n"
      "pxor %%mm0,%%mm0\n"      /* mm0 = 0 */
      "pxor %%mm7,%%mm7\n"      /* mm7 holds the sum */
      "1:\n"
      "movq (%0),%%mm1\n"       /* mm1 = pix1[0-7] */
      "movq (%1),%%mm2\n"       /* mm2 = pix2[0-7] */
      "movq 8(%0),%%mm3\n"      /* mm3 = pix1[8-15] */
      "movq 8(%1),%%mm4\n"      /* mm4 = pix2[8-15] */

      /* todo: mm1-mm2, mm3-mm4 */
      /* algo: subtract mm1 from mm2 with saturation and vice versa */
      /*       OR the results to get absolute difference */
      "movq %%mm1,%%mm5\n"
      "movq %%mm3,%%mm6\n"
      "psubusb %%mm2,%%mm1\n"
      "psubusb %%mm4,%%mm3\n"
      "psubusb %%mm5,%%mm2\n"
      "psubusb %%mm6,%%mm4\n"

      "por %%mm1,%%mm2\n"
      "por %%mm3,%%mm4\n"

      /* now convert to 16-bit vectors so we can square them */
      "movq %%mm2,%%mm1\n"
      "movq %%mm4,%%mm3\n"

      "punpckhbw %%mm0,%%mm2\n"
      "punpckhbw %%mm0,%%mm4\n"
      "punpcklbw %%mm0,%%mm1\n" /* mm1 now spread over (mm1,mm2) */
      "punpcklbw %%mm0,%%mm3\n" /* mm4 now spread over (mm3,mm4) */

      "pmaddwd %%mm2,%%mm2\n"
      "pmaddwd %%mm4,%%mm4\n"
      "pmaddwd %%mm1,%%mm1\n"
      "pmaddwd %%mm3,%%mm3\n"

      "add %3,%0\n"
      "add %3,%1\n"

      "paddd %%mm2,%%mm1\n"
      "paddd %%mm4,%%mm3\n"
      "paddd %%mm1,%%mm7\n"
      "paddd %%mm3,%%mm7\n"

      "decl %%ecx\n"
      "jnz 1b\n"

      "movq %%mm7,%%mm1\n"
      "psrlq $32, %%mm7\n"      /* shift hi dword to lo */
      "paddd %%mm7,%%mm1\n"
      "movd %%mm1,%2\n"
      : "+r" (pix1), "+r" (pix2), "=r"(tmp)
      : "r" ((long)line_size) , "m" (h)
      : "%ecx");
    return tmp;
}
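
/* The SSE routines rely on the usual MMX absolute-difference trick: for
 * unsigned bytes, |a - b| == sat(a - b) | sat(b - a), with sat() being the
 * saturating psubusb.  Scalar sketch of sse16 (illustrative only, not used):
 */
#if 0
static int sse16_c_ref(void *v, uint8_t *pix1, uint8_t *pix2, int line_size, int h){
    int x, y, sum = 0;
    for(y = 0; y < h; y++){
        for(x = 0; x < 16; x++){
            int d = pix1[x] - pix2[x];
            sum += d * d;
        }
        pix1 += line_size;
        pix2 += line_size;
    }
    return sum;
}
#endif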

static int hf_noise8_mmx(uint8_t * pix1, int line_size, int h) {
    int tmp;
  asm volatile (
      "movl %3,%%ecx\n"
      "pxor %%mm7,%%mm7\n"
      "pxor %%mm6,%%mm6\n"

      "movq (%0),%%mm0\n"
      "movq %%mm0, %%mm1\n"
      "psllq $8, %%mm0\n"
      "psrlq $8, %%mm1\n"
      "psrlq $8, %%mm0\n"
      "movq %%mm0, %%mm2\n"
      "movq %%mm1, %%mm3\n"
      "punpcklbw %%mm7,%%mm0\n"
      "punpcklbw %%mm7,%%mm1\n"
      "punpckhbw %%mm7,%%mm2\n"
      "punpckhbw %%mm7,%%mm3\n"
      "psubw %%mm1, %%mm0\n"
      "psubw %%mm3, %%mm2\n"

      "add %2,%0\n"

      "movq (%0),%%mm4\n"
      "movq %%mm4, %%mm1\n"
      "psllq $8, %%mm4\n"
      "psrlq $8, %%mm1\n"
      "psrlq $8, %%mm4\n"
      "movq %%mm4, %%mm5\n"
      "movq %%mm1, %%mm3\n"
      "punpcklbw %%mm7,%%mm4\n"
      "punpcklbw %%mm7,%%mm1\n"
      "punpckhbw %%mm7,%%mm5\n"
      "punpckhbw %%mm7,%%mm3\n"
      "psubw %%mm1, %%mm4\n"
      "psubw %%mm3, %%mm5\n"
      "psubw %%mm4, %%mm0\n"
      "psubw %%mm5, %%mm2\n"
      "pxor %%mm3, %%mm3\n"
      "pxor %%mm1, %%mm1\n"
      "pcmpgtw %%mm0, %%mm3\n\t"
      "pcmpgtw %%mm2, %%mm1\n\t"
      "pxor %%mm3, %%mm0\n"
      "pxor %%mm1, %%mm2\n"
      "psubw %%mm3, %%mm0\n"
      "psubw %%mm1, %%mm2\n"
      "paddw %%mm0, %%mm2\n"
      "paddw %%mm2, %%mm6\n"

      "add %2,%0\n"
      "1:\n"

      "movq (%0),%%mm0\n"
      "movq %%mm0, %%mm1\n"
      "psllq $8, %%mm0\n"
      "psrlq $8, %%mm1\n"
      "psrlq $8, %%mm0\n"
      "movq %%mm0, %%mm2\n"
      "movq %%mm1, %%mm3\n"
      "punpcklbw %%mm7,%%mm0\n"
      "punpcklbw %%mm7,%%mm1\n"
      "punpckhbw %%mm7,%%mm2\n"
      "punpckhbw %%mm7,%%mm3\n"
      "psubw %%mm1, %%mm0\n"
      "psubw %%mm3, %%mm2\n"
      "psubw %%mm0, %%mm4\n"
      "psubw %%mm2, %%mm5\n"
      "pxor %%mm3, %%mm3\n"
      "pxor %%mm1, %%mm1\n"
      "pcmpgtw %%mm4, %%mm3\n\t"
      "pcmpgtw %%mm5, %%mm1\n\t"
      "pxor %%mm3, %%mm4\n"
      "pxor %%mm1, %%mm5\n"
      "psubw %%mm3, %%mm4\n"
      "psubw %%mm1, %%mm5\n"
      "paddw %%mm4, %%mm5\n"
      "paddw %%mm5, %%mm6\n"

      "add %2,%0\n"

      "movq (%0),%%mm4\n"
      "movq %%mm4, %%mm1\n"
      "psllq $8, %%mm4\n"
      "psrlq $8, %%mm1\n"
      "psrlq $8, %%mm4\n"
      "movq %%mm4, %%mm5\n"
      "movq %%mm1, %%mm3\n"
      "punpcklbw %%mm7,%%mm4\n"
      "punpcklbw %%mm7,%%mm1\n"
      "punpckhbw %%mm7,%%mm5\n"
      "punpckhbw %%mm7,%%mm3\n"
      "psubw %%mm1, %%mm4\n"
      "psubw %%mm3, %%mm5\n"
      "psubw %%mm4, %%mm0\n"
      "psubw %%mm5, %%mm2\n"
      "pxor %%mm3, %%mm3\n"
      "pxor %%mm1, %%mm1\n"
      "pcmpgtw %%mm0, %%mm3\n\t"
      "pcmpgtw %%mm2, %%mm1\n\t"
      "pxor %%mm3, %%mm0\n"
      "pxor %%mm1, %%mm2\n"
      "psubw %%mm3, %%mm0\n"
      "psubw %%mm1, %%mm2\n"
      "paddw %%mm0, %%mm2\n"
      "paddw %%mm2, %%mm6\n"

      "add %2,%0\n"
      "subl $2, %%ecx\n"
      " jnz 1b\n"

      "movq %%mm6, %%mm0\n"
      "punpcklwd %%mm7,%%mm0\n"
      "punpckhwd %%mm7,%%mm6\n"
      "paddd %%mm0, %%mm6\n"

      "movq %%mm6,%%mm0\n"
      "psrlq $32, %%mm6\n"
      "paddd %%mm6,%%mm0\n"
      "movd %%mm0,%1\n"
      : "+r" (pix1), "=r"(tmp)
      : "r" ((long)line_size) , "g" (h-2)
      : "%ecx");
      return tmp;
}

static int hf_noise16_mmx(uint8_t * pix1, int line_size, int h) {
    int tmp;
    uint8_t * pix= pix1;
  asm volatile (
      "movl %3,%%ecx\n"
      "pxor %%mm7,%%mm7\n"
      "pxor %%mm6,%%mm6\n"

      "movq (%0),%%mm0\n"
      "movq 1(%0),%%mm1\n"
      "movq %%mm0, %%mm2\n"
      "movq %%mm1, %%mm3\n"
      "punpcklbw %%mm7,%%mm0\n"
      "punpcklbw %%mm7,%%mm1\n"
      "punpckhbw %%mm7,%%mm2\n"
      "punpckhbw %%mm7,%%mm3\n"
      "psubw %%mm1, %%mm0\n"
      "psubw %%mm3, %%mm2\n"

      "add %2,%0\n"

      "movq (%0),%%mm4\n"
      "movq 1(%0),%%mm1\n"
      "movq %%mm4, %%mm5\n"
      "movq %%mm1, %%mm3\n"
      "punpcklbw %%mm7,%%mm4\n"
      "punpcklbw %%mm7,%%mm1\n"
      "punpckhbw %%mm7,%%mm5\n"
      "punpckhbw %%mm7,%%mm3\n"
      "psubw %%mm1, %%mm4\n"
      "psubw %%mm3, %%mm5\n"
      "psubw %%mm4, %%mm0\n"
      "psubw %%mm5, %%mm2\n"
      "pxor %%mm3, %%mm3\n"
      "pxor %%mm1, %%mm1\n"
      "pcmpgtw %%mm0, %%mm3\n\t"
      "pcmpgtw %%mm2, %%mm1\n\t"
      "pxor %%mm3, %%mm0\n"
      "pxor %%mm1, %%mm2\n"
      "psubw %%mm3, %%mm0\n"
      "psubw %%mm1, %%mm2\n"
      "paddw %%mm0, %%mm2\n"
      "paddw %%mm2, %%mm6\n"

      "add %2,%0\n"
      "1:\n"

      "movq (%0),%%mm0\n"
      "movq 1(%0),%%mm1\n"
      "movq %%mm0, %%mm2\n"
      "movq %%mm1, %%mm3\n"
      "punpcklbw %%mm7,%%mm0\n"
      "punpcklbw %%mm7,%%mm1\n"
      "punpckhbw %%mm7,%%mm2\n"
      "punpckhbw %%mm7,%%mm3\n"
      "psubw %%mm1, %%mm0\n"
      "psubw %%mm3, %%mm2\n"
      "psubw %%mm0, %%mm4\n"
      "psubw %%mm2, %%mm5\n"
      "pxor %%mm3, %%mm3\n"
      "pxor %%mm1, %%mm1\n"
      "pcmpgtw %%mm4, %%mm3\n\t"
      "pcmpgtw %%mm5, %%mm1\n\t"
      "pxor %%mm3, %%mm4\n"
      "pxor %%mm1, %%mm5\n"
      "psubw %%mm3, %%mm4\n"
      "psubw %%mm1, %%mm5\n"
      "paddw %%mm4, %%mm5\n"
      "paddw %%mm5, %%mm6\n"

      "add %2,%0\n"

      "movq (%0),%%mm4\n"
      "movq 1(%0),%%mm1\n"
      "movq %%mm4, %%mm5\n"
      "movq %%mm1, %%mm3\n"
      "punpcklbw %%mm7,%%mm4\n"
      "punpcklbw %%mm7,%%mm1\n"
      "punpckhbw %%mm7,%%mm5\n"
      "punpckhbw %%mm7,%%mm3\n"
      "psubw %%mm1, %%mm4\n"
      "psubw %%mm3, %%mm5\n"
      "psubw %%mm4, %%mm0\n"
      "psubw %%mm5, %%mm2\n"
      "pxor %%mm3, %%mm3\n"
      "pxor %%mm1, %%mm1\n"
      "pcmpgtw %%mm0, %%mm3\n\t"
      "pcmpgtw %%mm2, %%mm1\n\t"
      "pxor %%mm3, %%mm0\n"
      "pxor %%mm1, %%mm2\n"
      "psubw %%mm3, %%mm0\n"
      "psubw %%mm1, %%mm2\n"
      "paddw %%mm0, %%mm2\n"
      "paddw %%mm2, %%mm6\n"

      "add %2,%0\n"
      "subl $2, %%ecx\n"
      " jnz 1b\n"

      "movq %%mm6, %%mm0\n"
      "punpcklwd %%mm7,%%mm0\n"
      "punpckhwd %%mm7,%%mm6\n"
      "paddd %%mm0, %%mm6\n"

      "movq %%mm6,%%mm0\n"
      "psrlq $32, %%mm6\n"
      "paddd %%mm6,%%mm0\n"
      "movd %%mm0,%1\n"
      : "+r" (pix1), "=r"(tmp)
      : "r" ((long)line_size) , "g" (h-2)
      : "%ecx");
      return tmp + hf_noise8_mmx(pix+8, line_size, h);
}
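
/* hf_noise8/hf_noise16 estimate high-frequency "noise" as a sum of absolute
 * second differences, roughly
 *     sum over x,y of |(p[x,y] - p[x+1,y]) - (p[x,y+1] - p[x+1,y+1])|
 * i.e. how much the horizontal gradient changes from one line to the next.
 * The nsse functions below combine this with plain SSE as
 *     score = sse + nsse_weight * |hf_noise(pix1) - hf_noise(pix2)|
 * so added or removed texture is penalized in addition to the raw error. */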

static int nsse16_mmx(void *p, uint8_t * pix1, uint8_t * pix2, int line_size, int h) {
    MpegEncContext *c = p;
    int score1= sse16_mmx(c, pix1, pix2, line_size, h);
    int score2= hf_noise16_mmx(pix1, line_size, h) - hf_noise16_mmx(pix2, line_size, h);

    if(c) return score1 + ABS(score2)*c->avctx->nsse_weight;
    else  return score1 + ABS(score2)*8;
}

static int nsse8_mmx(void *p, uint8_t * pix1, uint8_t * pix2, int line_size, int h) {
    MpegEncContext *c = p;
    int score1= sse8_mmx(c, pix1, pix2, line_size, h);
    int score2= hf_noise8_mmx(pix1, line_size, h) - hf_noise8_mmx(pix2, line_size, h);

    if(c) return score1 + ABS(score2)*c->avctx->nsse_weight;
    else  return score1 + ABS(score2)*8;
}

static int vsad_intra16_mmx(void *v, uint8_t * pix, uint8_t * dummy, int line_size, int h) {
    int tmp;

    assert( (((int)pix) & 7) == 0);
    assert((line_size &7) ==0);

#define SUM(in0, in1, out0, out1) \
      "movq (%0), %%mm2\n"\
      "movq 8(%0), %%mm3\n"\
      "add %2,%0\n"\
      "movq %%mm2, " #out0 "\n"\
      "movq %%mm3, " #out1 "\n"\
      "psubusb " #in0 ", %%mm2\n"\
      "psubusb " #in1 ", %%mm3\n"\
      "psubusb " #out0 ", " #in0 "\n"\
      "psubusb " #out1 ", " #in1 "\n"\
      "por %%mm2, " #in0 "\n"\
      "por %%mm3, " #in1 "\n"\
      "movq " #in0 ", %%mm2\n"\
      "movq " #in1 ", %%mm3\n"\
      "punpcklbw %%mm7, " #in0 "\n"\
      "punpcklbw %%mm7, " #in1 "\n"\
      "punpckhbw %%mm7, %%mm2\n"\
      "punpckhbw %%mm7, %%mm3\n"\
      "paddw " #in1 ", " #in0 "\n"\
      "paddw %%mm3, %%mm2\n"\
      "paddw %%mm2, " #in0 "\n"\
      "paddw " #in0 ", %%mm6\n"


  asm volatile (
      "movl %3,%%ecx\n"
      "pxor %%mm6,%%mm6\n"
      "pxor %%mm7,%%mm7\n"
      "movq (%0),%%mm0\n"
      "movq 8(%0),%%mm1\n"
      "add %2,%0\n"
      "subl $2, %%ecx\n"
      SUM(%%mm0, %%mm1, %%mm4, %%mm5)
      "1:\n"

      SUM(%%mm4, %%mm5, %%mm0, %%mm1)

      SUM(%%mm0, %%mm1, %%mm4, %%mm5)

      "subl $2, %%ecx\n"
      "jnz 1b\n"

      "movq %%mm6,%%mm0\n"
      "psrlq $32, %%mm6\n"
      "paddw %%mm6,%%mm0\n"
      "movq %%mm0,%%mm6\n"
      "psrlq $16, %%mm0\n"
      "paddw %%mm6,%%mm0\n"
      "movd %%mm0,%1\n"
      : "+r" (pix), "=r"(tmp)
      : "r" ((long)line_size) , "m" (h)
      : "%ecx");
    return tmp & 0xFFFF;
}
#undef SUM
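
/* vsad_intra16 measures vertical activity inside a 16 pixel wide block:
 * roughly the sum of |pix[x,y] - pix[x,y-1]| over all columns and line pairs
 * (the MMX2 version below does the same with psadbw).  The vsad16 variants
 * further down apply the same measure to the residual pix1 - pix2, biased by
 * 0x80 so the signed difference fits into unsigned bytes. */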

static int vsad_intra16_mmx2(void *v, uint8_t * pix, uint8_t * dummy, int line_size, int h) {
    int tmp;

    assert( (((int)pix) & 7) == 0);
    assert((line_size &7) ==0);

#define SUM(in0, in1, out0, out1) \
      "movq (%0), " #out0 "\n"\
      "movq 8(%0), " #out1 "\n"\
      "add %2,%0\n"\
      "psadbw " #out0 ", " #in0 "\n"\
      "psadbw " #out1 ", " #in1 "\n"\
      "paddw " #in1 ", " #in0 "\n"\
      "paddw " #in0 ", %%mm6\n"

  asm volatile (
      "movl %3,%%ecx\n"
      "pxor %%mm6,%%mm6\n"
      "pxor %%mm7,%%mm7\n"
      "movq (%0),%%mm0\n"
      "movq 8(%0),%%mm1\n"
      "add %2,%0\n"
      "subl $2, %%ecx\n"
      SUM(%%mm0, %%mm1, %%mm4, %%mm5)
      "1:\n"

      SUM(%%mm4, %%mm5, %%mm0, %%mm1)

      SUM(%%mm0, %%mm1, %%mm4, %%mm5)

      "subl $2, %%ecx\n"
      "jnz 1b\n"

      "movd %%mm6,%1\n"
      : "+r" (pix), "=r"(tmp)
      : "r" ((long)line_size) , "m" (h)
      : "%ecx");
    return tmp;
}
#undef SUM

static int vsad16_mmx(void *v, uint8_t * pix1, uint8_t * pix2, int line_size, int h) {
    int tmp;

    assert( (((int)pix1) & 7) == 0);
    assert( (((int)pix2) & 7) == 0);
    assert((line_size &7) ==0);

#define SUM(in0, in1, out0, out1) \
      "movq (%0),%%mm2\n"\
      "movq (%1)," #out0 "\n"\
      "movq 8(%0),%%mm3\n"\
      "movq 8(%1)," #out1 "\n"\
      "add %3,%0\n"\
      "add %3,%1\n"\
      "psubb " #out0 ", %%mm2\n"\
      "psubb " #out1 ", %%mm3\n"\
      "pxor %%mm7, %%mm2\n"\
      "pxor %%mm7, %%mm3\n"\
      "movq %%mm2, " #out0 "\n"\
      "movq %%mm3, " #out1 "\n"\
      "psubusb " #in0 ", %%mm2\n"\
      "psubusb " #in1 ", %%mm3\n"\
      "psubusb " #out0 ", " #in0 "\n"\
      "psubusb " #out1 ", " #in1 "\n"\
      "por %%mm2, " #in0 "\n"\
      "por %%mm3, " #in1 "\n"\
      "movq " #in0 ", %%mm2\n"\
      "movq " #in1 ", %%mm3\n"\
      "punpcklbw %%mm7, " #in0 "\n"\
      "punpcklbw %%mm7, " #in1 "\n"\
      "punpckhbw %%mm7, %%mm2\n"\
      "punpckhbw %%mm7, %%mm3\n"\
      "paddw " #in1 ", " #in0 "\n"\
      "paddw %%mm3, %%mm2\n"\
      "paddw %%mm2, " #in0 "\n"\
      "paddw " #in0 ", %%mm6\n"


  asm volatile (
      "movl %4,%%ecx\n"
      "pxor %%mm6,%%mm6\n"
      "pcmpeqw %%mm7,%%mm7\n"
      "psllw $15, %%mm7\n"
      "packsswb %%mm7, %%mm7\n"
      "movq (%0),%%mm0\n"
      "movq (%1),%%mm2\n"
      "movq 8(%0),%%mm1\n"
      "movq 8(%1),%%mm3\n"
      "add %3,%0\n"
      "add %3,%1\n"
      "subl $2, %%ecx\n"
      "psubb %%mm2, %%mm0\n"
      "psubb %%mm3, %%mm1\n"
      "pxor %%mm7, %%mm0\n"
      "pxor %%mm7, %%mm1\n"
      SUM(%%mm0, %%mm1, %%mm4, %%mm5)
      "1:\n"

      SUM(%%mm4, %%mm5, %%mm0, %%mm1)

      SUM(%%mm0, %%mm1, %%mm4, %%mm5)

      "subl $2, %%ecx\n"
      "jnz 1b\n"

      "movq %%mm6,%%mm0\n"
      "psrlq $32, %%mm6\n"
      "paddw %%mm6,%%mm0\n"
      "movq %%mm0,%%mm6\n"
      "psrlq $16, %%mm0\n"
      "paddw %%mm6,%%mm0\n"
      "movd %%mm0,%2\n"
      : "+r" (pix1), "+r" (pix2), "=r"(tmp)
      : "r" ((long)line_size) , "m" (h)
      : "%ecx");
    return tmp & 0x7FFF;
}
#undef SUM

static int vsad16_mmx2(void *v, uint8_t * pix1, uint8_t * pix2, int line_size, int h) {
    int tmp;

    assert( (((int)pix1) & 7) == 0);
    assert( (((int)pix2) & 7) == 0);
    assert((line_size &7) ==0);

#define SUM(in0, in1, out0, out1) \
      "movq (%0)," #out0 "\n"\
      "movq (%1),%%mm2\n"\
      "movq 8(%0)," #out1 "\n"\
      "movq 8(%1),%%mm3\n"\
      "add %3,%0\n"\
      "add %3,%1\n"\
      "psubb %%mm2, " #out0 "\n"\
      "psubb %%mm3, " #out1 "\n"\
      "pxor %%mm7, " #out0 "\n"\
      "pxor %%mm7, " #out1 "\n"\
      "psadbw " #out0 ", " #in0 "\n"\
      "psadbw " #out1 ", " #in1 "\n"\
      "paddw " #in1 ", " #in0 "\n"\
      "paddw " #in0 ", %%mm6\n"

  asm volatile (
      "movl %4,%%ecx\n"
      "pxor %%mm6,%%mm6\n"
      "pcmpeqw %%mm7,%%mm7\n"
      "psllw $15, %%mm7\n"
      "packsswb %%mm7, %%mm7\n"
      "movq (%0),%%mm0\n"
      "movq (%1),%%mm2\n"
      "movq 8(%0),%%mm1\n"
      "movq 8(%1),%%mm3\n"
      "add %3,%0\n"
      "add %3,%1\n"
      "subl $2, %%ecx\n"
      "psubb %%mm2, %%mm0\n"
      "psubb %%mm3, %%mm1\n"
      "pxor %%mm7, %%mm0\n"
      "pxor %%mm7, %%mm1\n"
      SUM(%%mm0, %%mm1, %%mm4, %%mm5)
      "1:\n"

      SUM(%%mm4, %%mm5, %%mm0, %%mm1)

      SUM(%%mm0, %%mm1, %%mm4, %%mm5)

      "subl $2, %%ecx\n"
      "jnz 1b\n"

      "movd %%mm6,%2\n"
      : "+r" (pix1), "+r" (pix2), "=r"(tmp)
      : "r" ((long)line_size) , "m" (h)
      : "%ecx");
    return tmp;
}
#undef SUM

static void diff_bytes_mmx(uint8_t *dst, uint8_t *src1, uint8_t *src2, int w){
    long i=0;
    asm volatile(
        "1:                             \n\t"
        "movq  (%2, %0), %%mm0          \n\t"
        "movq  (%1, %0), %%mm1          \n\t"
        "psubb %%mm0, %%mm1             \n\t"
        "movq %%mm1, (%3, %0)           \n\t"
        "movq 8(%2, %0), %%mm0          \n\t"
        "movq 8(%1, %0), %%mm1          \n\t"
        "psubb %%mm0, %%mm1             \n\t"
        "movq %%mm1, 8(%3, %0)          \n\t"
        "add $16, %0                    \n\t"
        "cmp %4, %0                     \n\t"
        " jb 1b                         \n\t"
        : "+r" (i)
        : "r"(src1), "r"(src2), "r"(dst), "r"((long)w-15)
    );
    for(; i<w; i++)
        dst[i+0] = src1[i+0]-src2[i+0];
}

static void sub_hfyu_median_prediction_mmx2(uint8_t *dst, uint8_t *src1, uint8_t *src2, int w, int *left, int *left_top){
    long i=0;
    uint8_t l, lt;

    asm volatile(
        "1:                             \n\t"
        "movq  -1(%1, %0), %%mm0        \n\t" // LT
        "movq  (%1, %0), %%mm1          \n\t" // T
        "movq  -1(%2, %0), %%mm2        \n\t" // L
        "movq  (%2, %0), %%mm3          \n\t" // X
        "movq %%mm2, %%mm4              \n\t" // L
        "psubb %%mm0, %%mm2             \n\t"
        "paddb %%mm1, %%mm2             \n\t" // L + T - LT
        "movq %%mm4, %%mm5              \n\t" // L
        "pmaxub %%mm1, %%mm4            \n\t" // max(T, L)
        "pminub %%mm5, %%mm1            \n\t" // min(T, L)
        "pminub %%mm2, %%mm4            \n\t"
        "pmaxub %%mm1, %%mm4            \n\t"
        "psubb %%mm4, %%mm3             \n\t" // dst - pred
        "movq %%mm3, (%3, %0)           \n\t"
        "add $8, %0                     \n\t"
        "cmp %4, %0                     \n\t"
        " jb 1b                         \n\t"
        : "+r" (i)
        : "r"(src1), "r"(src2), "r"(dst), "r"((long)w)
    );

    l= *left;
    lt= *left_top;

    dst[0]= src2[0] - mid_pred(l, src1[0], (l + src1[0] - lt)&0xFF);

    *left_top= src1[w-1];
    *left    = src2[w-1];
}
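
/* The function above produces the HuffYUV "median" residual: per byte, with
 * L = left, T = top and LT = top-left neighbour,
 *     pred = median(L, T, L + T - LT)
 *     dst  = src - pred
 * where the pmaxub/pminub pair uses median(a,b,c) = max(min(a,b), min(max(a,b), c)).
 * Scalar sketch of the vector part (illustrative only, not used):
 */
#if 0
static void sub_hfyu_median_prediction_c_ref(uint8_t *dst, uint8_t *src1, uint8_t *src2, int w){
    int i;
    for(i = 1; i < w; i++){
        int l  = src2[i-1];               /* left      */
        int t  = src1[i];                 /* top       */
        int lt = src1[i-1];               /* top-left  */
        dst[i] = src2[i] - mid_pred(l, t, (l + t - lt) & 0xFF);
    }
    /* dst[0] needs *left / *left_top, as in the scalar tail above */
}
#endif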

#define LBUTTERFLY2(a1,b1,a2,b2)\
    "paddw " #b1 ", " #a1 "             \n\t"\
    "paddw " #b2 ", " #a2 "             \n\t"\
    "paddw " #b1 ", " #b1 "             \n\t"\
    "paddw " #b2 ", " #b2 "             \n\t"\
    "psubw " #a1 ", " #b1 "             \n\t"\
    "psubw " #a2 ", " #b2 "             \n\t"

1408
        LBUTTERFLY2(%%mm0, %%mm1, %%mm2, %%mm3)\
1409
        LBUTTERFLY2(%%mm4, %%mm5, %%mm6, %%mm7)\
1410
        LBUTTERFLY2(%%mm0, %%mm2, %%mm1, %%mm3)\
1411
        LBUTTERFLY2(%%mm4, %%mm6, %%mm5, %%mm7)\
1412
        LBUTTERFLY2(%%mm0, %%mm4, %%mm1, %%mm5)\
1413
        LBUTTERFLY2(%%mm2, %%mm6, %%mm3, %%mm7)\
1414

    
1415
#define MMABS(a,z)\
1416
    "pxor " #z ", " #z "                \n\t"\
1417
    "pcmpgtw " #a ", " #z "                \n\t"\
1418
    "pxor " #z ", " #a "                \n\t"\
1419
    "psubw " #z ", " #a "                \n\t"
1420

    
1421
#define MMABS_SUM(a,z, sum)\
1422
    "pxor " #z ", " #z "                \n\t"\
1423
    "pcmpgtw " #a ", " #z "                \n\t"\
1424
    "pxor " #z ", " #a "                \n\t"\
1425
    "psubw " #z ", " #a "                \n\t"\
1426
    "paddusw " #a ", " #sum "                \n\t"
1427

    
1428
#define MMABS_MMX2(a,z)\
1429
    "pxor " #z ", " #z "                \n\t"\
1430
    "psubw " #a ", " #z "                \n\t"\
1431
    "pmaxsw " #z ", " #a "                \n\t"
1432

    
1433
#define MMABS_SUM_MMX2(a,z, sum)\
1434
    "pxor " #z ", " #z "                \n\t"\
1435
    "psubw " #a ", " #z "                \n\t"\
1436
    "pmaxsw " #z ", " #a "                \n\t"\
1437
    "paddusw " #a ", " #sum "                \n\t"
1438
        
1439
#define SBUTTERFLY(a,b,t,n)\
1440
    "movq " #a ", " #t "                \n\t" /* abcd */\
1441
    "punpckl" #n " " #b ", " #a "        \n\t" /* aebf */\
1442
    "punpckh" #n " " #b ", " #t "        \n\t" /* cgdh */\
1443

    
1444
#define TRANSPOSE4(a,b,c,d,t)\
1445
    SBUTTERFLY(a,b,t,wd) /* a=aebf t=cgdh */\
1446
    SBUTTERFLY(c,d,b,wd) /* c=imjn b=kolp */\
1447
    SBUTTERFLY(a,c,d,dq) /* a=aeim d=bfjn */\
1448
    SBUTTERFLY(t,b,c,dq) /* t=cgko c=dhlp */
1449

    
1450
#define LOAD4(o, a, b, c, d)\
1451
        "movq "#o"(%1), " #a "                \n\t"\
1452
        "movq "#o"+16(%1), " #b "        \n\t"\
1453
        "movq "#o"+32(%1), " #c "        \n\t"\
1454
        "movq "#o"+48(%1), " #d "        \n\t"
1455

    
1456
#define STORE4(o, a, b, c, d)\
1457
        "movq "#a", "#o"(%1)                \n\t"\
1458
        "movq "#b", "#o"+16(%1)                \n\t"\
1459
        "movq "#c", "#o"+32(%1)                \n\t"\
1460
        "movq "#d", "#o"+48(%1)                \n\t"\
1461

    
1462
static int hadamard8_diff_mmx(void *s, uint8_t *src1, uint8_t *src2, int stride, int h){
    uint64_t temp[16] __align8;
    int sum=0;

    assert(h==8);

    diff_pixels_mmx((DCTELEM*)temp, src1, src2, stride);

    asm volatile(
        LOAD4(0 , %%mm0, %%mm1, %%mm2, %%mm3)
        LOAD4(64, %%mm4, %%mm5, %%mm6, %%mm7)

        HADAMARD48

        "movq %%mm7, 112(%1)                \n\t"

        TRANSPOSE4(%%mm0, %%mm1, %%mm2, %%mm3, %%mm7)
        STORE4(0 , %%mm0, %%mm3, %%mm7, %%mm2)

        "movq 112(%1), %%mm7                 \n\t"
        TRANSPOSE4(%%mm4, %%mm5, %%mm6, %%mm7, %%mm0)
        STORE4(64, %%mm4, %%mm7, %%mm0, %%mm6)

        LOAD4(8 , %%mm0, %%mm1, %%mm2, %%mm3)
        LOAD4(72, %%mm4, %%mm5, %%mm6, %%mm7)

        HADAMARD48

        "movq %%mm7, 120(%1)                \n\t"

        TRANSPOSE4(%%mm0, %%mm1, %%mm2, %%mm3, %%mm7)
        STORE4(8 , %%mm0, %%mm3, %%mm7, %%mm2)

        "movq 120(%1), %%mm7                 \n\t"
        TRANSPOSE4(%%mm4, %%mm5, %%mm6, %%mm7, %%mm0)
        "movq %%mm7, %%mm5                \n\t"//FIXME remove
        "movq %%mm6, %%mm7                \n\t"
        "movq %%mm0, %%mm6                \n\t"
//        STORE4(72, %%mm4, %%mm7, %%mm0, %%mm6) //FIXME remove

        LOAD4(64, %%mm0, %%mm1, %%mm2, %%mm3)
//        LOAD4(72, %%mm4, %%mm5, %%mm6, %%mm7)

        HADAMARD48
        "movq %%mm7, 64(%1)                \n\t"
        MMABS(%%mm0, %%mm7)
        MMABS_SUM(%%mm1, %%mm7, %%mm0)
        MMABS_SUM(%%mm2, %%mm7, %%mm0)
        MMABS_SUM(%%mm3, %%mm7, %%mm0)
        MMABS_SUM(%%mm4, %%mm7, %%mm0)
        MMABS_SUM(%%mm5, %%mm7, %%mm0)
        MMABS_SUM(%%mm6, %%mm7, %%mm0)
        "movq 64(%1), %%mm1                \n\t"
        MMABS_SUM(%%mm1, %%mm7, %%mm0)
        "movq %%mm0, 64(%1)                \n\t"

        LOAD4(0 , %%mm0, %%mm1, %%mm2, %%mm3)
        LOAD4(8 , %%mm4, %%mm5, %%mm6, %%mm7)

        HADAMARD48
        "movq %%mm7, (%1)                \n\t"
        MMABS(%%mm0, %%mm7)
        MMABS_SUM(%%mm1, %%mm7, %%mm0)
        MMABS_SUM(%%mm2, %%mm7, %%mm0)
        MMABS_SUM(%%mm3, %%mm7, %%mm0)
        MMABS_SUM(%%mm4, %%mm7, %%mm0)
        MMABS_SUM(%%mm5, %%mm7, %%mm0)
        MMABS_SUM(%%mm6, %%mm7, %%mm0)
        "movq (%1), %%mm1                \n\t"
        MMABS_SUM(%%mm1, %%mm7, %%mm0)
        "movq 64(%1), %%mm1                \n\t"
        MMABS_SUM(%%mm1, %%mm7, %%mm0)

        "movq %%mm0, %%mm1                \n\t"
        "psrlq $32, %%mm0                \n\t"
        "paddusw %%mm1, %%mm0                \n\t"
        "movq %%mm0, %%mm1                \n\t"
        "psrlq $16, %%mm0                \n\t"
        "paddusw %%mm1, %%mm0                \n\t"
        "movd %%mm0, %0                        \n\t"

        : "=r" (sum)
        : "r"(temp)
    );
    return sum&0xFFFF;
}

static int hadamard8_diff_mmx2(void *s, uint8_t *src1, uint8_t *src2, int stride, int h){
    uint64_t temp[16] __align8;
    int sum=0;

    assert(h==8);

    diff_pixels_mmx((DCTELEM*)temp, src1, src2, stride);

    asm volatile(
        LOAD4(0 , %%mm0, %%mm1, %%mm2, %%mm3)
        LOAD4(64, %%mm4, %%mm5, %%mm6, %%mm7)

        HADAMARD48

        "movq %%mm7, 112(%1)                \n\t"

        TRANSPOSE4(%%mm0, %%mm1, %%mm2, %%mm3, %%mm7)
        STORE4(0 , %%mm0, %%mm3, %%mm7, %%mm2)

        "movq 112(%1), %%mm7                 \n\t"
        TRANSPOSE4(%%mm4, %%mm5, %%mm6, %%mm7, %%mm0)
        STORE4(64, %%mm4, %%mm7, %%mm0, %%mm6)

        LOAD4(8 , %%mm0, %%mm1, %%mm2, %%mm3)
        LOAD4(72, %%mm4, %%mm5, %%mm6, %%mm7)

        HADAMARD48

        "movq %%mm7, 120(%1)                \n\t"

        TRANSPOSE4(%%mm0, %%mm1, %%mm2, %%mm3, %%mm7)
        STORE4(8 , %%mm0, %%mm3, %%mm7, %%mm2)

        "movq 120(%1), %%mm7                 \n\t"
        TRANSPOSE4(%%mm4, %%mm5, %%mm6, %%mm7, %%mm0)
        "movq %%mm7, %%mm5                \n\t"//FIXME remove
        "movq %%mm6, %%mm7                \n\t"
        "movq %%mm0, %%mm6                \n\t"
//        STORE4(72, %%mm4, %%mm7, %%mm0, %%mm6) //FIXME remove

        LOAD4(64, %%mm0, %%mm1, %%mm2, %%mm3)
//        LOAD4(72, %%mm4, %%mm5, %%mm6, %%mm7)

        HADAMARD48
        "movq %%mm7, 64(%1)                \n\t"
        MMABS_MMX2(%%mm0, %%mm7)
        MMABS_SUM_MMX2(%%mm1, %%mm7, %%mm0)
        MMABS_SUM_MMX2(%%mm2, %%mm7, %%mm0)
        MMABS_SUM_MMX2(%%mm3, %%mm7, %%mm0)
        MMABS_SUM_MMX2(%%mm4, %%mm7, %%mm0)
        MMABS_SUM_MMX2(%%mm5, %%mm7, %%mm0)
        MMABS_SUM_MMX2(%%mm6, %%mm7, %%mm0)
        "movq 64(%1), %%mm1                \n\t"
        MMABS_SUM_MMX2(%%mm1, %%mm7, %%mm0)
        "movq %%mm0, 64(%1)                \n\t"

        LOAD4(0 , %%mm0, %%mm1, %%mm2, %%mm3)
        LOAD4(8 , %%mm4, %%mm5, %%mm6, %%mm7)

        HADAMARD48
        "movq %%mm7, (%1)                \n\t"
        MMABS_MMX2(%%mm0, %%mm7)
        MMABS_SUM_MMX2(%%mm1, %%mm7, %%mm0)
        MMABS_SUM_MMX2(%%mm2, %%mm7, %%mm0)
        MMABS_SUM_MMX2(%%mm3, %%mm7, %%mm0)
        MMABS_SUM_MMX2(%%mm4, %%mm7, %%mm0)
        MMABS_SUM_MMX2(%%mm5, %%mm7, %%mm0)
        MMABS_SUM_MMX2(%%mm6, %%mm7, %%mm0)
        "movq (%1), %%mm1                \n\t"
        MMABS_SUM_MMX2(%%mm1, %%mm7, %%mm0)
        "movq 64(%1), %%mm1                \n\t"
        MMABS_SUM_MMX2(%%mm1, %%mm7, %%mm0)

        "movq %%mm0, %%mm1                \n\t"
        "psrlq $32, %%mm0                \n\t"
        "paddusw %%mm1, %%mm0                \n\t"
        "movq %%mm0, %%mm1                \n\t"
        "psrlq $16, %%mm0                \n\t"
        "paddusw %%mm1, %%mm0                \n\t"
        "movd %%mm0, %0                        \n\t"

        : "=r" (sum)
        : "r"(temp)
    );
    return sum&0xFFFF;
}

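/* WARPER8_16_SQ (dsputil.h) wraps an 8x8 compare function into a 16x16 one by
   summing it over the four 8x8 quadrants of the block (only the top two when
   h==8). */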
WARPER8_16_SQ(hadamard8_diff_mmx, hadamard8_diff16_mmx)
WARPER8_16_SQ(hadamard8_diff_mmx2, hadamard8_diff16_mmx2)
#endif //CONFIG_ENCODERS

#define put_no_rnd_pixels8_mmx(a,b,c,d) put_pixels8_mmx(a,b,c,d)
#define put_no_rnd_pixels16_mmx(a,b,c,d) put_pixels16_mmx(a,b,c,d)

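/* QPEL_V_LOW emits one row of the MPEG-4 quarter-pel lowpass filter
   (-1, 3, -6, 20, 20, -6, 3, -1)/32: x1..x4 are the four symmetric tap sums,
   combined as (20*x1 - 6*x2 + 3*x3 - x4 + rounder) >> 5, then packed to bytes
   and written through OP (plain store or average). */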
#define QPEL_V_LOW(m3,m4,m5,m6, pw_20, pw_3, rnd, in0, in1, in2, in7, out, OP)\
        "paddw " #m4 ", " #m3 "                \n\t" /* x1 */\
        "movq "MANGLE(ff_pw_20)", %%mm4                \n\t" /* 20 */\
        "pmullw " #m3 ", %%mm4                \n\t" /* 20x1 */\
        "movq "#in7", " #m3 "                \n\t" /* d */\
        "movq "#in0", %%mm5                \n\t" /* D */\
        "paddw " #m3 ", %%mm5                \n\t" /* x4 */\
        "psubw %%mm5, %%mm4                \n\t" /* 20x1 - x4 */\
        "movq "#in1", %%mm5                \n\t" /* C */\
        "movq "#in2", %%mm6                \n\t" /* B */\
        "paddw " #m6 ", %%mm5                \n\t" /* x3 */\
        "paddw " #m5 ", %%mm6                \n\t" /* x2 */\
        "paddw %%mm6, %%mm6                \n\t" /* 2x2 */\
        "psubw %%mm6, %%mm5                \n\t" /* -2x2 + x3 */\
        "pmullw "MANGLE(ff_pw_3)", %%mm5        \n\t" /* -6x2 + 3x3 */\
        "paddw " #rnd ", %%mm4                \n\t" /* +rounder */\
        "paddw %%mm4, %%mm5                \n\t" /* 20x1 - 6x2 + 3x3 - x4 */\
        "psraw $5, %%mm5                \n\t"\
        "packuswb %%mm5, %%mm5                \n\t"\
        OP(%%mm5, out, %%mm7, d)

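/* QPEL_BASE expands to the horizontal 8/16-pixel MPEG-4 qpel lowpass
   functions.  The MMX2 versions apply the filter above directly, using pshufw
   to mirror pixels at the block edges; the 3DNow! versions fall back to
   computing the taps in scalar C into temp[] and only use MMX for the final
   rounding, shift, pack and the OP store/average. */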
#define QPEL_BASE(OPNAME, ROUNDER, RND, OP_MMX2, OP_3DNOW)\
1666
static void OPNAME ## mpeg4_qpel16_h_lowpass_mmx2(uint8_t *dst, uint8_t *src, int dstStride, int srcStride, int h){\
1667
    uint64_t temp;\
1668
\
1669
    asm volatile(\
1670
        "pxor %%mm7, %%mm7                \n\t"\
1671
        "1:                                \n\t"\
1672
        "movq  (%0), %%mm0                \n\t" /* ABCDEFGH */\
1673
        "movq %%mm0, %%mm1                \n\t" /* ABCDEFGH */\
1674
        "movq %%mm0, %%mm2                \n\t" /* ABCDEFGH */\
1675
        "punpcklbw %%mm7, %%mm0                \n\t" /* 0A0B0C0D */\
1676
        "punpckhbw %%mm7, %%mm1                \n\t" /* 0E0F0G0H */\
1677
        "pshufw $0x90, %%mm0, %%mm5        \n\t" /* 0A0A0B0C */\
1678
        "pshufw $0x41, %%mm0, %%mm6        \n\t" /* 0B0A0A0B */\
1679
        "movq %%mm2, %%mm3                \n\t" /* ABCDEFGH */\
1680
        "movq %%mm2, %%mm4                \n\t" /* ABCDEFGH */\
1681
        "psllq $8, %%mm2                \n\t" /* 0ABCDEFG */\
1682
        "psllq $16, %%mm3                \n\t" /* 00ABCDEF */\
1683
        "psllq $24, %%mm4                \n\t" /* 000ABCDE */\
1684
        "punpckhbw %%mm7, %%mm2                \n\t" /* 0D0E0F0G */\
1685
        "punpckhbw %%mm7, %%mm3                \n\t" /* 0C0D0E0F */\
1686
        "punpckhbw %%mm7, %%mm4                \n\t" /* 0B0C0D0E */\
1687
        "paddw %%mm3, %%mm5                \n\t" /* b */\
1688
        "paddw %%mm2, %%mm6                \n\t" /* c */\
1689
        "paddw %%mm5, %%mm5                \n\t" /* 2b */\
1690
        "psubw %%mm5, %%mm6                \n\t" /* c - 2b */\
1691
        "pshufw $0x06, %%mm0, %%mm5        \n\t" /* 0C0B0A0A */\
1692
        "pmullw "MANGLE(ff_pw_3)", %%mm6                \n\t" /* 3c - 6b */\
1693
        "paddw %%mm4, %%mm0                \n\t" /* a */\
1694
        "paddw %%mm1, %%mm5                \n\t" /* d */\
1695
        "pmullw "MANGLE(ff_pw_20)", %%mm0                \n\t" /* 20a */\
1696
        "psubw %%mm5, %%mm0                \n\t" /* 20a - d */\
1697
        "paddw %6, %%mm6                \n\t"\
1698
        "paddw %%mm6, %%mm0                \n\t" /* 20a - 6b + 3c - d */\
1699
        "psraw $5, %%mm0                \n\t"\
1700
        "movq %%mm0, %5                        \n\t"\
1701
        /* mm1=EFGH, mm2=DEFG, mm3=CDEF, mm4=BCDE, mm7=0 */\
1702
        \
1703
        "movq 5(%0), %%mm0                \n\t" /* FGHIJKLM */\
1704
        "movq %%mm0, %%mm5                \n\t" /* FGHIJKLM */\
1705
        "movq %%mm0, %%mm6                \n\t" /* FGHIJKLM */\
1706
        "psrlq $8, %%mm0                \n\t" /* GHIJKLM0 */\
1707
        "psrlq $16, %%mm5                \n\t" /* HIJKLM00 */\
1708
        "punpcklbw %%mm7, %%mm0                \n\t" /* 0G0H0I0J */\
1709
        "punpcklbw %%mm7, %%mm5                \n\t" /* 0H0I0J0K */\
1710
        "paddw %%mm0, %%mm2                \n\t" /* b */\
1711
        "paddw %%mm5, %%mm3                \n\t" /* c */\
1712
        "paddw %%mm2, %%mm2                \n\t" /* 2b */\
1713
        "psubw %%mm2, %%mm3                \n\t" /* c - 2b */\
1714
        "movq %%mm6, %%mm2                \n\t" /* FGHIJKLM */\
1715
        "psrlq $24, %%mm6                \n\t" /* IJKLM000 */\
1716
        "punpcklbw %%mm7, %%mm2                \n\t" /* 0F0G0H0I */\
1717
        "punpcklbw %%mm7, %%mm6                \n\t" /* 0I0J0K0L */\
1718
        "pmullw "MANGLE(ff_pw_3)", %%mm3                \n\t" /* 3c - 6b */\
1719
        "paddw %%mm2, %%mm1                \n\t" /* a */\
1720
        "paddw %%mm6, %%mm4                \n\t" /* d */\
1721
        "pmullw "MANGLE(ff_pw_20)", %%mm1                \n\t" /* 20a */\
1722
        "psubw %%mm4, %%mm3                \n\t" /* - 6b +3c - d */\
1723
        "paddw %6, %%mm1                \n\t"\
1724
        "paddw %%mm1, %%mm3                \n\t" /* 20a - 6b +3c - d */\
1725
        "psraw $5, %%mm3                \n\t"\
1726
        "movq %5, %%mm1                        \n\t"\
1727
        "packuswb %%mm3, %%mm1                \n\t"\
1728
        OP_MMX2(%%mm1, (%1),%%mm4, q)\
1729
        /* mm0= GHIJ, mm2=FGHI, mm5=HIJK, mm6=IJKL, mm7=0 */\
1730
        \
1731
        "movq 9(%0), %%mm1                \n\t" /* JKLMNOPQ */\
1732
        "movq %%mm1, %%mm4                \n\t" /* JKLMNOPQ */\
1733
        "movq %%mm1, %%mm3                \n\t" /* JKLMNOPQ */\
1734
        "psrlq $8, %%mm1                \n\t" /* KLMNOPQ0 */\
1735
        "psrlq $16, %%mm4                \n\t" /* LMNOPQ00 */\
1736
        "punpcklbw %%mm7, %%mm1                \n\t" /* 0K0L0M0N */\
1737
        "punpcklbw %%mm7, %%mm4                \n\t" /* 0L0M0N0O */\
1738
        "paddw %%mm1, %%mm5                \n\t" /* b */\
1739
        "paddw %%mm4, %%mm0                \n\t" /* c */\
1740
        "paddw %%mm5, %%mm5                \n\t" /* 2b */\
1741
        "psubw %%mm5, %%mm0                \n\t" /* c - 2b */\
1742
        "movq %%mm3, %%mm5                \n\t" /* JKLMNOPQ */\
1743
        "psrlq $24, %%mm3                \n\t" /* MNOPQ000 */\
1744
        "pmullw "MANGLE(ff_pw_3)", %%mm0                \n\t" /* 3c - 6b */\
1745
        "punpcklbw %%mm7, %%mm3                \n\t" /* 0M0N0O0P */\
1746
        "paddw %%mm3, %%mm2                \n\t" /* d */\
1747
        "psubw %%mm2, %%mm0                \n\t" /* -6b + 3c - d */\
1748
        "movq %%mm5, %%mm2                \n\t" /* JKLMNOPQ */\
1749
        "punpcklbw %%mm7, %%mm2                \n\t" /* 0J0K0L0M */\
1750
        "punpckhbw %%mm7, %%mm5                \n\t" /* 0N0O0P0Q */\
1751
        "paddw %%mm2, %%mm6                \n\t" /* a */\
1752
        "pmullw "MANGLE(ff_pw_20)", %%mm6                \n\t" /* 20a */\
1753
        "paddw %6, %%mm0                \n\t"\
1754
        "paddw %%mm6, %%mm0                \n\t" /* 20a - 6b + 3c - d */\
1755
        "psraw $5, %%mm0                \n\t"\
1756
        /* mm1=KLMN, mm2=JKLM, mm3=MNOP, mm4=LMNO, mm5=NOPQ mm7=0 */\
1757
        \
1758
        "paddw %%mm5, %%mm3                \n\t" /* a */\
1759
        "pshufw $0xF9, %%mm5, %%mm6        \n\t" /* 0O0P0Q0Q */\
1760
        "paddw %%mm4, %%mm6                \n\t" /* b */\
1761
        "pshufw $0xBE, %%mm5, %%mm4        \n\t" /* 0P0Q0Q0P */\
1762
        "pshufw $0x6F, %%mm5, %%mm5        \n\t" /* 0Q0Q0P0O */\
1763
        "paddw %%mm1, %%mm4                \n\t" /* c */\
1764
        "paddw %%mm2, %%mm5                \n\t" /* d */\
1765
        "paddw %%mm6, %%mm6                \n\t" /* 2b */\
1766
        "psubw %%mm6, %%mm4                \n\t" /* c - 2b */\
1767
        "pmullw "MANGLE(ff_pw_20)", %%mm3                \n\t" /* 20a */\
1768
        "pmullw "MANGLE(ff_pw_3)", %%mm4                \n\t" /* 3c - 6b */\
1769
        "psubw %%mm5, %%mm3                \n\t" /* -6b + 3c - d */\
1770
        "paddw %6, %%mm4                \n\t"\
1771
        "paddw %%mm3, %%mm4                \n\t" /* 20a - 6b + 3c - d */\
1772
        "psraw $5, %%mm4                \n\t"\
1773
        "packuswb %%mm4, %%mm0                \n\t"\
1774
        OP_MMX2(%%mm0, 8(%1), %%mm4, q)\
1775
        \
1776
        "add %3, %0                        \n\t"\
1777
        "add %4, %1                        \n\t"\
1778
        "decl %2                        \n\t"\
1779
        " jnz 1b                                \n\t"\
1780
        : "+a"(src), "+c"(dst), "+m"(h)\
1781
        : "d"((long)srcStride), "S"((long)dstStride), /*"m"(ff_pw_20), "m"(ff_pw_3),*/ "m"(temp), "m"(ROUNDER)\
1782
        : "memory"\
1783
    );\
1784
}\
1785
\
1786
static void OPNAME ## mpeg4_qpel16_h_lowpass_3dnow(uint8_t *dst, uint8_t *src, int dstStride, int srcStride, int h){\
1787
    int i;\
1788
    int16_t temp[16];\
1789
    /* quick HACK, XXX FIXME MUST be optimized */\
1790
    for(i=0; i<h; i++)\
1791
    {\
1792
        temp[ 0]= (src[ 0]+src[ 1])*20 - (src[ 0]+src[ 2])*6 + (src[ 1]+src[ 3])*3 - (src[ 2]+src[ 4]);\
1793
        temp[ 1]= (src[ 1]+src[ 2])*20 - (src[ 0]+src[ 3])*6 + (src[ 0]+src[ 4])*3 - (src[ 1]+src[ 5]);\
1794
        temp[ 2]= (src[ 2]+src[ 3])*20 - (src[ 1]+src[ 4])*6 + (src[ 0]+src[ 5])*3 - (src[ 0]+src[ 6]);\
1795
        temp[ 3]= (src[ 3]+src[ 4])*20 - (src[ 2]+src[ 5])*6 + (src[ 1]+src[ 6])*3 - (src[ 0]+src[ 7]);\
1796
        temp[ 4]= (src[ 4]+src[ 5])*20 - (src[ 3]+src[ 6])*6 + (src[ 2]+src[ 7])*3 - (src[ 1]+src[ 8]);\
1797
        temp[ 5]= (src[ 5]+src[ 6])*20 - (src[ 4]+src[ 7])*6 + (src[ 3]+src[ 8])*3 - (src[ 2]+src[ 9]);\
1798
        temp[ 6]= (src[ 6]+src[ 7])*20 - (src[ 5]+src[ 8])*6 + (src[ 4]+src[ 9])*3 - (src[ 3]+src[10]);\
1799
        temp[ 7]= (src[ 7]+src[ 8])*20 - (src[ 6]+src[ 9])*6 + (src[ 5]+src[10])*3 - (src[ 4]+src[11]);\
1800
        temp[ 8]= (src[ 8]+src[ 9])*20 - (src[ 7]+src[10])*6 + (src[ 6]+src[11])*3 - (src[ 5]+src[12]);\
1801
        temp[ 9]= (src[ 9]+src[10])*20 - (src[ 8]+src[11])*6 + (src[ 7]+src[12])*3 - (src[ 6]+src[13]);\
1802
        temp[10]= (src[10]+src[11])*20 - (src[ 9]+src[12])*6 + (src[ 8]+src[13])*3 - (src[ 7]+src[14]);\
1803
        temp[11]= (src[11]+src[12])*20 - (src[10]+src[13])*6 + (src[ 9]+src[14])*3 - (src[ 8]+src[15]);\
1804
        temp[12]= (src[12]+src[13])*20 - (src[11]+src[14])*6 + (src[10]+src[15])*3 - (src[ 9]+src[16]);\
1805
        temp[13]= (src[13]+src[14])*20 - (src[12]+src[15])*6 + (src[11]+src[16])*3 - (src[10]+src[16]);\
1806
        temp[14]= (src[14]+src[15])*20 - (src[13]+src[16])*6 + (src[12]+src[16])*3 - (src[11]+src[15]);\
1807
        temp[15]= (src[15]+src[16])*20 - (src[14]+src[16])*6 + (src[13]+src[15])*3 - (src[12]+src[14]);\
1808
        asm volatile(\
1809
            "movq (%0), %%mm0                \n\t"\
1810
            "movq 8(%0), %%mm1                \n\t"\
1811
            "paddw %2, %%mm0                \n\t"\
1812
            "paddw %2, %%mm1                \n\t"\
1813
            "psraw $5, %%mm0                \n\t"\
1814
            "psraw $5, %%mm1                \n\t"\
1815
            "packuswb %%mm1, %%mm0        \n\t"\
1816
            OP_3DNOW(%%mm0, (%1), %%mm1, q)\
1817
            "movq 16(%0), %%mm0                \n\t"\
1818
            "movq 24(%0), %%mm1                \n\t"\
1819
            "paddw %2, %%mm0                \n\t"\
1820
            "paddw %2, %%mm1                \n\t"\
1821
            "psraw $5, %%mm0                \n\t"\
1822
            "psraw $5, %%mm1                \n\t"\
1823
            "packuswb %%mm1, %%mm0        \n\t"\
1824
            OP_3DNOW(%%mm0, 8(%1), %%mm1, q)\
1825
            :: "r"(temp), "r"(dst), "m"(ROUNDER)\
1826
            : "memory"\
1827
        );\
1828
        dst+=dstStride;\
1829
        src+=srcStride;\
1830
    }\
1831
}\
1832
\
1833
static void OPNAME ## mpeg4_qpel8_h_lowpass_mmx2(uint8_t *dst, uint8_t *src, int dstStride, int srcStride, int h){\
1834
    uint64_t temp;\
1835
\
1836
    asm volatile(\
1837
        "pxor %%mm7, %%mm7                \n\t"\
1838
        "1:                                \n\t"\
1839
        "movq  (%0), %%mm0                \n\t" /* ABCDEFGH */\
1840
        "movq %%mm0, %%mm1                \n\t" /* ABCDEFGH */\
1841
        "movq %%mm0, %%mm2                \n\t" /* ABCDEFGH */\
1842
        "punpcklbw %%mm7, %%mm0                \n\t" /* 0A0B0C0D */\
1843
        "punpckhbw %%mm7, %%mm1                \n\t" /* 0E0F0G0H */\
1844
        "pshufw $0x90, %%mm0, %%mm5        \n\t" /* 0A0A0B0C */\
1845
        "pshufw $0x41, %%mm0, %%mm6        \n\t" /* 0B0A0A0B */\
1846
        "movq %%mm2, %%mm3                \n\t" /* ABCDEFGH */\
1847
        "movq %%mm2, %%mm4                \n\t" /* ABCDEFGH */\
1848
        "psllq $8, %%mm2                \n\t" /* 0ABCDEFG */\
1849
        "psllq $16, %%mm3                \n\t" /* 00ABCDEF */\
1850
        "psllq $24, %%mm4                \n\t" /* 000ABCDE */\
1851
        "punpckhbw %%mm7, %%mm2                \n\t" /* 0D0E0F0G */\
1852
        "punpckhbw %%mm7, %%mm3                \n\t" /* 0C0D0E0F */\
1853
        "punpckhbw %%mm7, %%mm4                \n\t" /* 0B0C0D0E */\
1854
        "paddw %%mm3, %%mm5                \n\t" /* b */\
1855
        "paddw %%mm2, %%mm6                \n\t" /* c */\
1856
        "paddw %%mm5, %%mm5                \n\t" /* 2b */\
1857
        "psubw %%mm5, %%mm6                \n\t" /* c - 2b */\
1858
        "pshufw $0x06, %%mm0, %%mm5        \n\t" /* 0C0B0A0A */\
1859
        "pmullw "MANGLE(ff_pw_3)", %%mm6                \n\t" /* 3c - 6b */\
1860
        "paddw %%mm4, %%mm0                \n\t" /* a */\
1861
        "paddw %%mm1, %%mm5                \n\t" /* d */\
1862
        "pmullw "MANGLE(ff_pw_20)", %%mm0                \n\t" /* 20a */\
1863
        "psubw %%mm5, %%mm0                \n\t" /* 20a - d */\
1864
        "paddw %6, %%mm6                \n\t"\
1865
        "paddw %%mm6, %%mm0                \n\t" /* 20a - 6b + 3c - d */\
1866
        "psraw $5, %%mm0                \n\t"\
1867
        /* mm1=EFGH, mm2=DEFG, mm3=CDEF, mm4=BCDE, mm7=0 */\
1868
        \
1869
        "movd 5(%0), %%mm5                \n\t" /* FGHI */\
1870
        "punpcklbw %%mm7, %%mm5                \n\t" /* 0F0G0H0I */\
1871
        "pshufw $0xF9, %%mm5, %%mm6        \n\t" /* 0G0H0I0I */\
1872
        "paddw %%mm5, %%mm1                \n\t" /* a */\
1873
        "paddw %%mm6, %%mm2                \n\t" /* b */\
1874
        "pshufw $0xBE, %%mm5, %%mm6        \n\t" /* 0H0I0I0H */\
1875
        "pshufw $0x6F, %%mm5, %%mm5        \n\t" /* 0I0I0H0G */\
1876
        "paddw %%mm6, %%mm3                \n\t" /* c */\
1877
        "paddw %%mm5, %%mm4                \n\t" /* d */\
1878
        "paddw %%mm2, %%mm2                \n\t" /* 2b */\
1879
        "psubw %%mm2, %%mm3                \n\t" /* c - 2b */\
1880
        "pmullw "MANGLE(ff_pw_20)", %%mm1                \n\t" /* 20a */\
1881
        "pmullw "MANGLE(ff_pw_3)", %%mm3                \n\t" /* 3c - 6b */\
1882
        "psubw %%mm4, %%mm3                \n\t" /* -6b + 3c - d */\
1883
        "paddw %6, %%mm1                \n\t"\
1884
        "paddw %%mm1, %%mm3                \n\t" /* 20a - 6b + 3c - d */\
1885
        "psraw $5, %%mm3                \n\t"\
1886
        "packuswb %%mm3, %%mm0                \n\t"\
1887
        OP_MMX2(%%mm0, (%1), %%mm4, q)\
1888
        \
1889
        "add %3, %0                        \n\t"\
1890
        "add %4, %1                        \n\t"\
1891
        "decl %2                        \n\t"\
1892
        " jnz 1b                        \n\t"\
1893
        : "+a"(src), "+c"(dst), "+m"(h)\
1894
        : "S"((long)srcStride), "D"((long)dstStride), /*"m"(ff_pw_20), "m"(ff_pw_3),*/ "m"(temp), "m"(ROUNDER)\
1895
        : "memory"\
1896
    );\
1897
}\
1898
\
1899
static void OPNAME ## mpeg4_qpel8_h_lowpass_3dnow(uint8_t *dst, uint8_t *src, int dstStride, int srcStride, int h){\
1900
    int i;\
1901
    int16_t temp[8];\
1902
    /* quick HACK, XXX FIXME MUST be optimized */\
1903
    for(i=0; i<h; i++)\
1904
    {\
1905
        temp[ 0]= (src[ 0]+src[ 1])*20 - (src[ 0]+src[ 2])*6 + (src[ 1]+src[ 3])*3 - (src[ 2]+src[ 4]);\
1906
        temp[ 1]= (src[ 1]+src[ 2])*20 - (src[ 0]+src[ 3])*6 + (src[ 0]+src[ 4])*3 - (src[ 1]+src[ 5]);\
1907
        temp[ 2]= (src[ 2]+src[ 3])*20 - (src[ 1]+src[ 4])*6 + (src[ 0]+src[ 5])*3 - (src[ 0]+src[ 6]);\
1908
        temp[ 3]= (src[ 3]+src[ 4])*20 - (src[ 2]+src[ 5])*6 + (src[ 1]+src[ 6])*3 - (src[ 0]+src[ 7]);\
1909
        temp[ 4]= (src[ 4]+src[ 5])*20 - (src[ 3]+src[ 6])*6 + (src[ 2]+src[ 7])*3 - (src[ 1]+src[ 8]);\
1910
        temp[ 5]= (src[ 5]+src[ 6])*20 - (src[ 4]+src[ 7])*6 + (src[ 3]+src[ 8])*3 - (src[ 2]+src[ 8]);\
1911
        temp[ 6]= (src[ 6]+src[ 7])*20 - (src[ 5]+src[ 8])*6 + (src[ 4]+src[ 8])*3 - (src[ 3]+src[ 7]);\
1912
        temp[ 7]= (src[ 7]+src[ 8])*20 - (src[ 6]+src[ 8])*6 + (src[ 5]+src[ 7])*3 - (src[ 4]+src[ 6]);\
1913
        asm volatile(\
1914
            "movq (%0), %%mm0                \n\t"\
1915
            "movq 8(%0), %%mm1                \n\t"\
1916
            "paddw %2, %%mm0                \n\t"\
1917
            "paddw %2, %%mm1                \n\t"\
1918
            "psraw $5, %%mm0                \n\t"\
1919
            "psraw $5, %%mm1                \n\t"\
1920
            "packuswb %%mm1, %%mm0        \n\t"\
1921
            OP_3DNOW(%%mm0, (%1), %%mm1, q)\
1922
            :: "r"(temp), "r"(dst), "m"(ROUNDER)\
1923
            :"memory"\
1924
        );\
1925
        dst+=dstStride;\
1926
        src+=srcStride;\
1927
    }\
1928
}
1929

    
1930
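/* QPEL_OP builds the complete set of qpelN_mcXY_<cpu> motion compensation
   functions.  XY is the quarter-pel position (x,y in 0..3): mc00 is a plain
   copy, mc20/mc02 are the pure horizontal/vertical half-pel lowpass cases,
   and the remaining positions filter into a temporary (halfH/halfHV) and then
   blend with the pixels*_l2 averaging helpers. */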
#define QPEL_OP(OPNAME, ROUNDER, RND, OP, MMX)\
1931
\
1932
static void OPNAME ## mpeg4_qpel16_v_lowpass_ ## MMX(uint8_t *dst, uint8_t *src, int dstStride, int srcStride){\
1933
    uint64_t temp[17*4];\
1934
    uint64_t *temp_ptr= temp;\
1935
    int count= 17;\
1936
\
1937
    /*FIXME unroll */\
1938
    asm volatile(\
1939
        "pxor %%mm7, %%mm7                \n\t"\
1940
        "1:                                \n\t"\
1941
        "movq (%0), %%mm0                \n\t"\
1942
        "movq (%0), %%mm1                \n\t"\
1943
        "movq 8(%0), %%mm2                \n\t"\
1944
        "movq 8(%0), %%mm3                \n\t"\
1945
        "punpcklbw %%mm7, %%mm0                \n\t"\
1946
        "punpckhbw %%mm7, %%mm1                \n\t"\
1947
        "punpcklbw %%mm7, %%mm2                \n\t"\
1948
        "punpckhbw %%mm7, %%mm3                \n\t"\
1949
        "movq %%mm0, (%1)                \n\t"\
1950
        "movq %%mm1, 17*8(%1)                \n\t"\
1951
        "movq %%mm2, 2*17*8(%1)                \n\t"\
1952
        "movq %%mm3, 3*17*8(%1)                \n\t"\
1953
        "add $8, %1                        \n\t"\
1954
        "add %3, %0                        \n\t"\
1955
        "decl %2                        \n\t"\
1956
        " jnz 1b                        \n\t"\
1957
        : "+r" (src), "+r" (temp_ptr), "+r"(count)\
1958
        : "r" ((long)srcStride)\
1959
        : "memory"\
1960
    );\
1961
    \
1962
    temp_ptr= temp;\
1963
    count=4;\
1964
    \
1965
/*FIXME reorder for speed */\
1966
    asm volatile(\
1967
        /*"pxor %%mm7, %%mm7                \n\t"*/\
1968
        "1:                                \n\t"\
1969
        "movq (%0), %%mm0                \n\t"\
1970
        "movq 8(%0), %%mm1                \n\t"\
1971
        "movq 16(%0), %%mm2                \n\t"\
1972
        "movq 24(%0), %%mm3                \n\t"\
1973
        QPEL_V_LOW(%%mm0, %%mm1, %%mm2, %%mm3, %5, %6, %5, 16(%0),  8(%0),   (%0), 32(%0), (%1), OP)\
1974
        QPEL_V_LOW(%%mm1, %%mm2, %%mm3, %%mm0, %5, %6, %5,  8(%0),   (%0),   (%0), 40(%0), (%1, %3), OP)\
1975
        "add %4, %1                        \n\t"\
1976
        QPEL_V_LOW(%%mm2, %%mm3, %%mm0, %%mm1, %5, %6, %5,   (%0),   (%0),  8(%0), 48(%0), (%1), OP)\
1977
        \
1978
        QPEL_V_LOW(%%mm3, %%mm0, %%mm1, %%mm2, %5, %6, %5,   (%0),  8(%0), 16(%0), 56(%0), (%1, %3), OP)\
1979
        "add %4, %1                        \n\t"\
1980
        QPEL_V_LOW(%%mm0, %%mm1, %%mm2, %%mm3, %5, %6, %5,  8(%0), 16(%0), 24(%0), 64(%0), (%1), OP)\
1981
        QPEL_V_LOW(%%mm1, %%mm2, %%mm3, %%mm0, %5, %6, %5, 16(%0), 24(%0), 32(%0), 72(%0), (%1, %3), OP)\
1982
        "add %4, %1                        \n\t"\
1983
        QPEL_V_LOW(%%mm2, %%mm3, %%mm0, %%mm1, %5, %6, %5, 24(%0), 32(%0), 40(%0), 80(%0), (%1), OP)\
1984
        QPEL_V_LOW(%%mm3, %%mm0, %%mm1, %%mm2, %5, %6, %5, 32(%0), 40(%0), 48(%0), 88(%0), (%1, %3), OP)\
1985
        "add %4, %1                        \n\t"\
1986
        QPEL_V_LOW(%%mm0, %%mm1, %%mm2, %%mm3, %5, %6, %5, 40(%0), 48(%0), 56(%0), 96(%0), (%1), OP)\
1987
        QPEL_V_LOW(%%mm1, %%mm2, %%mm3, %%mm0, %5, %6, %5, 48(%0), 56(%0), 64(%0),104(%0), (%1, %3), OP)\
1988
        "add %4, %1                        \n\t"\
1989
        QPEL_V_LOW(%%mm2, %%mm3, %%mm0, %%mm1, %5, %6, %5, 56(%0), 64(%0), 72(%0),112(%0), (%1), OP)\
1990
        QPEL_V_LOW(%%mm3, %%mm0, %%mm1, %%mm2, %5, %6, %5, 64(%0), 72(%0), 80(%0),120(%0), (%1, %3), OP)\
1991
        "add %4, %1                        \n\t"\
1992
        QPEL_V_LOW(%%mm0, %%mm1, %%mm2, %%mm3, %5, %6, %5, 72(%0), 80(%0), 88(%0),128(%0), (%1), OP)\
1993
        \
1994
        QPEL_V_LOW(%%mm1, %%mm2, %%mm3, %%mm0, %5, %6, %5, 80(%0), 88(%0), 96(%0),128(%0), (%1, %3), OP)\
1995
        "add %4, %1                        \n\t"  \
1996
        QPEL_V_LOW(%%mm2, %%mm3, %%mm0, %%mm1, %5, %6, %5, 88(%0), 96(%0),104(%0),120(%0), (%1), OP)\
1997
        QPEL_V_LOW(%%mm3, %%mm0, %%mm1, %%mm2, %5, %6, %5, 96(%0),104(%0),112(%0),112(%0), (%1, %3), OP)\
1998
        \
1999
        "add $136, %0                        \n\t"\
2000
        "add %6, %1                        \n\t"\
2001
        "decl %2                        \n\t"\
2002
        " jnz 1b                        \n\t"\
2003
        \
2004
        : "+r"(temp_ptr), "+r"(dst), "+g"(count)\
2005
        : "r"((long)dstStride), "r"(2*(long)dstStride), /*"m"(ff_pw_20), "m"(ff_pw_3),*/ "m"(ROUNDER), "g"(4-14*(long)dstStride)\
2006
        :"memory"\
2007
    );\
2008
}\
2009
\
2010
static void OPNAME ## mpeg4_qpel8_v_lowpass_ ## MMX(uint8_t *dst, uint8_t *src, int dstStride, int srcStride){\
2011
    uint64_t temp[9*2];\
2012
    uint64_t *temp_ptr= temp;\
2013
    int count= 9;\
2014
\
2015
    /*FIXME unroll */\
2016
    asm volatile(\
2017
        "pxor %%mm7, %%mm7                \n\t"\
2018
        "1:                                \n\t"\
2019
        "movq (%0), %%mm0                \n\t"\
2020
        "movq (%0), %%mm1                \n\t"\
2021
        "punpcklbw %%mm7, %%mm0                \n\t"\
2022
        "punpckhbw %%mm7, %%mm1                \n\t"\
2023
        "movq %%mm0, (%1)                \n\t"\
2024
        "movq %%mm1, 9*8(%1)                \n\t"\
2025
        "add $8, %1                        \n\t"\
2026
        "add %3, %0                        \n\t"\
2027
        "decl %2                        \n\t"\
2028
        " jnz 1b                        \n\t"\
2029
        : "+r" (src), "+r" (temp_ptr), "+r"(count)\
2030
        : "r" ((long)srcStride)\
2031
        : "memory"\
2032
    );\
2033
    \
2034
    temp_ptr= temp;\
2035
    count=2;\
2036
    \
2037
/*FIXME reorder for speed */\
2038
    asm volatile(\
2039
        /*"pxor %%mm7, %%mm7                \n\t"*/\
2040
        "1:                                \n\t"\
2041
        "movq (%0), %%mm0                \n\t"\
2042
        "movq 8(%0), %%mm1                \n\t"\
2043
        "movq 16(%0), %%mm2                \n\t"\
2044
        "movq 24(%0), %%mm3                \n\t"\
2045
        QPEL_V_LOW(%%mm0, %%mm1, %%mm2, %%mm3, %5, %6, %5, 16(%0),  8(%0),   (%0), 32(%0), (%1), OP)\
2046
        QPEL_V_LOW(%%mm1, %%mm2, %%mm3, %%mm0, %5, %6, %5,  8(%0),   (%0),   (%0), 40(%0), (%1, %3), OP)\
2047
        "add %4, %1                        \n\t"\
2048
        QPEL_V_LOW(%%mm2, %%mm3, %%mm0, %%mm1, %5, %6, %5,   (%0),   (%0),  8(%0), 48(%0), (%1), OP)\
2049
        \
2050
        QPEL_V_LOW(%%mm3, %%mm0, %%mm1, %%mm2, %5, %6, %5,   (%0),  8(%0), 16(%0), 56(%0), (%1, %3), OP)\
2051
        "add %4, %1                        \n\t"\
2052
        QPEL_V_LOW(%%mm0, %%mm1, %%mm2, %%mm3, %5, %6, %5,  8(%0), 16(%0), 24(%0), 64(%0), (%1), OP)\
2053
        \
2054
        QPEL_V_LOW(%%mm1, %%mm2, %%mm3, %%mm0, %5, %6, %5, 16(%0), 24(%0), 32(%0), 64(%0), (%1, %3), OP)\
2055
        "add %4, %1                        \n\t"\
2056
        QPEL_V_LOW(%%mm2, %%mm3, %%mm0, %%mm1, %5, %6, %5, 24(%0), 32(%0), 40(%0), 56(%0), (%1), OP)\
2057
        QPEL_V_LOW(%%mm3, %%mm0, %%mm1, %%mm2, %5, %6, %5, 32(%0), 40(%0), 48(%0), 48(%0), (%1, %3), OP)\
2058
                \
2059
        "add $72, %0                        \n\t"\
2060
        "add %6, %1                        \n\t"\
2061
        "decl %2                        \n\t"\
2062
        " jnz 1b                        \n\t"\
2063
         \
2064
        : "+r"(temp_ptr), "+r"(dst), "+g"(count)\
2065
        : "r"((long)dstStride), "r"(2*(long)dstStride), /*"m"(ff_pw_20), "m"(ff_pw_3),*/ "m"(ROUNDER), "g"(4-6*(long)dstStride)\
2066
        : "memory"\
2067
   );\
2068
}\
2069
\
2070
static void OPNAME ## qpel8_mc00_ ## MMX (uint8_t *dst, uint8_t *src, int stride){\
2071
    OPNAME ## pixels8_mmx(dst, src, stride, 8);\
2072
}\
2073
\
2074
static void OPNAME ## qpel8_mc10_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
2075
    uint64_t temp[8];\
2076
    uint8_t * const half= (uint8_t*)temp;\
2077
    put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(half, src, 8, stride, 8);\
2078
    OPNAME ## pixels8_l2_ ## MMX(dst, src, half, stride, stride, 8);\
2079
}\
2080
\
2081
static void OPNAME ## qpel8_mc20_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
2082
    OPNAME ## mpeg4_qpel8_h_lowpass_ ## MMX(dst, src, stride, stride, 8);\
2083
}\
2084
\
2085
static void OPNAME ## qpel8_mc30_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
2086
    uint64_t temp[8];\
2087
    uint8_t * const half= (uint8_t*)temp;\
2088
    put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(half, src, 8, stride, 8);\
2089
    OPNAME ## pixels8_l2_ ## MMX(dst, src+1, half, stride, stride, 8);\
2090
}\
2091
\
2092
static void OPNAME ## qpel8_mc01_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
2093
    uint64_t temp[8];\
2094
    uint8_t * const half= (uint8_t*)temp;\
2095
    put ## RND ## mpeg4_qpel8_v_lowpass_ ## MMX(half, src, 8, stride);\
2096
    OPNAME ## pixels8_l2_ ## MMX(dst, src, half, stride, stride, 8);\
2097
}\
2098
\
2099
static void OPNAME ## qpel8_mc02_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
2100
    OPNAME ## mpeg4_qpel8_v_lowpass_ ## MMX(dst, src, stride, stride);\
2101
}\
2102
\
2103
static void OPNAME ## qpel8_mc03_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
2104
    uint64_t temp[8];\
2105
    uint8_t * const half= (uint8_t*)temp;\
2106
    put ## RND ## mpeg4_qpel8_v_lowpass_ ## MMX(half, src, 8, stride);\
2107
    OPNAME ## pixels8_l2_ ## MMX(dst, src+stride, half, stride, stride, 8);\
2108
}\
2109
static void OPNAME ## qpel8_mc11_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
2110
    uint64_t half[8 + 9];\
2111
    uint8_t * const halfH= ((uint8_t*)half) + 64;\
2112
    uint8_t * const halfHV= ((uint8_t*)half);\
2113
    put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8, stride, 9);\
2114
    put ## RND ## pixels8_l2_ ## MMX(halfH, src, halfH, 8, stride, 9);\
2115
    put ## RND ## mpeg4_qpel8_v_lowpass_ ## MMX(halfHV, halfH, 8, 8);\
2116
    OPNAME ## pixels8_l2_ ## MMX(dst, halfH, halfHV, stride, 8, 8);\
2117
}\
2118
static void OPNAME ## qpel8_mc31_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
2119
    uint64_t half[8 + 9];\
2120
    uint8_t * const halfH= ((uint8_t*)half) + 64;\
2121
    uint8_t * const halfHV= ((uint8_t*)half);\
2122
    put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8, stride, 9);\
2123
    put ## RND ## pixels8_l2_ ## MMX(halfH, src+1, halfH, 8, stride, 9);\
2124
    put ## RND ## mpeg4_qpel8_v_lowpass_ ## MMX(halfHV, halfH, 8, 8);\
2125
    OPNAME ## pixels8_l2_ ## MMX(dst, halfH, halfHV, stride, 8, 8);\
2126
}\
2127
static void OPNAME ## qpel8_mc13_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
2128
    uint64_t half[8 + 9];\
2129
    uint8_t * const halfH= ((uint8_t*)half) + 64;\
2130
    uint8_t * const halfHV= ((uint8_t*)half);\
2131
    put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8, stride, 9);\
2132
    put ## RND ## pixels8_l2_ ## MMX(halfH, src, halfH, 8, stride, 9);\
2133
    put ## RND ## mpeg4_qpel8_v_lowpass_ ## MMX(halfHV, halfH, 8, 8);\
2134
    OPNAME ## pixels8_l2_ ## MMX(dst, halfH+8, halfHV, stride, 8, 8);\
2135
}\
2136
static void OPNAME ## qpel8_mc33_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
2137
    uint64_t half[8 + 9];\
2138
    uint8_t * const halfH= ((uint8_t*)half) + 64;\
2139
    uint8_t * const halfHV= ((uint8_t*)half);\
2140
    put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8, stride, 9);\
2141
    put ## RND ## pixels8_l2_ ## MMX(halfH, src+1, halfH, 8, stride, 9);\
2142
    put ## RND ## mpeg4_qpel8_v_lowpass_ ## MMX(halfHV, halfH, 8, 8);\
2143
    OPNAME ## pixels8_l2_ ## MMX(dst, halfH+8, halfHV, stride, 8, 8);\
2144
}\
2145
static void OPNAME ## qpel8_mc21_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
2146
    uint64_t half[8 + 9];\
2147
    uint8_t * const halfH= ((uint8_t*)half) + 64;\
2148
    uint8_t * const halfHV= ((uint8_t*)half);\
2149
    put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8, stride, 9);\
2150
    put ## RND ## mpeg4_qpel8_v_lowpass_ ## MMX(halfHV, halfH, 8, 8);\
2151
    OPNAME ## pixels8_l2_ ## MMX(dst, halfH, halfHV, stride, 8, 8);\
2152
}\
2153
static void OPNAME ## qpel8_mc23_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
2154
    uint64_t half[8 + 9];\
2155
    uint8_t * const halfH= ((uint8_t*)half) + 64;\
2156
    uint8_t * const halfHV= ((uint8_t*)half);\
2157
    put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8, stride, 9);\
2158
    put ## RND ## mpeg4_qpel8_v_lowpass_ ## MMX(halfHV, halfH, 8, 8);\
2159
    OPNAME ## pixels8_l2_ ## MMX(dst, halfH+8, halfHV, stride, 8, 8);\
2160
}\
2161
static void OPNAME ## qpel8_mc12_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
2162
    uint64_t half[8 + 9];\
2163
    uint8_t * const halfH= ((uint8_t*)half);\
2164
    put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8, stride, 9);\
2165
    put ## RND ## pixels8_l2_ ## MMX(halfH, src, halfH, 8, stride, 9);\
2166
    OPNAME ## mpeg4_qpel8_v_lowpass_ ## MMX(dst, halfH, stride, 8);\
2167
}\
2168
static void OPNAME ## qpel8_mc32_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
2169
    uint64_t half[8 + 9];\
2170
    uint8_t * const halfH= ((uint8_t*)half);\
2171
    put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8, stride, 9);\
2172
    put ## RND ## pixels8_l2_ ## MMX(halfH, src+1, halfH, 8, stride, 9);\
2173
    OPNAME ## mpeg4_qpel8_v_lowpass_ ## MMX(dst, halfH, stride, 8);\
2174
}\
2175
static void OPNAME ## qpel8_mc22_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
2176
    uint64_t half[9];\
2177
    uint8_t * const halfH= ((uint8_t*)half);\
2178
    put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8, stride, 9);\
2179
    OPNAME ## mpeg4_qpel8_v_lowpass_ ## MMX(dst, halfH, stride, 8);\
2180
}\
2181
static void OPNAME ## qpel16_mc00_ ## MMX (uint8_t *dst, uint8_t *src, int stride){\
2182
    OPNAME ## pixels16_mmx(dst, src, stride, 16);\
2183
}\
2184
\
2185
static void OPNAME ## qpel16_mc10_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
2186
    uint64_t temp[32];\
2187
    uint8_t * const half= (uint8_t*)temp;\
2188
    put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(half, src, 16, stride, 16);\
2189
    OPNAME ## pixels16_l2_ ## MMX(dst, src, half, stride, stride, 16);\
2190
}\
2191
\
2192
static void OPNAME ## qpel16_mc20_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
2193
    OPNAME ## mpeg4_qpel16_h_lowpass_ ## MMX(dst, src, stride, stride, 16);\
2194
}\
2195
\
2196
static void OPNAME ## qpel16_mc30_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
2197
    uint64_t temp[32];\
2198
    uint8_t * const half= (uint8_t*)temp;\
2199
    put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(half, src, 16, stride, 16);\
2200
    OPNAME ## pixels16_l2_ ## MMX(dst, src+1, half, stride, stride, 16);\
2201
}\
2202
\
2203
static void OPNAME ## qpel16_mc01_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
2204
    uint64_t temp[32];\
2205
    uint8_t * const half= (uint8_t*)temp;\
2206
    put ## RND ## mpeg4_qpel16_v_lowpass_ ## MMX(half, src, 16, stride);\
2207
    OPNAME ## pixels16_l2_ ## MMX(dst, src, half, stride, stride, 16);\
2208
}\
2209
\
2210
static void OPNAME ## qpel16_mc02_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
2211
    OPNAME ## mpeg4_qpel16_v_lowpass_ ## MMX(dst, src, stride, stride);\
2212
}\
2213
\
2214
static void OPNAME ## qpel16_mc03_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
2215
    uint64_t temp[32];\
2216
    uint8_t * const half= (uint8_t*)temp;\
2217
    put ## RND ## mpeg4_qpel16_v_lowpass_ ## MMX(half, src, 16, stride);\
2218
    OPNAME ## pixels16_l2_ ## MMX(dst, src+stride, half, stride, stride, 16);\
2219
}\
2220
static void OPNAME ## qpel16_mc11_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
2221
    uint64_t half[16*2 + 17*2];\
2222
    uint8_t * const halfH= ((uint8_t*)half) + 256;\
2223
    uint8_t * const halfHV= ((uint8_t*)half);\
2224
    put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16, stride, 17);\
2225
    put ## RND ## pixels16_l2_ ## MMX(halfH, src, halfH, 16, stride, 17);\
2226
    put ## RND ## mpeg4_qpel16_v_lowpass_ ## MMX(halfHV, halfH, 16, 16);\
2227
    OPNAME ## pixels16_l2_ ## MMX(dst, halfH, halfHV, stride, 16, 16);\
2228
}\
2229
static void OPNAME ## qpel16_mc31_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
2230
    uint64_t half[16*2 + 17*2];\
2231
    uint8_t * const halfH= ((uint8_t*)half) + 256;\
2232
    uint8_t * const halfHV= ((uint8_t*)half);\
2233
    put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16, stride, 17);\
2234
    put ## RND ## pixels16_l2_ ## MMX(halfH, src+1, halfH, 16, stride, 17);\
2235
    put ## RND ## mpeg4_qpel16_v_lowpass_ ## MMX(halfHV, halfH, 16, 16);\
2236
    OPNAME ## pixels16_l2_ ## MMX(dst, halfH, halfHV, stride, 16, 16);\
2237
}\
2238
static void OPNAME ## qpel16_mc13_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
2239
    uint64_t half[16*2 + 17*2];\
2240
    uint8_t * const halfH= ((uint8_t*)half) + 256;\
2241
    uint8_t * const halfHV= ((uint8_t*)half);\
2242
    put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16, stride, 17);\
2243
    put ## RND ## pixels16_l2_ ## MMX(halfH, src, halfH, 16, stride, 17);\
2244
    put ## RND ## mpeg4_qpel16_v_lowpass_ ## MMX(halfHV, halfH, 16, 16);\
2245
    OPNAME ## pixels16_l2_ ## MMX(dst, halfH+16, halfHV, stride, 16, 16);\
2246
}\
2247
static void OPNAME ## qpel16_mc33_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
2248
    uint64_t half[16*2 + 17*2];\
2249
    uint8_t * const halfH= ((uint8_t*)half) + 256;\
2250
    uint8_t * const halfHV= ((uint8_t*)half);\
2251
    put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16, stride, 17);\
2252
    put ## RND ## pixels16_l2_ ## MMX(halfH, src+1, halfH, 16, stride, 17);\
2253
    put ## RND ## mpeg4_qpel16_v_lowpass_ ## MMX(halfHV, halfH, 16, 16);\
2254
    OPNAME ## pixels16_l2_ ## MMX(dst, halfH+16, halfHV, stride, 16, 16);\
2255
}\
2256
static void OPNAME ## qpel16_mc21_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
2257
    uint64_t half[16*2 + 17*2];\
2258
    uint8_t * const halfH= ((uint8_t*)half) + 256;\
2259
    uint8_t * const halfHV= ((uint8_t*)half);\
2260
    put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16, stride, 17);\
2261
    put ## RND ## mpeg4_qpel16_v_lowpass_ ## MMX(halfHV, halfH, 16, 16);\
2262
    OPNAME ## pixels16_l2_ ## MMX(dst, halfH, halfHV, stride, 16, 16);\
2263
}\
2264
static void OPNAME ## qpel16_mc23_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
2265
    uint64_t half[16*2 + 17*2];\
2266
    uint8_t * const halfH= ((uint8_t*)half) + 256;\
2267
    uint8_t * const halfHV= ((uint8_t*)half);\
2268
    put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16, stride, 17);\
2269
    put ## RND ## mpeg4_qpel16_v_lowpass_ ## MMX(halfHV, halfH, 16, 16);\
2270
    OPNAME ## pixels16_l2_ ## MMX(dst, halfH+16, halfHV, stride, 16, 16);\
2271
}\
2272
static void OPNAME ## qpel16_mc12_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
2273
    uint64_t half[17*2];\
2274
    uint8_t * const halfH= ((uint8_t*)half);\
2275
    put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16, stride, 17);\
2276
    put ## RND ## pixels16_l2_ ## MMX(halfH, src, halfH, 16, stride, 17);\
2277
    OPNAME ## mpeg4_qpel16_v_lowpass_ ## MMX(dst, halfH, stride, 16);\
2278
}\
2279
static void OPNAME ## qpel16_mc32_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
2280
    uint64_t half[17*2];\
2281
    uint8_t * const halfH= ((uint8_t*)half);\
2282
    put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16, stride, 17);\
2283
    put ## RND ## pixels16_l2_ ## MMX(halfH, src+1, halfH, 16, stride, 17);\
2284
    OPNAME ## mpeg4_qpel16_v_lowpass_ ## MMX(dst, halfH, stride, 16);\
2285
}\
2286
static void OPNAME ## qpel16_mc22_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
2287
    uint64_t half[17*2];\
2288
    uint8_t * const halfH= ((uint8_t*)half);\
2289
    put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16, stride, 17);\
2290
    OPNAME ## mpeg4_qpel16_v_lowpass_ ## MMX(dst, halfH, stride, 16);\
2291
}

#define PUT_OP(a,b,temp, size) "mov" #size " " #a ", " #b "        \n\t"
#define AVG_3DNOW_OP(a,b,temp, size) \
"mov" #size " " #b ", " #temp "        \n\t"\
"pavgusb " #temp ", " #a "        \n\t"\
"mov" #size " " #a ", " #b "        \n\t"
#define AVG_MMX2_OP(a,b,temp, size) \
"mov" #size " " #b ", " #temp "        \n\t"\
"pavgb " #temp ", " #a "        \n\t"\
"mov" #size " " #a ", " #b "        \n\t"

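/* Instantiations: the put_ and avg_ variants round with ff_pw_16 (+16 before
   the >>5) while put_no_rnd_ uses ff_pw_15, matching the no-rounding
   convention of the halfpel code; avg_ writes back through pavgusb on 3DNow!
   and pavgb on MMX2. */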
QPEL_BASE(put_       , ff_pw_16, _       , PUT_OP, PUT_OP)
QPEL_BASE(avg_       , ff_pw_16, _       , AVG_MMX2_OP, AVG_3DNOW_OP)
QPEL_BASE(put_no_rnd_, ff_pw_15, _no_rnd_, PUT_OP, PUT_OP)
QPEL_OP(put_       , ff_pw_16, _       , PUT_OP, 3dnow)
QPEL_OP(avg_       , ff_pw_16, _       , AVG_3DNOW_OP, 3dnow)
QPEL_OP(put_no_rnd_, ff_pw_15, _no_rnd_, PUT_OP, 3dnow)
QPEL_OP(put_       , ff_pw_16, _       , PUT_OP, mmx2)
QPEL_OP(avg_       , ff_pw_16, _       , AVG_MMX2_OP, mmx2)
QPEL_OP(put_no_rnd_, ff_pw_15, _no_rnd_, PUT_OP, mmx2)

#if 0
static void just_return() { return; }
#endif

#define SET_QPEL_FUNC(postfix1, postfix2) \
    c->put_ ## postfix1 = put_ ## postfix2;\
    c->put_no_rnd_ ## postfix1 = put_no_rnd_ ## postfix2;\
    c->avg_ ## postfix1 = avg_ ## postfix2;

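/* try_8x8basis_mmx estimates the weighted squared error that rem[] would have
   after adding scale*basis[] (same semantics as the C fallback in dsputil.c);
   add_8x8basis_mmx below applies that update in place.  scale is pre-shifted
   so that pmulhw lands the product at RECON_SHIFT precision. */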
static int try_8x8basis_mmx(int16_t rem[64], int16_t weight[64], int16_t basis[64], int scale){
    long i=0;

    assert(ABS(scale) < 256);
    scale<<= 16 + 1 - BASIS_SHIFT + RECON_SHIFT;

    asm volatile(
        "pcmpeqw %%mm6, %%mm6                \n\t" // -1w
        "psrlw $15, %%mm6                \n\t" //  1w
        "pxor %%mm7, %%mm7                \n\t"
        "movd  %4, %%mm5                \n\t"
        "punpcklwd %%mm5, %%mm5                \n\t"
        "punpcklwd %%mm5, %%mm5                \n\t"
        "1:                                \n\t"
        "movq  (%1, %0), %%mm0                \n\t"
        "movq  8(%1, %0), %%mm1                \n\t"
        "pmulhw %%mm5, %%mm0                \n\t"
        "pmulhw %%mm5, %%mm1                \n\t"
        "paddw %%mm6, %%mm0                \n\t"
        "paddw %%mm6, %%mm1                \n\t"
        "psraw $1, %%mm0                \n\t"
        "psraw $1, %%mm1                \n\t"
        "paddw (%2, %0), %%mm0                \n\t"
        "paddw 8(%2, %0), %%mm1                \n\t"
        "psraw $6, %%mm0                \n\t"
        "psraw $6, %%mm1                \n\t"
        "pmullw (%3, %0), %%mm0                \n\t"
        "pmullw 8(%3, %0), %%mm1        \n\t"
        "pmaddwd %%mm0, %%mm0                \n\t"
        "pmaddwd %%mm1, %%mm1                \n\t"
        "paddd %%mm1, %%mm0                \n\t"
        "psrld $4, %%mm0                \n\t"
        "paddd %%mm0, %%mm7                \n\t"
        "add $16, %0                        \n\t"
        "cmp $128, %0                        \n\t" //FIXME optimize & bench
        " jb 1b                                \n\t"
        "movq %%mm7, %%mm6                \n\t"
        "psrlq $32, %%mm7                \n\t"
        "paddd %%mm6, %%mm7                \n\t"
        "psrld $2, %%mm7                \n\t"
        "movd %%mm7, %0                        \n\t"

        : "+r" (i)
        : "r"(basis), "r"(rem), "r"(weight), "g"(scale)
    );
    return i;
}

static void add_8x8basis_mmx(int16_t rem[64], int16_t basis[64], int scale){
    long i=0;

    if(ABS(scale) < 256){
        scale<<= 16 + 1 - BASIS_SHIFT + RECON_SHIFT;
        asm volatile(
                "pcmpeqw %%mm6, %%mm6                \n\t" // -1w
                "psrlw $15, %%mm6                \n\t" //  1w
                "movd  %3, %%mm5                \n\t"
                "punpcklwd %%mm5, %%mm5                \n\t"
                "punpcklwd %%mm5, %%mm5                \n\t"
                "1:                                \n\t"
                "movq  (%1, %0), %%mm0                \n\t"
                "movq  8(%1, %0), %%mm1                \n\t"
                "pmulhw %%mm5, %%mm0                \n\t"
                "pmulhw %%mm5, %%mm1                \n\t"
                "paddw %%mm6, %%mm0                \n\t"
                "paddw %%mm6, %%mm1                \n\t"
                "psraw $1, %%mm0                \n\t"
                "psraw $1, %%mm1                \n\t"
                "paddw (%2, %0), %%mm0                \n\t"
                "paddw 8(%2, %0), %%mm1                \n\t"
                "movq %%mm0, (%2, %0)                \n\t"
                "movq %%mm1, 8(%2, %0)                \n\t"
                "add $16, %0                        \n\t"
                "cmp $128, %0                        \n\t" //FIXME optimize & bench
                " jb 1b                                \n\t"

                : "+r" (i)
                : "r"(basis), "r"(rem), "g"(scale)
        );
    }else{
        for(i=0; i<8*8; i++){
            rem[i] += (basis[i]*scale + (1<<(BASIS_SHIFT - RECON_SHIFT-1)))>>(BASIS_SHIFT - RECON_SHIFT);
        }
    }
}

#include "h264dsp_mmx.c"
2409
    
2410
/* external functions, from idct_mmx.c */
2411
void ff_mmx_idct(DCTELEM *block);
2412
void ff_mmxext_idct(DCTELEM *block);
2413

    
2414
void ff_vp3_idct_sse2(int16_t *input_data);
2415
void ff_vp3_idct_mmx(int16_t *data);
2416
void ff_vp3_dsp_init_mmx(void);
2417

    
2418
/* XXX: those functions should be suppressed ASAP when all IDCTs are
2419
   converted */
2420
static void ff_libmpeg2mmx_idct_put(uint8_t *dest, int line_size, DCTELEM *block)
2421
{
2422
    ff_mmx_idct (block);
2423
    put_pixels_clamped_mmx(block, dest, line_size);
2424
}
2425
static void ff_libmpeg2mmx_idct_add(uint8_t *dest, int line_size, DCTELEM *block)
2426
{
2427
    ff_mmx_idct (block);
2428
    add_pixels_clamped_mmx(block, dest, line_size);
2429
}
2430
static void ff_libmpeg2mmx2_idct_put(uint8_t *dest, int line_size, DCTELEM *block)
2431
{
2432
    ff_mmxext_idct (block);
2433
    put_pixels_clamped_mmx(block, dest, line_size);
2434
}
2435
static void ff_libmpeg2mmx2_idct_add(uint8_t *dest, int line_size, DCTELEM *block)
2436
{
2437
    ff_mmxext_idct (block);
2438
    add_pixels_clamped_mmx(block, dest, line_size);
2439
}
2440
static void ff_vp3_idct_put_sse2(uint8_t *dest, int line_size, DCTELEM *block)
2441
{
2442
    ff_vp3_idct_sse2(block);
2443
    put_signed_pixels_clamped_mmx(block, dest, line_size);
2444
}
2445
static void ff_vp3_idct_add_sse2(uint8_t *dest, int line_size, DCTELEM *block)
2446
{
2447
    ff_vp3_idct_sse2(block);
2448
    add_pixels_clamped_mmx(block, dest, line_size);
2449
}
2450
static void ff_vp3_idct_put_mmx(uint8_t *dest, int line_size, DCTELEM *block)
2451
{
2452
    ff_vp3_idct_mmx(block);
2453
    put_signed_pixels_clamped_mmx(block, dest, line_size);
2454
}
2455
static void ff_vp3_idct_add_mmx(uint8_t *dest, int line_size, DCTELEM *block)
2456
{
2457
    ff_vp3_idct_mmx(block);
2458
    add_pixels_clamped_mmx(block, dest, line_size);
2459
}
2460
    
2461
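/* dsputil_init_mmx fills the DSPContext function pointers according to the
   detected CPU features; avctx->dsp_mask can override the detection, either
   forcing flags on (FF_MM_FORCE) or masking them off. */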
void dsputil_init_mmx(DSPContext* c, AVCodecContext *avctx)
{
    mm_flags = mm_support();

    if (avctx->dsp_mask) {
        if (avctx->dsp_mask & FF_MM_FORCE)
            mm_flags |= (avctx->dsp_mask & 0xffff);
        else
            mm_flags &= ~(avctx->dsp_mask & 0xffff);
    }

#if 0
    av_log(avctx, AV_LOG_INFO, "libavcodec: CPU flags:");
    if (mm_flags & MM_MMX)
        av_log(avctx, AV_LOG_INFO, " mmx");
    if (mm_flags & MM_MMXEXT)
        av_log(avctx, AV_LOG_INFO, " mmxext");
    if (mm_flags & MM_3DNOW)
        av_log(avctx, AV_LOG_INFO, " 3dnow");
    if (mm_flags & MM_SSE)
        av_log(avctx, AV_LOG_INFO, " sse");
    if (mm_flags & MM_SSE2)
        av_log(avctx, AV_LOG_INFO, " sse2");
    av_log(avctx, AV_LOG_INFO, "\n");
#endif

    if (mm_flags & MM_MMX) {
        const int idct_algo= avctx->idct_algo;

#ifdef CONFIG_ENCODERS
        const int dct_algo = avctx->dct_algo;
        if(dct_algo==FF_DCT_AUTO || dct_algo==FF_DCT_MMX){
            if(mm_flags & MM_SSE2){
                c->fdct = ff_fdct_sse2;
            }else if(mm_flags & MM_MMXEXT){
                c->fdct = ff_fdct_mmx2;
            }else{
                c->fdct = ff_fdct_mmx;
            }
        }
#endif //CONFIG_ENCODERS
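        /* Each IDCT implementation expects its coefficients in a particular
           order; idct_permutation_type tells the generic code which
           scantable/quantizer permutation matches the IDCT selected here
           (simple MMX, libmpeg2, or the transposed/partially transposed
           orders used by the VP3 ones). */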
        if(avctx->lowres==0){
            if(idct_algo==FF_IDCT_AUTO || idct_algo==FF_IDCT_SIMPLEMMX){
                c->idct_put= ff_simple_idct_put_mmx;
                c->idct_add= ff_simple_idct_add_mmx;
                c->idct    = ff_simple_idct_mmx;
                c->idct_permutation_type= FF_SIMPLE_IDCT_PERM;
            }else if(idct_algo==FF_IDCT_LIBMPEG2MMX){
                if(mm_flags & MM_MMXEXT){
                    c->idct_put= ff_libmpeg2mmx2_idct_put;
                    c->idct_add= ff_libmpeg2mmx2_idct_add;
                    c->idct    = ff_mmxext_idct;
                }else{
                    c->idct_put= ff_libmpeg2mmx_idct_put;
                    c->idct_add= ff_libmpeg2mmx_idct_add;
                    c->idct    = ff_mmx_idct;
                }
                c->idct_permutation_type= FF_LIBMPEG2_IDCT_PERM;
            }else if(idct_algo==FF_IDCT_VP3){
                if(mm_flags & MM_SSE2){
                    c->idct_put= ff_vp3_idct_put_sse2;
                    c->idct_add= ff_vp3_idct_add_sse2;
                    c->idct    = ff_vp3_idct_sse2;
                    c->idct_permutation_type= FF_TRANSPOSE_IDCT_PERM;
                }else{
                    ff_vp3_dsp_init_mmx();
                    c->idct_put= ff_vp3_idct_put_mmx;
                    c->idct_add= ff_vp3_idct_add_mmx;
                    c->idct    = ff_vp3_idct_mmx;
                    c->idct_permutation_type= FF_PARTTRANS_IDCT_PERM;
                }
            }
        }

#ifdef CONFIG_ENCODERS
        c->get_pixels = get_pixels_mmx;
        c->diff_pixels = diff_pixels_mmx;
#endif //CONFIG_ENCODERS
        c->put_pixels_clamped = put_pixels_clamped_mmx;
        c->put_signed_pixels_clamped = put_signed_pixels_clamped_mmx;
        c->add_pixels_clamped = add_pixels_clamped_mmx;
        c->clear_blocks = clear_blocks_mmx;
#ifdef CONFIG_ENCODERS
        c->pix_sum = pix_sum16_mmx;
#endif //CONFIG_ENCODERS

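        /* pixels_tab[a][b] convention (dsputil.h): a selects the block width
           (0 = 16 pixels, 1 = 8), b the half-pel position (0 = aligned,
           1 = x+1/2, 2 = y+1/2, 3 = both). */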
        c->put_pixels_tab[0][0] = put_pixels16_mmx;
        c->put_pixels_tab[0][1] = put_pixels16_x2_mmx;
        c->put_pixels_tab[0][2] = put_pixels16_y2_mmx;
        c->put_pixels_tab[0][3] = put_pixels16_xy2_mmx;

        c->put_no_rnd_pixels_tab[0][0] = put_pixels16_mmx;
        c->put_no_rnd_pixels_tab[0][1] = put_no_rnd_pixels16_x2_mmx;
        c->put_no_rnd_pixels_tab[0][2] = put_no_rnd_pixels16_y2_mmx;
        c->put_no_rnd_pixels_tab[0][3] = put_no_rnd_pixels16_xy2_mmx;

        c->avg_pixels_tab[0][0] = avg_pixels16_mmx;
        c->avg_pixels_tab[0][1] = avg_pixels16_x2_mmx;
        c->avg_pixels_tab[0][2] = avg_pixels16_y2_mmx;
        c->avg_pixels_tab[0][3] = avg_pixels16_xy2_mmx;

        c->avg_no_rnd_pixels_tab[0][0] = avg_no_rnd_pixels16_mmx;
        c->avg_no_rnd_pixels_tab[0][1] = avg_no_rnd_pixels16_x2_mmx;
        c->avg_no_rnd_pixels_tab[0][2] = avg_no_rnd_pixels16_y2_mmx;
        c->avg_no_rnd_pixels_tab[0][3] = avg_no_rnd_pixels16_xy2_mmx;

        c->put_pixels_tab[1][0] = put_pixels8_mmx;
        c->put_pixels_tab[1][1] = put_pixels8_x2_mmx;
        c->put_pixels_tab[1][2] = put_pixels8_y2_mmx;
        c->put_pixels_tab[1][3] = put_pixels8_xy2_mmx;

        c->put_no_rnd_pixels_tab[1][0] = put_pixels8_mmx;
        c->put_no_rnd_pixels_tab[1][1] = put_no_rnd_pixels8_x2_mmx;
        c->put_no_rnd_pixels_tab[1][2] = put_no_rnd_pixels8_y2_mmx;
        c->put_no_rnd_pixels_tab[1][3] = put_no_rnd_pixels8_xy2_mmx;

        c->avg_pixels_tab[1][0] = avg_pixels8_mmx;
        c->avg_pixels_tab[1][1] = avg_pixels8_x2_mmx;
        c->avg_pixels_tab[1][2] = avg_pixels8_y2_mmx;
        c->avg_pixels_tab[1][3] = avg_pixels8_xy2_mmx;

        c->avg_no_rnd_pixels_tab[1][0] = avg_no_rnd_pixels8_mmx;
        c->avg_no_rnd_pixels_tab[1][1] = avg_no_rnd_pixels8_x2_mmx;
        c->avg_no_rnd_pixels_tab[1][2] = avg_no_rnd_pixels8_y2_mmx;
        c->avg_no_rnd_pixels_tab[1][3] = avg_no_rnd_pixels8_xy2_mmx;

        c->add_bytes= add_bytes_mmx;
#ifdef CONFIG_ENCODERS
        c->diff_bytes= diff_bytes_mmx;

        c->hadamard8_diff[0]= hadamard8_diff16_mmx;
        c->hadamard8_diff[1]= hadamard8_diff_mmx;

        c->pix_norm1 = pix_norm1_mmx;
        c->sse[0] = sse16_mmx;
        c->sse[1] = sse8_mmx;
        c->vsad[4]= vsad_intra16_mmx;

        c->nsse[0] = nsse16_mmx;
        c->nsse[1] = nsse8_mmx;
        if(!(avctx->flags & CODEC_FLAG_BITEXACT)){
            c->vsad[0] = vsad16_mmx;
        }

        if(!(avctx->flags & CODEC_FLAG_BITEXACT)){
            c->try_8x8basis= try_8x8basis_mmx;
        }
        c->add_8x8basis= add_8x8basis_mmx;

#endif //CONFIG_ENCODERS

        c->h263_v_loop_filter= h263_v_loop_filter_mmx;
        c->h263_h_loop_filter= h263_h_loop_filter_mmx;
        c->put_h264_chroma_pixels_tab[0]= put_h264_chroma_mc8_mmx;

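        // MMXEXT (MMX2) overrides: pavgb-based averaging, Hadamard/VSAD metrics for
        // the encoder, the H.264 4x4 IDCT, qpel MC and the H.264 loop filters.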
        if (mm_flags & MM_MMXEXT) {
            c->put_pixels_tab[0][1] = put_pixels16_x2_mmx2;
            c->put_pixels_tab[0][2] = put_pixels16_y2_mmx2;

            c->avg_pixels_tab[0][0] = avg_pixels16_mmx2;
            c->avg_pixels_tab[0][1] = avg_pixels16_x2_mmx2;
            c->avg_pixels_tab[0][2] = avg_pixels16_y2_mmx2;

            c->put_pixels_tab[1][1] = put_pixels8_x2_mmx2;
            c->put_pixels_tab[1][2] = put_pixels8_y2_mmx2;

            c->avg_pixels_tab[1][0] = avg_pixels8_mmx2;
            c->avg_pixels_tab[1][1] = avg_pixels8_x2_mmx2;
            c->avg_pixels_tab[1][2] = avg_pixels8_y2_mmx2;

#ifdef CONFIG_ENCODERS
            c->hadamard8_diff[0]= hadamard8_diff16_mmx2;
            c->hadamard8_diff[1]= hadamard8_diff_mmx2;
            c->vsad[4]= vsad_intra16_mmx2;
#endif //CONFIG_ENCODERS

            c->h264_idct_add= ff_h264_idct_add_mmx2;

            if(!(avctx->flags & CODEC_FLAG_BITEXACT)){
                c->put_no_rnd_pixels_tab[0][1] = put_no_rnd_pixels16_x2_mmx2;
                c->put_no_rnd_pixels_tab[0][2] = put_no_rnd_pixels16_y2_mmx2;
                c->put_no_rnd_pixels_tab[1][1] = put_no_rnd_pixels8_x2_mmx2;
                c->put_no_rnd_pixels_tab[1][2] = put_no_rnd_pixels8_y2_mmx2;
                c->avg_pixels_tab[0][3] = avg_pixels16_xy2_mmx2;
                c->avg_pixels_tab[1][3] = avg_pixels8_xy2_mmx2;
#ifdef CONFIG_ENCODERS
                c->vsad[0] = vsad16_mmx2;
#endif //CONFIG_ENCODERS
            }

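            // Quarter-pel motion compensation (qpel_pixels_tab): all 16 sub-pel
            // positions for 16x16 and 8x8 blocks, as used by the MPEG-4 qpel mode.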
#if 1
            SET_QPEL_FUNC(qpel_pixels_tab[0][ 0], qpel16_mc00_mmx2)
            SET_QPEL_FUNC(qpel_pixels_tab[0][ 1], qpel16_mc10_mmx2)
            SET_QPEL_FUNC(qpel_pixels_tab[0][ 2], qpel16_mc20_mmx2)
            SET_QPEL_FUNC(qpel_pixels_tab[0][ 3], qpel16_mc30_mmx2)
            SET_QPEL_FUNC(qpel_pixels_tab[0][ 4], qpel16_mc01_mmx2)
            SET_QPEL_FUNC(qpel_pixels_tab[0][ 5], qpel16_mc11_mmx2)
            SET_QPEL_FUNC(qpel_pixels_tab[0][ 6], qpel16_mc21_mmx2)
            SET_QPEL_FUNC(qpel_pixels_tab[0][ 7], qpel16_mc31_mmx2)
            SET_QPEL_FUNC(qpel_pixels_tab[0][ 8], qpel16_mc02_mmx2)
            SET_QPEL_FUNC(qpel_pixels_tab[0][ 9], qpel16_mc12_mmx2)
            SET_QPEL_FUNC(qpel_pixels_tab[0][10], qpel16_mc22_mmx2)
            SET_QPEL_FUNC(qpel_pixels_tab[0][11], qpel16_mc32_mmx2)
            SET_QPEL_FUNC(qpel_pixels_tab[0][12], qpel16_mc03_mmx2)
            SET_QPEL_FUNC(qpel_pixels_tab[0][13], qpel16_mc13_mmx2)
            SET_QPEL_FUNC(qpel_pixels_tab[0][14], qpel16_mc23_mmx2)
            SET_QPEL_FUNC(qpel_pixels_tab[0][15], qpel16_mc33_mmx2)
            SET_QPEL_FUNC(qpel_pixels_tab[1][ 0], qpel8_mc00_mmx2)
            SET_QPEL_FUNC(qpel_pixels_tab[1][ 1], qpel8_mc10_mmx2)
            SET_QPEL_FUNC(qpel_pixels_tab[1][ 2], qpel8_mc20_mmx2)
            SET_QPEL_FUNC(qpel_pixels_tab[1][ 3], qpel8_mc30_mmx2)
            SET_QPEL_FUNC(qpel_pixels_tab[1][ 4], qpel8_mc01_mmx2)
            SET_QPEL_FUNC(qpel_pixels_tab[1][ 5], qpel8_mc11_mmx2)
            SET_QPEL_FUNC(qpel_pixels_tab[1][ 6], qpel8_mc21_mmx2)
            SET_QPEL_FUNC(qpel_pixels_tab[1][ 7], qpel8_mc31_mmx2)
            SET_QPEL_FUNC(qpel_pixels_tab[1][ 8], qpel8_mc02_mmx2)
            SET_QPEL_FUNC(qpel_pixels_tab[1][ 9], qpel8_mc12_mmx2)
            SET_QPEL_FUNC(qpel_pixels_tab[1][10], qpel8_mc22_mmx2)
            SET_QPEL_FUNC(qpel_pixels_tab[1][11], qpel8_mc32_mmx2)
            SET_QPEL_FUNC(qpel_pixels_tab[1][12], qpel8_mc03_mmx2)
            SET_QPEL_FUNC(qpel_pixels_tab[1][13], qpel8_mc13_mmx2)
            SET_QPEL_FUNC(qpel_pixels_tab[1][14], qpel8_mc23_mmx2)
            SET_QPEL_FUNC(qpel_pixels_tab[1][15], qpel8_mc33_mmx2)
#endif

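// dspfunc() below fills one row of the H.264 qpel table: the sixteen sub-pel MC
// functions for the block size given by NUM (16, 8 or 4).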
//FIXME 3dnow too
#define dspfunc(PFX, IDX, NUM) \
    c->PFX ## _pixels_tab[IDX][ 0] = PFX ## NUM ## _mc00_mmx2; \
    c->PFX ## _pixels_tab[IDX][ 1] = PFX ## NUM ## _mc10_mmx2; \
    c->PFX ## _pixels_tab[IDX][ 2] = PFX ## NUM ## _mc20_mmx2; \
    c->PFX ## _pixels_tab[IDX][ 3] = PFX ## NUM ## _mc30_mmx2; \
    c->PFX ## _pixels_tab[IDX][ 4] = PFX ## NUM ## _mc01_mmx2; \
    c->PFX ## _pixels_tab[IDX][ 5] = PFX ## NUM ## _mc11_mmx2; \
    c->PFX ## _pixels_tab[IDX][ 6] = PFX ## NUM ## _mc21_mmx2; \
    c->PFX ## _pixels_tab[IDX][ 7] = PFX ## NUM ## _mc31_mmx2; \
    c->PFX ## _pixels_tab[IDX][ 8] = PFX ## NUM ## _mc02_mmx2; \
    c->PFX ## _pixels_tab[IDX][ 9] = PFX ## NUM ## _mc12_mmx2; \
    c->PFX ## _pixels_tab[IDX][10] = PFX ## NUM ## _mc22_mmx2; \
    c->PFX ## _pixels_tab[IDX][11] = PFX ## NUM ## _mc32_mmx2; \
    c->PFX ## _pixels_tab[IDX][12] = PFX ## NUM ## _mc03_mmx2; \
    c->PFX ## _pixels_tab[IDX][13] = PFX ## NUM ## _mc13_mmx2; \
    c->PFX ## _pixels_tab[IDX][14] = PFX ## NUM ## _mc23_mmx2; \
    c->PFX ## _pixels_tab[IDX][15] = PFX ## NUM ## _mc33_mmx2

            dspfunc(put_h264_qpel, 0, 16);
            dspfunc(put_h264_qpel, 1, 8);
            dspfunc(put_h264_qpel, 2, 4);
            dspfunc(avg_h264_qpel, 0, 16);
            dspfunc(avg_h264_qpel, 1, 8);
            dspfunc(avg_h264_qpel, 2, 4);
#undef dspfunc

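            // H.264 chroma MC and deblocking loop filters (luma and chroma edges,
            // plus the intra chroma variants).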
            c->avg_h264_chroma_pixels_tab[0]= avg_h264_chroma_mc8_mmx2;
            c->h264_v_loop_filter_luma= h264_v_loop_filter_luma_mmx2;
            c->h264_h_loop_filter_luma= h264_h_loop_filter_luma_mmx2;
            c->h264_v_loop_filter_chroma= h264_v_loop_filter_chroma_mmx2;
            c->h264_h_loop_filter_chroma= h264_h_loop_filter_chroma_mmx2;
            c->h264_v_loop_filter_chroma_intra= h264_v_loop_filter_chroma_intra_mmx2;
            c->h264_h_loop_filter_chroma_intra= h264_h_loop_filter_chroma_intra_mmx2;

#ifdef CONFIG_ENCODERS
            c->sub_hfyu_median_prediction= sub_hfyu_median_prediction_mmx2;
#endif //CONFIG_ENCODERS
        } else if (mm_flags & MM_3DNOW) {
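            // 3DNow! path for CPUs without MMXEXT: equivalent put/avg, no-rounding
            // and qpel overrides built on the 3DNow! averaging instruction.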
            c->put_pixels_tab[0][1] = put_pixels16_x2_3dnow;
            c->put_pixels_tab[0][2] = put_pixels16_y2_3dnow;

            c->avg_pixels_tab[0][0] = avg_pixels16_3dnow;
            c->avg_pixels_tab[0][1] = avg_pixels16_x2_3dnow;
            c->avg_pixels_tab[0][2] = avg_pixels16_y2_3dnow;

            c->put_pixels_tab[1][1] = put_pixels8_x2_3dnow;
            c->put_pixels_tab[1][2] = put_pixels8_y2_3dnow;

            c->avg_pixels_tab[1][0] = avg_pixels8_3dnow;
            c->avg_pixels_tab[1][1] = avg_pixels8_x2_3dnow;
            c->avg_pixels_tab[1][2] = avg_pixels8_y2_3dnow;

            if(!(avctx->flags & CODEC_FLAG_BITEXACT)){
                c->put_no_rnd_pixels_tab[0][1] = put_no_rnd_pixels16_x2_3dnow;
                c->put_no_rnd_pixels_tab[0][2] = put_no_rnd_pixels16_y2_3dnow;
                c->put_no_rnd_pixels_tab[1][1] = put_no_rnd_pixels8_x2_3dnow;
                c->put_no_rnd_pixels_tab[1][2] = put_no_rnd_pixels8_y2_3dnow;
                c->avg_pixels_tab[0][3] = avg_pixels16_xy2_3dnow;
                c->avg_pixels_tab[1][3] = avg_pixels8_xy2_3dnow;
            }

            SET_QPEL_FUNC(qpel_pixels_tab[0][ 0], qpel16_mc00_3dnow)
            SET_QPEL_FUNC(qpel_pixels_tab[0][ 1], qpel16_mc10_3dnow)
            SET_QPEL_FUNC(qpel_pixels_tab[0][ 2], qpel16_mc20_3dnow)
            SET_QPEL_FUNC(qpel_pixels_tab[0][ 3], qpel16_mc30_3dnow)
            SET_QPEL_FUNC(qpel_pixels_tab[0][ 4], qpel16_mc01_3dnow)
            SET_QPEL_FUNC(qpel_pixels_tab[0][ 5], qpel16_mc11_3dnow)
            SET_QPEL_FUNC(qpel_pixels_tab[0][ 6], qpel16_mc21_3dnow)
            SET_QPEL_FUNC(qpel_pixels_tab[0][ 7], qpel16_mc31_3dnow)
            SET_QPEL_FUNC(qpel_pixels_tab[0][ 8], qpel16_mc02_3dnow)
            SET_QPEL_FUNC(qpel_pixels_tab[0][ 9], qpel16_mc12_3dnow)
            SET_QPEL_FUNC(qpel_pixels_tab[0][10], qpel16_mc22_3dnow)
            SET_QPEL_FUNC(qpel_pixels_tab[0][11], qpel16_mc32_3dnow)
            SET_QPEL_FUNC(qpel_pixels_tab[0][12], qpel16_mc03_3dnow)
            SET_QPEL_FUNC(qpel_pixels_tab[0][13], qpel16_mc13_3dnow)
            SET_QPEL_FUNC(qpel_pixels_tab[0][14], qpel16_mc23_3dnow)
            SET_QPEL_FUNC(qpel_pixels_tab[0][15], qpel16_mc33_3dnow)
            SET_QPEL_FUNC(qpel_pixels_tab[1][ 0], qpel8_mc00_3dnow)
            SET_QPEL_FUNC(qpel_pixels_tab[1][ 1], qpel8_mc10_3dnow)
            SET_QPEL_FUNC(qpel_pixels_tab[1][ 2], qpel8_mc20_3dnow)
            SET_QPEL_FUNC(qpel_pixels_tab[1][ 3], qpel8_mc30_3dnow)
            SET_QPEL_FUNC(qpel_pixels_tab[1][ 4], qpel8_mc01_3dnow)
            SET_QPEL_FUNC(qpel_pixels_tab[1][ 5], qpel8_mc11_3dnow)
            SET_QPEL_FUNC(qpel_pixels_tab[1][ 6], qpel8_mc21_3dnow)
            SET_QPEL_FUNC(qpel_pixels_tab[1][ 7], qpel8_mc31_3dnow)
            SET_QPEL_FUNC(qpel_pixels_tab[1][ 8], qpel8_mc02_3dnow)
            SET_QPEL_FUNC(qpel_pixels_tab[1][ 9], qpel8_mc12_3dnow)
            SET_QPEL_FUNC(qpel_pixels_tab[1][10], qpel8_mc22_3dnow)
            SET_QPEL_FUNC(qpel_pixels_tab[1][11], qpel8_mc32_3dnow)
            SET_QPEL_FUNC(qpel_pixels_tab[1][12], qpel8_mc03_3dnow)
            SET_QPEL_FUNC(qpel_pixels_tab[1][13], qpel8_mc13_3dnow)
            SET_QPEL_FUNC(qpel_pixels_tab[1][14], qpel8_mc23_3dnow)
            SET_QPEL_FUNC(qpel_pixels_tab[1][15], qpel8_mc33_3dnow)

#define dspfunc(PFX, IDX, NUM) \
    c->PFX ## _pixels_tab[IDX][ 0] = PFX ## NUM ## _mc00_3dnow; \
    c->PFX ## _pixels_tab[IDX][ 1] = PFX ## NUM ## _mc10_3dnow; \
    c->PFX ## _pixels_tab[IDX][ 2] = PFX ## NUM ## _mc20_3dnow; \
    c->PFX ## _pixels_tab[IDX][ 3] = PFX ## NUM ## _mc30_3dnow; \
    c->PFX ## _pixels_tab[IDX][ 4] = PFX ## NUM ## _mc01_3dnow; \
    c->PFX ## _pixels_tab[IDX][ 5] = PFX ## NUM ## _mc11_3dnow; \
    c->PFX ## _pixels_tab[IDX][ 6] = PFX ## NUM ## _mc21_3dnow; \
    c->PFX ## _pixels_tab[IDX][ 7] = PFX ## NUM ## _mc31_3dnow; \
    c->PFX ## _pixels_tab[IDX][ 8] = PFX ## NUM ## _mc02_3dnow; \
    c->PFX ## _pixels_tab[IDX][ 9] = PFX ## NUM ## _mc12_3dnow; \
    c->PFX ## _pixels_tab[IDX][10] = PFX ## NUM ## _mc22_3dnow; \
    c->PFX ## _pixels_tab[IDX][11] = PFX ## NUM ## _mc32_3dnow; \
    c->PFX ## _pixels_tab[IDX][12] = PFX ## NUM ## _mc03_3dnow; \
    c->PFX ## _pixels_tab[IDX][13] = PFX ## NUM ## _mc13_3dnow; \
    c->PFX ## _pixels_tab[IDX][14] = PFX ## NUM ## _mc23_3dnow; \
    c->PFX ## _pixels_tab[IDX][15] = PFX ## NUM ## _mc33_3dnow

            dspfunc(put_h264_qpel, 0, 16);
            dspfunc(put_h264_qpel, 1, 8);
            dspfunc(put_h264_qpel, 2, 4);
            dspfunc(avg_h264_qpel, 0, 16);
            dspfunc(avg_h264_qpel, 1, 8);
            dspfunc(avg_h264_qpel, 2, 4);

            c->avg_h264_chroma_pixels_tab[0]= avg_h264_chroma_mc8_3dnow;
        }
    }

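    // Encoder-only: dsputil_init_pix_mmx() registers the remaining MMX comparison
    // routines (pix_abs/SAD).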
#ifdef CONFIG_ENCODERS
    dsputil_init_pix_mmx(c, avctx);
#endif //CONFIG_ENCODERS
#if 0
    // for speed testing
    get_pixels = just_return;
    put_pixels_clamped = just_return;
    add_pixels_clamped = just_return;

    pix_abs16x16 = just_return;
    pix_abs16x16_x2 = just_return;
    pix_abs16x16_y2 = just_return;
    pix_abs16x16_xy2 = just_return;

    put_pixels_tab[0] = just_return;
    put_pixels_tab[1] = just_return;
    put_pixels_tab[2] = just_return;
    put_pixels_tab[3] = just_return;

    put_no_rnd_pixels_tab[0] = just_return;
    put_no_rnd_pixels_tab[1] = just_return;
    put_no_rnd_pixels_tab[2] = just_return;
    put_no_rnd_pixels_tab[3] = just_return;

    avg_pixels_tab[0] = just_return;
    avg_pixels_tab[1] = just_return;
    avg_pixels_tab[2] = just_return;
    avg_pixels_tab[3] = just_return;

    avg_no_rnd_pixels_tab[0] = just_return;
    avg_no_rnd_pixels_tab[1] = just_return;
    avg_no_rnd_pixels_tab[2] = just_return;
    avg_no_rnd_pixels_tab[3] = just_return;

    //av_fdct = just_return;
    //ff_idct = just_return;
#endif
}