/*
 * MMX optimized motion estimation
 * Copyright (c) 2001 Fabrice Bellard.
 * Copyright (c) 2002-2004 Michael Niedermayer
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 *
 * mostly by Michael Niedermayer <michaelni@gmx.at>
 */
#include "../dsputil.h"
#include "x86_cpu.h"

/* rounding biases: round_tab[1] (+1) for 2-point averages,
 * round_tab[2] (+2) for 4-point averages */
static const __attribute__ ((aligned(8))) uint64_t round_tab[3]={
0x0000000000000000ULL,
0x0001000100010001ULL,
0x0002000200020002ULL,
};

/* all-ones bytes, used by sad8_4_mmx2 to compensate pavgb's round-up bias */
static attribute_used __attribute__ ((aligned(8))) uint64_t bone= 0x0101010101010101LL;

/* plain SAD of an 8-pixel-wide block; |a-b| is computed per byte via two
 * saturating subtractions and a por.  %mm7 must be zero on entry and the
 * running sum is accumulated in %mm6 (set up by the PIX_SAD wrappers below) */
static inline void sad8_1_mmx(uint8_t *blk1, uint8_t *blk2, int stride, int h)
{
    long len= -(stride*h);
    asm volatile(
        ASMALIGN(4)
        "1:                             \n\t"
        "movq (%1, %%"REG_a"), %%mm0    \n\t"
        "movq (%2, %%"REG_a"), %%mm2    \n\t"
        "movq (%2, %%"REG_a"), %%mm4    \n\t"
        "add %3, %%"REG_a"              \n\t"
        "psubusb %%mm0, %%mm2           \n\t"
        "psubusb %%mm4, %%mm0           \n\t"
        "movq (%1, %%"REG_a"), %%mm1    \n\t"
        "movq (%2, %%"REG_a"), %%mm3    \n\t"
        "movq (%2, %%"REG_a"), %%mm5    \n\t"
        "psubusb %%mm1, %%mm3           \n\t"
        "psubusb %%mm5, %%mm1           \n\t"
        "por %%mm2, %%mm0               \n\t"
        "por %%mm1, %%mm3               \n\t"
        "movq %%mm0, %%mm1              \n\t"
        "movq %%mm3, %%mm2              \n\t"
        "punpcklbw %%mm7, %%mm0         \n\t"
        "punpckhbw %%mm7, %%mm1         \n\t"
        "punpcklbw %%mm7, %%mm3         \n\t"
        "punpckhbw %%mm7, %%mm2         \n\t"
        "paddw %%mm1, %%mm0             \n\t"
        "paddw %%mm3, %%mm2             \n\t"
        "paddw %%mm2, %%mm0             \n\t"
        "paddw %%mm0, %%mm6             \n\t"
        "add %3, %%"REG_a"              \n\t"
        " js 1b                         \n\t"
        : "+a" (len)
        : "r" (blk1 - len), "r" (blk2 - len), "r" ((long)stride)
    );
}
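
/* A scalar sketch of what sad8_1_mmx (and sad8_1_mmx2 below) compute, for
 * illustration only (sad8_1_ref is a hypothetical name, not part of this
 * file): the sum of absolute differences over an 8-pixel-wide block of
 * height h.  The MMX versions accumulate into %mm6 instead of returning,
 * so two calls can be chained for 16-pixel-wide blocks before the sum is
 * read back. */
#if 0
static int sad8_1_ref(uint8_t *blk1, uint8_t *blk2, int stride, int h)
{
    int x, y, sum = 0;
    for (y = 0; y < h; y++)
        for (x = 0; x < 8; x++) {
            int d = blk1[y*stride + x] - blk2[y*stride + x];
            sum += d < 0 ? -d : d;
        }
    return sum;
}
#endif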

/* plain SAD using MMX2's psadbw, two rows per iteration */
static inline void sad8_1_mmx2(uint8_t *blk1, uint8_t *blk2, int stride, int h)
{
    asm volatile(
        ASMALIGN(4)
        "1:                             \n\t"
        "movq (%1), %%mm0               \n\t"
        "movq (%1, %3), %%mm1           \n\t"
        "psadbw (%2), %%mm0             \n\t"
        "psadbw (%2, %3), %%mm1         \n\t"
        "paddw %%mm0, %%mm6             \n\t"
        "paddw %%mm1, %%mm6             \n\t"
        "lea (%1,%3,2), %1              \n\t"
        "lea (%2,%3,2), %2              \n\t"
        "sub $2, %0                     \n\t"
        " jg 1b                         \n\t"
        : "+r" (h), "+r" (blk1), "+r" (blk2)
        : "r" ((long)stride)
    );
}
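
/* psadbw sums the absolute byte differences of both operands in a single
 * instruction, replacing the psubusb/por/unpack/paddw ladder of the plain
 * MMX version above.  One-row sketch, for illustration only (psadbw_ref is
 * a hypothetical name): */
#if 0
static int psadbw_ref(const uint8_t *a, const uint8_t *b)
{
    int i, sum = 0;
    for (i = 0; i < 8; i++) {
        int d = a[i] - b[i];
        sum += d < 0 ? -d : d;
    }
    return sum;   /* psadbw leaves this in the low 16 bits of the dst reg */
}
#endif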

/* x half-pel SAD (MMX2): average each pixel with its right neighbour via
 * pavgb, then psadbw against the reference */
static inline void sad8_x2a_mmx2(uint8_t *blk1, uint8_t *blk2, int stride, int h)
{
    asm volatile(
        ASMALIGN(4)
        "1:                             \n\t"
        "movq (%1), %%mm0               \n\t"
        "movq (%1, %3), %%mm1           \n\t"
        "pavgb 1(%1), %%mm0             \n\t"
        "pavgb 1(%1, %3), %%mm1         \n\t"
        "psadbw (%2), %%mm0             \n\t"
        "psadbw (%2, %3), %%mm1         \n\t"
        "paddw %%mm0, %%mm6             \n\t"
        "paddw %%mm1, %%mm6             \n\t"
        "lea (%1,%3,2), %1              \n\t"
        "lea (%2,%3,2), %2              \n\t"
        "sub $2, %0                     \n\t"
        " jg 1b                         \n\t"
        : "+r" (h), "+r" (blk1), "+r" (blk2)
        : "r" ((long)stride)
    );
}

/* y half-pel SAD (MMX2): average each row with the row below, reusing the
 * previous row's load in %mm0 across iterations */
static inline void sad8_y2a_mmx2(uint8_t *blk1, uint8_t *blk2, int stride, int h)
{
    asm volatile(
        "movq (%1), %%mm0               \n\t"
        "add %3, %1                     \n\t"
        ASMALIGN(4)
        "1:                             \n\t"
        "movq (%1), %%mm1               \n\t"
        "movq (%1, %3), %%mm2           \n\t"
        "pavgb %%mm1, %%mm0             \n\t"
        "pavgb %%mm2, %%mm1             \n\t"
        "psadbw (%2), %%mm0             \n\t"
        "psadbw (%2, %3), %%mm1         \n\t"
        "paddw %%mm0, %%mm6             \n\t"
        "paddw %%mm1, %%mm6             \n\t"
        "movq %%mm2, %%mm0              \n\t"
        "lea (%1,%3,2), %1              \n\t"
        "lea (%2,%3,2), %2              \n\t"
        "sub $2, %0                     \n\t"
        " jg 1b                         \n\t"
        : "+r" (h), "+r" (blk1), "+r" (blk2)
        : "r" ((long)stride)
    );
}

/* approximate x+y half-pel SAD (MMX2): chained pavgb with a bone
 * correction, see the note after this function */
static inline void sad8_4_mmx2(uint8_t *blk1, uint8_t *blk2, int stride, int h)
{
    asm volatile(
        "movq "MANGLE(bone)", %%mm5     \n\t"
        "movq (%1), %%mm0               \n\t"
        "pavgb 1(%1), %%mm0             \n\t"
        "add %3, %1                     \n\t"
        ASMALIGN(4)
        "1:                             \n\t"
        "movq (%1), %%mm1               \n\t"
        "movq (%1,%3), %%mm2            \n\t"
        "pavgb 1(%1), %%mm1             \n\t"
        "pavgb 1(%1,%3), %%mm2          \n\t"
        "psubusb %%mm5, %%mm1           \n\t"
        "pavgb %%mm1, %%mm0             \n\t"
        "pavgb %%mm2, %%mm1             \n\t"
        "psadbw (%2), %%mm0             \n\t"
        "psadbw (%2,%3), %%mm1          \n\t"
        "paddw %%mm0, %%mm6             \n\t"
        "paddw %%mm1, %%mm6             \n\t"
        "movq %%mm2, %%mm0              \n\t"
        "lea (%1,%3,2), %1              \n\t"
        "lea (%2,%3,2), %2              \n\t"
        "sub $2, %0                     \n\t"
        " jg 1b                         \n\t"
        : "+r" (h), "+r" (blk1), "+r" (blk2)
        : "r" ((long)stride)
    );
}
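
/* Note: pavgb computes (a+b+1)>>1 per byte, i.e. it always rounds up, so
 * chaining two averages as sad8_4_mmx2 does would over-bias the 4-point
 * mean; the "psubusb bone" above cancels most of that bias.  The result is
 * close to, but not bit-exact with, the C reference, which is why
 * dsputil_init_pix_mmx below installs the mmx2 half-pel variants only when
 * CODEC_FLAG_BITEXACT is unset. */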

/* SAD against the rounded average of two sources, used for x/y half-pel on
 * plain MMX; %mm5 must hold round_tab[1] and %mm7 zero on entry */
static inline void sad8_2_mmx(uint8_t *blk1a, uint8_t *blk1b, uint8_t *blk2, int stride, int h)
{
    long len= -(stride*h);
    asm volatile(
        ASMALIGN(4)
        "1:                             \n\t"
        "movq (%1, %%"REG_a"), %%mm0    \n\t"
        "movq (%2, %%"REG_a"), %%mm1    \n\t"
        "movq (%1, %%"REG_a"), %%mm2    \n\t"
        "movq (%2, %%"REG_a"), %%mm3    \n\t"
        "punpcklbw %%mm7, %%mm0         \n\t"
        "punpcklbw %%mm7, %%mm1         \n\t"
        "punpckhbw %%mm7, %%mm2         \n\t"
        "punpckhbw %%mm7, %%mm3         \n\t"
        "paddw %%mm0, %%mm1             \n\t"
        "paddw %%mm2, %%mm3             \n\t"
        "movq (%3, %%"REG_a"), %%mm4    \n\t"
        "movq (%3, %%"REG_a"), %%mm2    \n\t"
        "paddw %%mm5, %%mm1             \n\t"
        "paddw %%mm5, %%mm3             \n\t"
        "psrlw $1, %%mm1                \n\t"
        "psrlw $1, %%mm3                \n\t"
        "packuswb %%mm3, %%mm1          \n\t"
        "psubusb %%mm1, %%mm4           \n\t"
        "psubusb %%mm2, %%mm1           \n\t"
        "por %%mm4, %%mm1               \n\t"
        "movq %%mm1, %%mm0              \n\t"
        "punpcklbw %%mm7, %%mm0         \n\t"
        "punpckhbw %%mm7, %%mm1         \n\t"
        "paddw %%mm1, %%mm0             \n\t"
        "paddw %%mm0, %%mm6             \n\t"
        "add %4, %%"REG_a"              \n\t"
        " js 1b                         \n\t"
        : "+a" (len)
        : "r" (blk1a - len), "r" (blk1b - len), "r" (blk2 - len), "r" ((long)stride)
    );
}
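
/* Scalar sketch of sad8_2_mmx, for illustration only (sad8_2_ref is a
 * hypothetical name): SAD against the rounded average of two predictions;
 * the +1 bias comes from round_tab[1], preloaded into %mm5 by the callers. */
#if 0
static int sad8_2_ref(uint8_t *blk1a, uint8_t *blk1b, uint8_t *blk2,
                      int stride, int h)
{
    int x, y, sum = 0;
    for (y = 0; y < h; y++)
        for (x = 0; x < 8; x++) {
            int avg = (blk1a[y*stride + x] + blk1b[y*stride + x] + 1) >> 1;
            int d   = avg - blk2[y*stride + x];
            sum += d < 0 ? -d : d;
        }
    return sum;
}
#endif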

/* exact x+y half-pel SAD (plain MMX): 4-point average with the +2
 * round-to-nearest bias taken from round_tab[2] */
static inline void sad8_4_mmx(uint8_t *blk1, uint8_t *blk2, int stride, int h)
{
    long len= -(stride*h);
    asm volatile(
        "movq (%1, %%"REG_a"), %%mm0    \n\t"
        "movq 1(%1, %%"REG_a"), %%mm2   \n\t"
        "movq %%mm0, %%mm1              \n\t"
        "movq %%mm2, %%mm3              \n\t"
        "punpcklbw %%mm7, %%mm0         \n\t"
        "punpckhbw %%mm7, %%mm1         \n\t"
        "punpcklbw %%mm7, %%mm2         \n\t"
        "punpckhbw %%mm7, %%mm3         \n\t"
        "paddw %%mm2, %%mm0             \n\t"
        "paddw %%mm3, %%mm1             \n\t"
        ASMALIGN(4)
        "1:                             \n\t"
        "movq (%2, %%"REG_a"), %%mm2    \n\t"
        "movq 1(%2, %%"REG_a"), %%mm4   \n\t"
        "movq %%mm2, %%mm3              \n\t"
        "movq %%mm4, %%mm5              \n\t"
        "punpcklbw %%mm7, %%mm2         \n\t"
        "punpckhbw %%mm7, %%mm3         \n\t"
        "punpcklbw %%mm7, %%mm4         \n\t"
        "punpckhbw %%mm7, %%mm5         \n\t"
        "paddw %%mm4, %%mm2             \n\t"
        "paddw %%mm5, %%mm3             \n\t"
        "movq 16+"MANGLE(round_tab)", %%mm5 \n\t"
        "paddw %%mm2, %%mm0             \n\t"
        "paddw %%mm3, %%mm1             \n\t"
        "paddw %%mm5, %%mm0             \n\t"
        "paddw %%mm5, %%mm1             \n\t"
        "movq (%3, %%"REG_a"), %%mm4    \n\t"
        "movq (%3, %%"REG_a"), %%mm5    \n\t"
        "psrlw $2, %%mm0                \n\t"
        "psrlw $2, %%mm1                \n\t"
        "packuswb %%mm1, %%mm0          \n\t"
        "psubusb %%mm0, %%mm4           \n\t"
        "psubusb %%mm5, %%mm0           \n\t"
        "por %%mm4, %%mm0               \n\t"
        "movq %%mm0, %%mm4              \n\t"
        "punpcklbw %%mm7, %%mm0         \n\t"
        "punpckhbw %%mm7, %%mm4         \n\t"
        "paddw %%mm0, %%mm6             \n\t"
        "paddw %%mm4, %%mm6             \n\t"
        "movq  %%mm2, %%mm0             \n\t"
        "movq  %%mm3, %%mm1             \n\t"
        "add %4, %%"REG_a"              \n\t"
        " js 1b                         \n\t"
        : "+a" (len)
        : "r" (blk1 - len), "r" (blk1 - len + stride), "r" (blk2 - len), "r" ((long)stride)
    );
}
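
/* Scalar sketch of the exact 4-point average SAD computed by sad8_4_mmx,
 * for illustration only (sad8_4_ref is a hypothetical name); round_tab[2]
 * supplies the +2 bias for round-to-nearest: */
#if 0
static int sad8_4_ref(uint8_t *blk1, uint8_t *blk2, int stride, int h)
{
    int x, y, sum = 0;
    for (y = 0; y < h; y++)
        for (x = 0; x < 8; x++) {
            int avg = (blk1[ y   *stride + x] + blk1[ y   *stride + x + 1] +
                       blk1[(y+1)*stride + x] + blk1[(y+1)*stride + x + 1] + 2) >> 2;
            int d = avg - blk2[y*stride + x];
            sum += d < 0 ? -d : d;
        }
    return sum;
}
#endif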

/* fold the four 16-bit partial sums in %mm6 into a single result */
static inline int sum_mmx(void)
{
    int ret;
    asm volatile(
        "movq %%mm6, %%mm0              \n\t"
        "psrlq $32, %%mm6               \n\t"
        "paddw %%mm0, %%mm6             \n\t"
        "movq %%mm6, %%mm0              \n\t"
        "psrlq $16, %%mm6               \n\t"
        "paddw %%mm0, %%mm6             \n\t"
        "movd %%mm6, %0                 \n\t"
        : "=r" (ret)
    );
    return ret&0xFFFF;
}

/* psadbw already left a single total in the low word of %mm6 */
static inline int sum_mmx2(void)
{
    int ret;
    asm volatile(
        "movd %%mm6, %0                 \n\t"
        : "=r" (ret)
    );
    return ret;
}
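
/* Scalar sketch of sum_mmx's horizontal fold, for illustration only
 * (sum_ref is a hypothetical name): the four 16-bit lanes of %mm6 are
 * collapsed by two shift-and-add steps, and the final mask discards bits
 * that spilled into the upper lanes. */
#if 0
static int sum_ref(const uint16_t lane[4])
{
    return (lane[0] + lane[1] + lane[2] + lane[3]) & 0xFFFF;
}
#endif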

/* plain-MMX half-pel wrappers: x2 averages with the right neighbour,
 * y2 with the row below */
static inline void sad8_x2a_mmx(uint8_t *blk1, uint8_t *blk2, int stride, int h)
{
    sad8_2_mmx(blk1, blk1+1, blk2, stride, h);
}
static inline void sad8_y2a_mmx(uint8_t *blk1, uint8_t *blk2, int stride, int h)
{
    sad8_2_mmx(blk1, blk1+stride, blk2, stride, h);
}

/* instantiate the DSPContext-facing SAD entry points for one CPU flavor */
#define PIX_SAD(suf)\
static int sad8_ ## suf(void *v, uint8_t *blk2, uint8_t *blk1, int stride, int h)\
{\
    assert(h==8);\
    asm volatile("pxor %%mm7, %%mm7     \n\t"\
                 "pxor %%mm6, %%mm6     \n\t":);\
\
    sad8_1_ ## suf(blk1, blk2, stride, 8);\
\
    return sum_ ## suf();\
}\
static int sad8_x2_ ## suf(void *v, uint8_t *blk2, uint8_t *blk1, int stride, int h)\
{\
    assert(h==8);\
    asm volatile("pxor %%mm7, %%mm7     \n\t"\
                 "pxor %%mm6, %%mm6     \n\t"\
                 "movq %0, %%mm5        \n\t"\
                 :: "m"(round_tab[1]) \
                 );\
\
    sad8_x2a_ ## suf(blk1, blk2, stride, 8);\
\
    return sum_ ## suf();\
}\
\
static int sad8_y2_ ## suf(void *v, uint8_t *blk2, uint8_t *blk1, int stride, int h)\
{\
    assert(h==8);\
    asm volatile("pxor %%mm7, %%mm7     \n\t"\
                 "pxor %%mm6, %%mm6     \n\t"\
                 "movq %0, %%mm5        \n\t"\
                 :: "m"(round_tab[1]) \
                 );\
\
    sad8_y2a_ ## suf(blk1, blk2, stride, 8);\
\
    return sum_ ## suf();\
}\
\
static int sad8_xy2_ ## suf(void *v, uint8_t *blk2, uint8_t *blk1, int stride, int h)\
{\
    assert(h==8);\
    asm volatile("pxor %%mm7, %%mm7     \n\t"\
                 "pxor %%mm6, %%mm6     \n\t"\
                 ::);\
\
    sad8_4_ ## suf(blk1, blk2, stride, 8);\
\
    return sum_ ## suf();\
}\
\
static int sad16_ ## suf(void *v, uint8_t *blk2, uint8_t *blk1, int stride, int h)\
{\
    asm volatile("pxor %%mm7, %%mm7     \n\t"\
                 "pxor %%mm6, %%mm6     \n\t":);\
\
    sad8_1_ ## suf(blk1  , blk2  , stride, h);\
    sad8_1_ ## suf(blk1+8, blk2+8, stride, h);\
\
    return sum_ ## suf();\
}\
static int sad16_x2_ ## suf(void *v, uint8_t *blk2, uint8_t *blk1, int stride, int h)\
{\
    asm volatile("pxor %%mm7, %%mm7     \n\t"\
                 "pxor %%mm6, %%mm6     \n\t"\
                 "movq %0, %%mm5        \n\t"\
                 :: "m"(round_tab[1]) \
                 );\
\
    sad8_x2a_ ## suf(blk1  , blk2  , stride, h);\
    sad8_x2a_ ## suf(blk1+8, blk2+8, stride, h);\
\
    return sum_ ## suf();\
}\
static int sad16_y2_ ## suf(void *v, uint8_t *blk2, uint8_t *blk1, int stride, int h)\
{\
    asm volatile("pxor %%mm7, %%mm7     \n\t"\
                 "pxor %%mm6, %%mm6     \n\t"\
                 "movq %0, %%mm5        \n\t"\
                 :: "m"(round_tab[1]) \
                 );\
\
    sad8_y2a_ ## suf(blk1  , blk2  , stride, h);\
    sad8_y2a_ ## suf(blk1+8, blk2+8, stride, h);\
\
    return sum_ ## suf();\
}\
static int sad16_xy2_ ## suf(void *v, uint8_t *blk2, uint8_t *blk1, int stride, int h)\
{\
    asm volatile("pxor %%mm7, %%mm7     \n\t"\
                 "pxor %%mm6, %%mm6     \n\t"\
                 ::);\
\
    sad8_4_ ## suf(blk1  , blk2  , stride, h);\
    sad8_4_ ## suf(blk1+8, blk2+8, stride, h);\
\
    return sum_ ## suf();\
}\

PIX_SAD(mmx)
PIX_SAD(mmx2)
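
/* Each PIX_SAD expansion above defines sad8_<suf>, sad16_<suf> and their
 * _x2/_y2/_xy2 half-pel variants; the two instantiations yield the *_mmx
 * and *_mmx2 entry points wired up below. */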

void dsputil_init_pix_mmx(DSPContext* c, AVCodecContext *avctx)
{
    if (mm_flags & MM_MMX) {
        c->pix_abs[0][0] = sad16_mmx;
        c->pix_abs[0][1] = sad16_x2_mmx;
        c->pix_abs[0][2] = sad16_y2_mmx;
        c->pix_abs[0][3] = sad16_xy2_mmx;
        c->pix_abs[1][0] = sad8_mmx;
        c->pix_abs[1][1] = sad8_x2_mmx;
        c->pix_abs[1][2] = sad8_y2_mmx;
        c->pix_abs[1][3] = sad8_xy2_mmx;

        c->sad[0]= sad16_mmx;
        c->sad[1]= sad8_mmx;
    }
    if (mm_flags & MM_MMXEXT) {
        c->pix_abs[0][0] = sad16_mmx2;
        c->pix_abs[1][0] = sad8_mmx2;

        c->sad[0]= sad16_mmx2;
        c->sad[1]= sad8_mmx2;

        if(!(avctx->flags & CODEC_FLAG_BITEXACT)){
            c->pix_abs[0][1] = sad16_x2_mmx2;
            c->pix_abs[0][2] = sad16_y2_mmx2;
            c->pix_abs[0][3] = sad16_xy2_mmx2;
            c->pix_abs[1][1] = sad8_x2_mmx2;
            c->pix_abs[1][2] = sad8_y2_mmx2;
            c->pix_abs[1][3] = sad8_xy2_mmx2;
        }
    }
}
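
/* Usage sketch, for illustration only (cur/ref/stride are hypothetical
 * names, not part of this file): once dsputil_init_pix_mmx() has filled in
 * the DSPContext, the motion estimator reaches these routines through its
 * function pointers.  pix_abs[0][] holds the 16-pixel-wide variants,
 * pix_abs[1][] the 8-pixel-wide ones; indices 0-3 select full-pel, x, y
 * and xy half-pel. */
#if 0
    int sad      = c->pix_abs[0][0](NULL, cur, ref, stride, 16);
    int sad_xhpel= c->pix_abs[0][1](NULL, cur, ref, stride, 16);
#endif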