Revision be449fca libavcodec/i386/motion_est_mmx.c

View differences:

libavcodec/i386/motion_est_mmx.c
 36  36   static inline void sad8_1_mmx(uint8_t *blk1, uint8_t *blk2, int stride, int h)
 37  37   {
 38  38       x86_reg len= -(stride*h);
 39     -     asm volatile(
     39 +     __asm__ volatile(
 40  40           ASMALIGN(4)
 41  41           "1:                             \n\t"
 42  42           "movq (%1, %%"REG_a"), %%mm0    \n\t"
......
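
Every hunk in this revision makes the same substitution: the GNU extension keyword asm is replaced by __asm__, which GCC still accepts when compiling in strict ISO C modes (for example -std=c99), where plain asm is not a keyword. A minimal sketch of the two spellings, not code from this file (x86 assumed):

    /* Minimal sketch (x86 assumed), not FFmpeg code: the two spellings of GCC
     * inline assembly.  Plain "asm" stops being a keyword under -std=c99
     * without GNU extensions; the reserved "__asm__" spelling, which this
     * revision switches to, is accepted in every mode. */
    static void cpu_relax(void)
    {
    #ifdef __STRICT_ANSI__
        __asm__ volatile("rep; nop");   /* strict ISO mode: only this compiles */
    #else
        asm volatile("rep; nop");       /* GNU mode: the old spelling also works */
    #endif
    }
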
 71  71
 72  72   static inline void sad8_1_mmx2(uint8_t *blk1, uint8_t *blk2, int stride, int h)
 73  73   {
 74     -     asm volatile(
     74 +     __asm__ volatile(
 75  75           ASMALIGN(4)
 76  76           "1:                             \n\t"
 77  77           "movq (%1), %%mm0               \n\t"
......
 92  92   static int sad16_sse2(void *v, uint8_t *blk2, uint8_t *blk1, int stride, int h)
 93  93   {
 94  94       int ret;
 95     -     asm volatile(
     95 +     __asm__ volatile(
 96  96           "pxor %%xmm6, %%xmm6            \n\t"
 97  97           ASMALIGN(4)
 98  98           "1:                             \n\t"
......
109 109           : "+r" (h), "+r" (blk1), "+r" (blk2)
110 110           : "r" ((x86_reg)stride)
111 111       );
112     -     asm volatile(
    112 +     __asm__ volatile(
113 113           "movhlps %%xmm6, %%xmm0         \n\t"
114 114           "paddw   %%xmm0, %%xmm6         \n\t"
115 115           "movd    %%xmm6, %0             \n\t"
......
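
sad16_sse2() keeps its two-statement structure: the first __asm__ block loops over the rows and accumulates into %xmm6, and the second block reduces that register and writes it to the C variable ret through an output operand (the movd %%xmm6, %0 / "=r" pattern also visible in sum_mmx2() below). A minimal, self-contained sketch of that output-operand mechanism, not the FFmpeg code (x86/x86-64 assumed):

    #include <stdint.h>

    /* Minimal sketch (x86/x86-64 assumed), not the FFmpeg code: returning a
     * value from an __asm__ statement through an "=r" output operand, the
     * same mechanism the movd/"=r"(ret) epilogues in this file rely on. */
    static int32_t read_back(int32_t x)
    {
        int32_t ret;
        __asm__ volatile("movl %1, %0" : "=r"(ret) : "r"(x));
        return ret;
    }
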
120 120
121 121   static inline void sad8_x2a_mmx2(uint8_t *blk1, uint8_t *blk2, int stride, int h)
122 122   {
123     -     asm volatile(
    123 +     __asm__ volatile(
124 124           ASMALIGN(4)
125 125           "1:                             \n\t"
126 126           "movq (%1), %%mm0               \n\t"
......
142 142
143 143   static inline void sad8_y2a_mmx2(uint8_t *blk1, uint8_t *blk2, int stride, int h)
144 144   {
145     -     asm volatile(
    145 +     __asm__ volatile(
146 146           "movq (%1), %%mm0               \n\t"
147 147           "add %3, %1                     \n\t"
148 148           ASMALIGN(4)
......
167 167
168 168   static inline void sad8_4_mmx2(uint8_t *blk1, uint8_t *blk2, int stride, int h)
169 169   {
170     -     asm volatile(
    170 +     __asm__ volatile(
171 171           "movq "MANGLE(bone)", %%mm5     \n\t"
172 172           "movq (%1), %%mm0               \n\t"
173 173           "pavgb 1(%1), %%mm0             \n\t"
......
198 198   static inline void sad8_2_mmx(uint8_t *blk1a, uint8_t *blk1b, uint8_t *blk2, int stride, int h)
199 199   {
200 200       x86_reg len= -(stride*h);
201     -     asm volatile(
    201 +     __asm__ volatile(
202 202           ASMALIGN(4)
203 203           "1:                             \n\t"
204 204           "movq (%1, %%"REG_a"), %%mm0    \n\t"
......
236 236   static inline void sad8_4_mmx(uint8_t *blk1, uint8_t *blk2, int stride, int h)
237 237   {
238 238       x86_reg len= -(stride*h);
239     -     asm volatile(
    239 +     __asm__ volatile(
240 240           "movq (%1, %%"REG_a"), %%mm0    \n\t"
241 241           "movq 1(%1, %%"REG_a"), %%mm2   \n\t"
242 242           "movq %%mm0, %%mm1              \n\t"
......
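
All of the sad8_*/sad16_* kernels touched here compute a sum of absolute differences between two pixel blocks; the _x2/_y2/_4 variants first average neighbouring pixels for half-pel positions. A plain-C sketch of the basic 8-wide operation, purely illustrative and not part of the file:

    #include <stdint.h>
    #include <stdlib.h>

    /* Plain-C sketch of what the MMX/SSE2 kernels above implement: the sum
     * of absolute differences over an 8-pixel-wide block of height h.  The
     * name sad8_ref and this scalar loop are illustrative only. */
    static int sad8_ref(const uint8_t *blk1, const uint8_t *blk2, int stride, int h)
    {
        int x, y, sum = 0;

        for (y = 0; y < h; y++) {
            for (x = 0; x < 8; x++)
                sum += abs(blk1[x] - blk2[x]);
            blk1 += stride;
            blk2 += stride;
        }
        return sum;
    }
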
289 289   static inline int sum_mmx(void)
290 290   {
291 291       int ret;
292     -     asm volatile(
    292 +     __asm__ volatile(
293 293           "movq %%mm6, %%mm0              \n\t"
294 294           "psrlq $32, %%mm6               \n\t"
295 295           "paddw %%mm0, %%mm6             \n\t"
......
305 305   static inline int sum_mmx2(void)
306 306   {
307 307       int ret;
308     -     asm volatile(
    308 +     __asm__ volatile(
309 309           "movd %%mm6, %0                 \n\t"
310 310           : "=r" (ret)
311 311       );
......
326 326   static int sad8_ ## suf(void *v, uint8_t *blk2, uint8_t *blk1, int stride, int h)\
327 327   {\
328 328       assert(h==8);\
329     -     asm volatile("pxor %%mm7, %%mm7     \n\t"\
    329 +     __asm__ volatile("pxor %%mm7, %%mm7     \n\t"\
330 330                    "pxor %%mm6, %%mm6     \n\t":);\
331 331   \
332 332       sad8_1_ ## suf(blk1, blk2, stride, 8);\
......
336 336   static int sad8_x2_ ## suf(void *v, uint8_t *blk2, uint8_t *blk1, int stride, int h)\
337 337   {\
338 338       assert(h==8);\
339     -     asm volatile("pxor %%mm7, %%mm7     \n\t"\
    339 +     __asm__ volatile("pxor %%mm7, %%mm7     \n\t"\
340 340                    "pxor %%mm6, %%mm6     \n\t"\
341 341                    "movq %0, %%mm5        \n\t"\
342 342                    :: "m"(round_tab[1]) \
......
350 350   static int sad8_y2_ ## suf(void *v, uint8_t *blk2, uint8_t *blk1, int stride, int h)\
351 351   {\
352 352       assert(h==8);\
353     -     asm volatile("pxor %%mm7, %%mm7     \n\t"\
    353 +     __asm__ volatile("pxor %%mm7, %%mm7     \n\t"\
354 354                    "pxor %%mm6, %%mm6     \n\t"\
355 355                    "movq %0, %%mm5        \n\t"\
356 356                    :: "m"(round_tab[1]) \
......
364 364   static int sad8_xy2_ ## suf(void *v, uint8_t *blk2, uint8_t *blk1, int stride, int h)\
365 365   {\
366 366       assert(h==8);\
367     -     asm volatile("pxor %%mm7, %%mm7     \n\t"\
    367 +     __asm__ volatile("pxor %%mm7, %%mm7     \n\t"\
368 368                    "pxor %%mm6, %%mm6     \n\t"\
369 369                    ::);\
370 370   \
......
375 375   \
376 376   static int sad16_ ## suf(void *v, uint8_t *blk2, uint8_t *blk1, int stride, int h)\
377 377   {\
378     -     asm volatile("pxor %%mm7, %%mm7     \n\t"\
    378 +     __asm__ volatile("pxor %%mm7, %%mm7     \n\t"\
379 379                    "pxor %%mm6, %%mm6     \n\t":);\
380 380   \
381 381       sad8_1_ ## suf(blk1  , blk2  , stride, h);\
......
385 385   }\
386 386   static int sad16_x2_ ## suf(void *v, uint8_t *blk2, uint8_t *blk1, int stride, int h)\
387 387   {\
388     -     asm volatile("pxor %%mm7, %%mm7     \n\t"\
    388 +     __asm__ volatile("pxor %%mm7, %%mm7     \n\t"\
389 389                    "pxor %%mm6, %%mm6     \n\t"\
390 390                    "movq %0, %%mm5        \n\t"\
391 391                    :: "m"(round_tab[1]) \
......
398 398   }\
399 399   static int sad16_y2_ ## suf(void *v, uint8_t *blk2, uint8_t *blk1, int stride, int h)\
400 400   {\
401     -     asm volatile("pxor %%mm7, %%mm7     \n\t"\
    401 +     __asm__ volatile("pxor %%mm7, %%mm7     \n\t"\
402 402                    "pxor %%mm6, %%mm6     \n\t"\
403 403                    "movq %0, %%mm5        \n\t"\
404 404                    :: "m"(round_tab[1]) \
......
411 411   }\
412 412   static int sad16_xy2_ ## suf(void *v, uint8_t *blk2, uint8_t *blk1, int stride, int h)\
413 413   {\
414     -     asm volatile("pxor %%mm7, %%mm7     \n\t"\
    414 +     __asm__ volatile("pxor %%mm7, %%mm7     \n\t"\
415 415                    "pxor %%mm6, %%mm6     \n\t"\
416 416                    ::);\
417 417   \
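
The wrappers at the bottom of the file are generated by a suffix macro: ## token pasting stamps out one sad8_*/sad16_* set per CPU flavour (mmx, mmx2, ...), each priming %mm6/%mm7 and then calling the suffix-matched sad8_1_/sum_ helpers. A self-contained sketch of that token-pasting pattern; all names and bodies below are placeholders, not the file's macro:

    #include <stdio.h>

    /* Illustrative sketch of the "name_ ## suf" token-pasting pattern used
     * by the wrapper macro above: one wrapper per suffix, each calling its
     * own suffix-matched helper. */
    static int kernel_mmx(void)  { return 1; }
    static int kernel_mmx2(void) { return 2; }

    #define DECLARE_WRAPPER(suf)                           \
    static int wrapper_ ## suf(void)                       \
    {                                                      \
        return kernel_ ## suf();  /* suffix-matched call */ \
    }

    DECLARE_WRAPPER(mmx)   /* defines wrapper_mmx()  */
    DECLARE_WRAPPER(mmx2)  /* defines wrapper_mmx2() */

    int main(void)
    {
        printf("%d %d\n", wrapper_mmx(), wrapper_mmx2());
        return 0;
    }
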
