Revision be449fca libavcodec/i386/mmx.h

View differences:

libavcodec/i386/mmx.h
43 43

  
44 44

  
45 45
/*
 * Emit "op $imm, %reg": apply an MMX instruction to a register with an
 * immediate source operand.  #op stringizes the instruction mnemonic;
 * the immediate is passed via the "i" (constant) constraint.  __asm__
 * (not the plain `asm` keyword) keeps this valid under -std=c89/c99/c11,
 * where `asm` is not a keyword.
 */
#define         mmx_i2r(op,imm,reg) \
        __asm__ volatile (#op " %0, %%" #reg \
                              : /* nothing */ \
                              : "i" (imm) )
49 49

  
50 50
/*
 * Emit "op mem, %reg": memory-to-register form.  The memory operand is
 * passed with the "m" input constraint so the compiler supplies a valid
 * addressing mode for %0.  No outputs are declared; volatile prevents
 * the statement from being moved or deleted.
 */
#define         mmx_m2r(op,mem,reg) \
        __asm__ volatile (#op " %0, %%" #reg \
                              : /* nothing */ \
                              : "m" (mem))
54 54

  
55 55
/*
 * Emit "op %reg, mem": register-to-memory form.  The destination memory
 * location is declared as an "=m" output so the compiler knows it is
 * written by the asm statement.
 */
#define         mmx_r2m(op,reg,mem) \
        __asm__ volatile (#op " %%" #reg ", %0" \
                              : "=m" (mem) \
                              : /* nothing */ )
59 59

  
60 60
/*
 * Emit "op %regs, %regd": register-to-register form.  Basic asm (no
 * operand list), so '%' is literal here and the register names are
 * pasted in directly via stringization.
 */
#define         mmx_r2r(op,regs,regd) \
        __asm__ volatile (#op " %" #regs ", %" #regd)
62 62

  
63 63

  
/* Empty the MMX state (EMMS) so subsequent x87 FPU code sees a clean
 * register stack; must be issued after MMX code, before FP code. */
#define         emms() __asm__ volatile ("emms")
65 65

  
66 66
/* movd: move a 32-bit doubleword between memory and an MMX register. */
#define         movd_m2r(var,reg)           mmx_m2r (movd, var, reg)
#define         movd_r2m(reg,var)           mmx_r2m (movd, reg, var)
......
200 200

  
201 201

  
202 202
/*
 * Emit "op $imm, mem, %reg": three-operand form with a memory source
 * (%0, "m" constraint) and an immediate (%1, "i" constraint) —
 * used for instructions such as pshufw that take mem + imm8.
 */
#define         mmx_m2ri(op,mem,reg,imm) \
        __asm__ volatile (#op " %1, %0, %%" #reg \
                              : /* nothing */ \
                              : "m" (mem), "i" (imm))
206 206
/*
 * Emit "op $imm, %regs, %regd": register-to-register form with an
 * additional immediate operand (%0, "i" constraint).
 */
#define         mmx_r2ri(op,regs,regd,imm) \
        __asm__ volatile (#op " %0, %%" #regs ", %%" #regd \
                              : /* nothing */ \
                              : "i" (imm) )
210 210

  
211 211
/*
 * Emit "prefetch<hint> mem": software prefetch of the given memory
 * operand.  `hint` is pasted onto the mnemonic, selecting the variant
 * (e.g. "nta" -> prefetchnta); an empty-suffix form would be the
 * 3DNow! "prefetch" — which variants are used depends on the callers.
 */
#define         mmx_fetch(mem,hint) \
        __asm__ volatile ("prefetch" #hint " %0" \
                              : /* nothing */ \
                              : "m" (mem))
215 215

  
......
240 240
#define         pminub_r2r(regs,regd)       mmx_r2r (pminub, regs, regd)
241 241

  
242 242
/*
 * Extract a sign-bit mask from an MMX/XMM register into a GP register.
 * NOTE(review): this emits "movmskps" (packed-single sign mask), not
 * "pmovmskb" (per-byte sign mask), despite the macro's name — this
 * mismatch is preserved from the original; confirm against callers
 * before relying on per-byte semantics.
 */
#define         pmovmskb(mmreg,reg) \
        __asm__ volatile ("movmskps %" #mmreg ", %" #reg)
244 244

  
245 245
/* pmulhuw: high 16 bits of an unsigned 16x16-bit multiply (MMXEXT/SSE). */
#define         pmulhuw_m2r(var,reg)        mmx_m2r (pmulhuw, var, reg)
#define         pmulhuw_r2r(regs,regd)      mmx_r2r (pmulhuw, regs, regd)
......
256 256
/* pshufw: shuffle the four 16-bit words of an MMX register per imm8. */
#define         pshufw_m2r(var,reg,imm)     mmx_m2ri(pshufw, var, reg, imm)
#define         pshufw_r2r(regs,regd,imm)   mmx_r2ri(pshufw, regs, regd, imm)
258 258

  
/* Store fence (SFENCE): order all preceding stores before any later
 * stores become globally visible (used after non-temporal stores). */
#define         sfence() __asm__ volatile ("sfence\n\t")
260 260

  
261 261
/* SSE2 */
/* pshufhw: shuffle the high four 16-bit words of an XMM register per imm8. */
#define         pshufhw_m2r(var,reg,imm)    mmx_m2ri(pshufhw, var, reg, imm)

Also available in: Unified diff