Revision b1c32fb5


libavcodec/x86/h264_weight.asm

@@ -40,9 +40,9 @@
 %macro WEIGHT_SETUP 0
     add        r4, r4
     inc        r4
-    movd       m3, r3
-    movd       m5, r4
-    movd       m6, r2
+    movd       m3, r3d
+    movd       m5, r4d
+    movd       m6, r2d
     pslld      m5, m6
     psrld      m5, 1
 %if mmsize == 16
@@ -156,10 +156,10 @@
     add        r6, 1
     or         r6, 1
     add        r3, 1
-    movd       m3, r4
-    movd       m4, r5
-    movd       m5, r6
-    movd       m6, r3
+    movd       m3, r4d
+    movd       m4, r5d
+    movd       m5, r6d
+    movd       m6, r3d
     pslld      m5, m6
     psrld      m5, 1
 %if mmsize == 16
@@ -291,10 +291,10 @@
     add        r6, 1
     or         r6, 1
     add        r3, 1
-    movd       m4, r4
-    movd       m0, r5
-    movd       m5, r6
-    movd       m6, r3
+    movd       m4, r4d
+    movd       m0, r5d
+    movd       m5, r6d
+    movd       m6, r3d
     pslld      m5, m6
     psrld      m5, 1
     punpcklbw  m4, m0
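
For context, and not part of the revision itself: movd only ever transfers 32 bits between a general-purpose register and an MMX/SSE register. With the x86inc.asm register naming used throughout this code, the bare names (r2, r3, r4) expand to full-width registers, which are 64-bit on x86-64, while the d-suffixed names (r2d, r3d, r4d) always name the 32-bit forms, so the rewritten lines make the operand size explicit on both ABIs. A minimal sketch in yasm syntax with example registers; the assumption here is that yasm accepts movd with a 64-bit source and assembles it as the REX.W form shown:

    BITS 64
    SECTION .text
        ; 32-bit source register: the plain dword transfer the macros want
        movd  xmm0, edx          ; encodes as 66 0F 6E C2
        ; 64-bit source register: a REX.W-prefixed transfer, i.e. movq
        movq  xmm0, rdx          ; encodes as 66 48 0F 6E C2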

libavcodec/x86/vc1dsp_yasm.asm

@@ -36,7 +36,7 @@
 %endmacro
 
 %macro STORE_4_WORDS_MMX 6
-    movd   %6, %5
+    movd  %6d, %5
 %if mmsize==16
     psrldq %5, 4
 %else
@@ -45,7 +45,7 @@
     mov    %1, %6w
     shr    %6, 16
     mov    %2, %6w
-    movd   %6, %5
+    movd  %6d, %5
     mov    %3, %6w
     shr    %6, 16
     mov    %4, %6w
@@ -88,7 +88,7 @@
     pxor    m7, m3  ; d_sign ^= a0_sign
 
     pxor    m5, m5
-    movd    m3, r2
+    movd    m3, r2d
 %if %1 > 4
     punpcklbw m3, m3
 %endif

libavcodec/x86/vp3dsp.asm

@@ -93,12 +93,12 @@
 %endmacro
 
 %macro STORE_4_WORDS 1
-    movd          r2, %1
+    movd         r2d, %1
     mov  [r0     -1], r2w
     psrlq         %1, 32
     shr           r2, 16
     mov  [r0+r1  -1], r2w
-    movd          r2, %1
+    movd         r2d, %1
     mov  [r0+r1*2-1], r2w
     shr           r2, 16
     mov  [r0+r3  -1], r2w
@@ -606,7 +606,7 @@
     movsx         r2, word [r2]
     add           r2, 15
     sar           r2, 5
-    movd          m0, r2
+    movd          m0, r2d
     pshufw        m0, m0, 0x0
     pxor          m1, m1
     psubw         m1, m0
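
Purely as an illustration, not something added by the revision: the STORE_4_WORDS-style macros in the two files above pull one dword of the SIMD register into a scratch GPR and then scatter it as two 16-bit stores. With concrete operands (m1 as the source, and the original's -1 column offsets dropped from the addresses for readability), the vp3dsp pattern reads roughly:

        movd   r2d, m1             ; low 32 bits of the MMX register into the GPR
        mov    [r0     ], r2w      ; store word 0
        psrlq  m1, 32              ; shift the next dword down
        shr    r2, 16
        mov    [r0+r1  ], r2w      ; store word 1
        movd   r2d, m1             ; refill with the next dword
        mov    [r0+r1*2], r2w      ; store word 2
        shr    r2, 16
        mov    [r0+r3  ], r2w      ; store word 3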

libavcodec/x86/vp8dsp.asm

@@ -1342,7 +1342,7 @@
     psrldq        m%2, 4
 %if %10 == 8
     movd    [%5+%8*2], m%1
-    movd           %5, m%3
+    movd          %5d, m%3
 %endif
     psrldq        m%3, 4
     psrldq        m%4, 4
@@ -1379,26 +1379,26 @@
 ; 4 is a pointer to the destination's 4th line
 ; 5/6 is -stride and +stride
 %macro WRITE_2x4W 6
-    movd             %3, %1
+    movd            %3d, %1
     punpckhdq        %1, %1
     mov       [%4+%5*4], %3w
     shr              %3, 16
     add              %4, %6
     mov       [%4+%5*4], %3w
 
-    movd             %3, %1
+    movd            %3d, %1
     add              %4, %5
     mov       [%4+%5*2], %3w
     shr              %3, 16
     mov       [%4+%5  ], %3w
 
-    movd             %3, %2
+    movd            %3d, %2
     punpckhdq        %2, %2
     mov       [%4     ], %3w
     shr              %3, 16
     mov       [%4+%6  ], %3w
 
-    movd             %3, %2
+    movd            %3d, %2
     add              %4, %6
     mov       [%4+%6  ], %3w
     shr              %3, 16
@@ -1407,27 +1407,27 @@
 %endmacro
 
 %macro WRITE_8W_SSE2 5
-    movd             %2, %1
+    movd            %2d, %1
     psrldq           %1, 4
     mov       [%3+%4*4], %2w
     shr              %2, 16
     add              %3, %5
     mov       [%3+%4*4], %2w
 
-    movd             %2, %1
+    movd            %2d, %1
     psrldq           %1, 4
     add              %3, %4
     mov       [%3+%4*2], %2w
     shr              %2, 16
     mov       [%3+%4  ], %2w
 
-    movd             %2, %1
+    movd            %2d, %1
     psrldq           %1, 4
     mov       [%3     ], %2w
     shr              %2, 16
     mov       [%3+%5  ], %2w
 
-    movd             %2, %1
+    movd            %2d, %1
     add              %3, %5
     mov       [%3+%5  ], %2w
     shr              %2, 16
@@ -1446,27 +1446,27 @@
 %endmacro
 
 %macro SPLATB_REG_MMX 2-3
-    movd           %1, %2
+    movd           %1, %2d
     punpcklbw      %1, %1
     punpcklwd      %1, %1
     punpckldq      %1, %1
 %endmacro
 
 %macro SPLATB_REG_MMXEXT 2-3
-    movd           %1, %2
+    movd           %1, %2d
     punpcklbw      %1, %1
     pshufw         %1, %1, 0x0
 %endmacro
 
 %macro SPLATB_REG_SSE2 2-3
-    movd           %1, %2
+    movd           %1, %2d
     punpcklbw      %1, %1
     pshuflw        %1, %1, 0x0
     punpcklqdq     %1, %1
 %endmacro
 
 %macro SPLATB_REG_SSSE3 3
-    movd           %1, %2
+    movd           %1, %2d
     pshufb         %1, %3
 %endmacro
 

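One more illustration, also not part of the revision: in the SPLATB_REG_* macros the d is appended to the macro parameter itself, so a caller passing an x86inc register name, say SPLATB_REG_SSE2 m0, r2, now reads only the low 32 bits of that register. Annotated with what each step does to the byte being broadcast (the register choices here are just an example):

        movd        m0, r2d        ; low dword of the GPR, byte b in the bottom lane
        punpcklbw   m0, m0         ; duplicate each of the low bytes: b -> b b
        pshuflw     m0, m0, 0x0    ; broadcast word 0 across the low four words
        punpcklqdq  m0, m0         ; copy the low qword to the high qword: 16 x b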