Revision fe2ff6d2 libavcodec/x86/dsputil_yasm.asm

View differences:

libavcodec/x86/dsputil_yasm.asm

section .text align=16

%macro PSWAPD_SSE 2
    pshufw %1, %2, 0x4e
%endmacro
%macro PSWAPD_3DN1 2
    movq  %1, %2
    psrlq %1, 32
    punpckldq %1, %2
%endmacro

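For reference (not part of this revision): both PSWAPD variants emulate a pswapd, i.e. they exchange the two 32-bit halves of a 64-bit MMX register (pshufw with imm8 0x4e reorders the words as 2,3,0,1; the 3DNow! sequence does the same with movq/psrlq/punpckldq). A minimal scalar model in C, with a hypothetical helper name:

#include <stdint.h>

/* Hypothetical scalar model of PSWAPD_SSE / PSWAPD_3DN1: swap the low and
 * high 32-bit halves of a 64-bit value. */
static uint64_t pswapd_model(uint64_t x)
{
    return (x >> 32) | (x << 32);
}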
%macro FLOAT_TO_INT16_INTERLEAVE6 1
; void float_to_int16_interleave6_sse(int16_t *dst, const float **src, int len)
cglobal float_to_int16_interleave6_%1, 2,7,0, dst, src, src1, src2, src3, src4, src5
%ifdef ARCH_X86_64
    %define lend r10d
    mov     lend, r2d
%else
    %define lend dword r2m
%endif
    mov src1q, [srcq+1*gprsize]
    mov src2q, [srcq+2*gprsize]
    mov src3q, [srcq+3*gprsize]
    mov src4q, [srcq+4*gprsize]
    mov src5q, [srcq+5*gprsize]
    mov srcq,  [srcq]
    sub src1q, srcq
    sub src2q, srcq
    sub src3q, srcq
    sub src4q, srcq
    sub src5q, srcq
.loop:
    cvtps2pi   mm0, [srcq]
    cvtps2pi   mm1, [srcq+src1q]
    cvtps2pi   mm2, [srcq+src2q]
    cvtps2pi   mm3, [srcq+src3q]
    cvtps2pi   mm4, [srcq+src4q]
    cvtps2pi   mm5, [srcq+src5q]
    packssdw   mm0, mm3
    packssdw   mm1, mm4
    packssdw   mm2, mm5
    pswapd     mm3, mm0
    punpcklwd  mm0, mm1
    punpckhwd  mm1, mm2
    punpcklwd  mm2, mm3
    pswapd     mm3, mm0
    punpckldq  mm0, mm2
    punpckhdq  mm2, mm1
    punpckldq  mm1, mm3
    movq [dstq   ], mm0
    movq [dstq+16], mm2
    movq [dstq+ 8], mm1
    add srcq, 8
    add dstq, 24
    sub lend, 2
    jg .loop
    emms
    RET
%endmacro ; FLOAT_TO_INT16_INTERLEAVE6
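For reference (not part of this revision), a minimal scalar sketch in C of what each FLOAT_TO_INT16_INTERLEAVE6 instantiation computes: two samples per loop iteration are converted from float to int16 with saturation (packssdw) and interleaved across the six source planes. Rounding differs per build (cvtps2pi rounds per MXCSR, pf2id truncates); the helper name below is hypothetical:

#include <stdint.h>
#include <math.h>

/* Hypothetical scalar model: write len samples from each of the six planes
 * in src into dst, interleaved as dst[6*i + channel], clamped to the int16
 * range as packssdw does in the asm above. */
static void float_to_int16_interleave6_model(int16_t *dst,
                                             const float **src, int len)
{
    for (int i = 0; i < len; i++) {
        for (int c = 0; c < 6; c++) {
            long v = lrintf(src[c][i]);
            if (v >  32767) v =  32767;
            if (v < -32768) v = -32768;
            dst[6 * i + c] = (int16_t)v;
        }
    }
}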

%define pswapd PSWAPD_SSE
FLOAT_TO_INT16_INTERLEAVE6 sse
%define cvtps2pi pf2id
%define pswapd PSWAPD_3DN1
FLOAT_TO_INT16_INTERLEAVE6 3dnow
%undef pswapd
FLOAT_TO_INT16_INTERLEAVE6 3dn2
%undef cvtps2pi

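The %define/%undef block above builds three variants from one macro body: pswapd expands to the pshufw-based emulation for sse, to the movq/psrlq/punpckldq sequence for 3dnow, and is left undefined for 3dn2 so the native Extended 3DNow! pswapd instruction is used; cvtps2pi is remapped to the truncating pf2id for both 3DNow! builds. Assuming the usual cglobal ff_ name prefix (an assumption, shown for illustration only), the instantiations would export:

/* Hypothetical prototypes of the three instantiations above. */
void ff_float_to_int16_interleave6_sse  (int16_t *dst, const float **src, int len);
void ff_float_to_int16_interleave6_3dnow(int16_t *dst, const float **src, int len);
void ff_float_to_int16_interleave6_3dn2 (int16_t *dst, const float **src, int len);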
%macro SCALARPRODUCT 1
; int scalarproduct_int16(int16_t *v1, int16_t *v2, int order, int shift)
cglobal scalarproduct_int16_%1, 3,3,4, v1, v2, order, shift
