Revision c0673f2c

View differences:

libavcodec/x86/h264dsp_mmx.c
@@ -66,9 +66,8 @@
 static av_always_inline void h264_loop_filter_strength_iteration_mmx2(int16_t bS[2][4][4], uint8_t nnz[40],
                                                                       int8_t ref[2][40],   int16_t mv[2][40][2],
                                                                       int bidir,   int edges, int step,
-                                                                      int mask_mv, int dir)
+                                                                      int mask_mv, int dir, const int d_idx)
 {
-        const x86_reg d_idx = dir ? -8 : -1;
         DECLARE_ALIGNED(8, const uint64_t, mask_dir) = dir ? 0 : 0xffffffffffffffffULL;
         int b_idx, edge;
         for( b_idx=12, edge=0; edge<edges; edge+=step, b_idx+=8*step ) {
@@ -79,23 +78,23 @@
             if(!(mask_mv & edge)) {
                 if(bidir) {
                     __asm__ volatile(
-                        "movd         (%1,%0), %%mm2 \n"
-                        "punpckldq  40(%1,%0), %%mm2 \n" // { ref0[bn], ref1[bn] }
-                        "pshufw $0x44,   (%1), %%mm0 \n" // { ref0[b], ref0[b] }
-                        "pshufw $0x44, 40(%1), %%mm1 \n" // { ref1[b], ref1[b] }
+                        "movd         %a2(%0), %%mm2 \n"
+                        "punpckldq    %a3(%0), %%mm2 \n" // { ref0[bn], ref1[bn] }
+                        "pshufw $0x44,   (%0), %%mm0 \n" // { ref0[b], ref0[b] }
+                        "pshufw $0x44, 40(%0), %%mm1 \n" // { ref1[b], ref1[b] }
                         "pshufw $0x4E, %%mm2, %%mm3 \n"
                         "psubb         %%mm2, %%mm0 \n" // { ref0[b]!=ref0[bn], ref0[b]!=ref1[bn] }
                         "psubb         %%mm3, %%mm1 \n" // { ref1[b]!=ref1[bn], ref1[b]!=ref0[bn] }
 
                         "por           %%mm1, %%mm0 \n"
-                        "movq      (%2,%0,4), %%mm1 \n"
-                        "movq     8(%2,%0,4), %%mm2 \n"
+                        "movq        %a4(%1), %%mm1 \n"
+                        "movq        %a5(%1), %%mm2 \n"
                         "movq          %%mm1, %%mm3 \n"
                         "movq          %%mm2, %%mm4 \n"
-                        "psubw          (%2), %%mm1 \n"
-                        "psubw         8(%2), %%mm2 \n"
-                        "psubw       160(%2), %%mm3 \n"
-                        "psubw       168(%2), %%mm4 \n"
+                        "psubw          (%1), %%mm1 \n"
+                        "psubw         8(%1), %%mm2 \n"
+                        "psubw       160(%1), %%mm3 \n"
+                        "psubw       168(%1), %%mm4 \n"
                         "packsswb      %%mm2, %%mm1 \n"
                         "packsswb      %%mm4, %%mm3 \n"
                         "paddb         %%mm6, %%mm1 \n"
@@ -105,14 +104,14 @@
                         "packsswb      %%mm3, %%mm1 \n"
 
                         "por           %%mm1, %%mm0 \n"
-                        "movq   160(%2,%0,4), %%mm1 \n"
-                        "movq   168(%2,%0,4), %%mm2 \n"
+                        "movq        %a6(%1), %%mm1 \n"
+                        "movq        %a7(%1), %%mm2 \n"
                         "movq          %%mm1, %%mm3 \n"
                         "movq          %%mm2, %%mm4 \n"
-                        "psubw          (%2), %%mm1 \n"
-                        "psubw         8(%2), %%mm2 \n"
-                        "psubw       160(%2), %%mm3 \n"
-                        "psubw       168(%2), %%mm4 \n"
+                        "psubw          (%1), %%mm1 \n"
+                        "psubw         8(%1), %%mm2 \n"
+                        "psubw       160(%1), %%mm3 \n"
+                        "psubw       168(%1), %%mm4 \n"
                         "packsswb      %%mm2, %%mm1 \n"
                         "packsswb      %%mm4, %%mm3 \n"
                         "paddb         %%mm6, %%mm1 \n"
@@ -125,34 +124,41 @@
                         "por           %%mm1, %%mm0 \n"
                         "pshufw $0x4E, %%mm0, %%mm1 \n"
                         "pminub        %%mm1, %%mm0 \n"
-                        ::"r"(d_idx),
-                          "r"(ref[0]+b_idx),
-                          "r"(mv[0]+b_idx)
+                        ::"r"(ref[0]+b_idx),
+                          "r"(mv[0]+b_idx),
+                          "i"(d_idx),
+                          "i"(d_idx+40),
+                          "i"(d_idx*4),
+                          "i"(d_idx*4+8),
+                          "i"(d_idx*4+160),
+                          "i"(d_idx*4+168)
                     );
                 } else {
                     __asm__ volatile(
                         "movd        (%1), %%mm0 \n"
-                        "psubb    (%1,%0), %%mm0 \n" // ref[b] != ref[bn]
+                        "psubb    %a0(%1), %%mm0 \n" // ref[b] != ref[bn]
                         "movq        (%2), %%mm1 \n"
                         "movq       8(%2), %%mm2 \n"
-                        "psubw  (%2,%0,4), %%mm1 \n"
-                        "psubw 8(%2,%0,4), %%mm2 \n"
+                        "psubw    %a3(%2), %%mm1 \n"
+                        "psubw    %a4(%2), %%mm2 \n"
                         "packsswb   %%mm2, %%mm1 \n"
                         "paddb      %%mm6, %%mm1 \n"
                         "psubusb    %%mm5, %%mm1 \n" // abs(mv[b] - mv[bn]) >= limit
                         "packsswb   %%mm1, %%mm1 \n"
                         "por        %%mm1, %%mm0 \n"
-                        ::"r"(d_idx),
+                        ::"i"(d_idx),
                           "r"(ref[0]+b_idx),
-                          "r"(mv[0]+b_idx)
+                          "r"(mv[0]+b_idx),
+                          "i"(d_idx*4),
+                          "i"(d_idx*4+8)
                     );
                 }
             }
             __asm__ volatile(
-                "movd %0, %%mm1 \n"
-                "por  %1, %%mm1 \n" // nnz[b] || nnz[bn]
-                ::"m"(nnz[b_idx]),
-                  "m"(nnz[b_idx+d_idx])
+                "movd   (%0), %%mm1 \n"
+                "por %a1(%0), %%mm1 \n" // nnz[b] || nnz[bn]
+                ::"r"(nnz+b_idx),
+                  "i"(d_idx)
             );
             __asm__ volatile(
                 "pminub    %%mm7, %%mm1 \n"
@@ -187,8 +193,8 @@
 
     // could do a special case for dir==0 && edges==1, but it only reduces the
    // average filter time by 1.2%
-    h264_loop_filter_strength_iteration_mmx2(bS, nnz, ref, mv, bidir, edges, step, mask_mv1, 1);
-    h264_loop_filter_strength_iteration_mmx2(bS, nnz, ref, mv, bidir,     4,    1, mask_mv0, 0);
+    h264_loop_filter_strength_iteration_mmx2(bS, nnz, ref, mv, bidir, edges, step, mask_mv1, 1, -8);
+    h264_loop_filter_strength_iteration_mmx2(bS, nnz, ref, mv, bidir,     4,    1, mask_mv0, 0, -1);
 
     __asm__ volatile(
         "movq   (%0), %%mm0 \n\t"
