Revision 0cc8a5d0

View differences:

libavcodec/x86/h264dsp_mmx.c
@@ -66,14 +66,15 @@
 static av_always_inline void h264_loop_filter_strength_iteration_mmx2(int16_t bS[2][4][4], uint8_t nnz[40],
                                                                       int8_t ref[2][40],   int16_t mv[2][40][2],
                                                                       int bidir,   int edges, int step,
-                                                                      int mask_mv, int dir, const int d_idx)
+                                                                      int mask_mv, int dir, const int d_idx,
+                                                                      const uint64_t mask_dir)
 {
-        DECLARE_ALIGNED(8, const uint64_t, mask_dir) = dir ? 0 : 0xffffffffffffffffULL;
         int b_idx, edge;
         for( b_idx=12, edge=0; edge<edges; edge+=step, b_idx+=8*step ) {
+            if (!mask_dir)
             __asm__ volatile(
-                "pand %0, %%mm0 \n\t"
-                ::"m"(mask_dir)
+                    "pxor %%mm0, %%mm0 \n\t"
+                    ::
             );
             if(!(mask_mv & edge)) {
                 if(bidir) {
@@ -193,8 +194,8 @@
 
     // could do a special case for dir==0 && edges==1, but it only reduces the
     // average filter time by 1.2%
-    h264_loop_filter_strength_iteration_mmx2(bS, nnz, ref, mv, bidir, edges, step, mask_mv1, 1, -8);
-    h264_loop_filter_strength_iteration_mmx2(bS, nnz, ref, mv, bidir,     4,    1, mask_mv0, 0, -1);
+    h264_loop_filter_strength_iteration_mmx2(bS, nnz, ref, mv, bidir, edges, step, mask_mv1, 1, -8,  0);
+    h264_loop_filter_strength_iteration_mmx2(bS, nnz, ref, mv, bidir,     4,    1, mask_mv0, 0, -1, -1);
 
     __asm__ volatile(
         "movq   (%0), %%mm0 \n\t"
