/*
 * Copyright (c) 2004-2005 Michael Niedermayer, Loren Merritt
 *
 * This file is part of Libav.
 *
 * Libav is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * Libav is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with Libav; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include "libavutil/cpu.h"
#include "libavutil/x86_cpu.h"
#include "libavcodec/h264dsp.h"
#include "dsputil_mmx.h"

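/* Alternating {3,1} bytes: per-component bias for the packed motion
 * vector comparison in the loop filter strength code, giving |mv_diff|
 * limits of 4 (horizontal) and 2 (vertical) as used for field
 * macroblocks. */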
DECLARE_ALIGNED(8, static const uint64_t, ff_pb_3_1  ) = 0x0103010301030103ULL;

/***********************************/
/* IDCT */

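/* External assembly implementations of the 8-bit IDCT/add routines;
 * they are only hooked up when yasm is available (see the HAVE_YASM
 * guard in ff_h264dsp_init_x86 below). */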
void ff_h264_idct_add_mmx     (uint8_t *dst, int16_t *block, int stride);
void ff_h264_idct8_add_mmx    (uint8_t *dst, int16_t *block, int stride);
void ff_h264_idct8_add_sse2   (uint8_t *dst, int16_t *block, int stride);
void ff_h264_idct_dc_add_mmx2 (uint8_t *dst, int16_t *block, int stride);
void ff_h264_idct8_dc_add_mmx2(uint8_t *dst, int16_t *block, int stride);

void ff_h264_idct_add16_mmx      (uint8_t *dst, const int *block_offset,
                                  DCTELEM *block, int stride, const uint8_t nnzc[6*8]);
void ff_h264_idct8_add4_mmx      (uint8_t *dst, const int *block_offset,
                                  DCTELEM *block, int stride, const uint8_t nnzc[6*8]);
void ff_h264_idct_add16_mmx2     (uint8_t *dst, const int *block_offset,
                                  DCTELEM *block, int stride, const uint8_t nnzc[6*8]);
void ff_h264_idct_add16intra_mmx (uint8_t *dst, const int *block_offset,
                                  DCTELEM *block, int stride, const uint8_t nnzc[6*8]);
void ff_h264_idct_add16intra_mmx2(uint8_t *dst, const int *block_offset,
                                  DCTELEM *block, int stride, const uint8_t nnzc[6*8]);
void ff_h264_idct8_add4_mmx2     (uint8_t *dst, const int *block_offset,
                                  DCTELEM *block, int stride, const uint8_t nnzc[6*8]);
void ff_h264_idct8_add4_sse2     (uint8_t *dst, const int *block_offset,
                                  DCTELEM *block, int stride, const uint8_t nnzc[6*8]);
void ff_h264_idct_add8_mmx       (uint8_t **dest, const int *block_offset,
                                  DCTELEM *block, int stride, const uint8_t nnzc[6*8]);
void ff_h264_idct_add8_mmx2      (uint8_t **dest, const int *block_offset,
                                  DCTELEM *block, int stride, const uint8_t nnzc[6*8]);

void ff_h264_idct_add16_sse2     (uint8_t *dst, const int *block_offset, DCTELEM *block,
                                  int stride, const uint8_t nnzc[6*8]);
void ff_h264_idct_add16intra_sse2(uint8_t *dst, const int *block_offset, DCTELEM *block,
                                  int stride, const uint8_t nnzc[6*8]);
void ff_h264_idct_add8_sse2      (uint8_t **dest, const int *block_offset, DCTELEM *block,
                                  int stride, const uint8_t nnzc[6*8]);
void ff_h264_luma_dc_dequant_idct_mmx (DCTELEM *output, DCTELEM *input, int qmul);
void ff_h264_luma_dc_dequant_idct_sse2(DCTELEM *output, DCTELEM *input, int qmul);

/***********************************/
/* deblocking */

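/* Computes the boundary strengths for one filtering direction (dir):
 * for each edge, bS = 2 if either adjacent block has nonzero
 * coefficients (nnz), else bS = 1 if the two sides use different
 * reference frames or their motion vectors differ by at least the
 * packed limit held in mm5/mm6 (set up by the caller); d_idx is the
 * offset from a block to its neighbour across the edge. */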
#define h264_loop_filter_strength_iteration_mmx2(bS, nz, ref, mv, bidir, edges, step, mask_mv, dir, d_idx, mask_dir) \
    do { \
        x86_reg b_idx; \
        mask_mv <<= 3; \
        for( b_idx=0; b_idx<edges; b_idx+=step ) { \
            if (!mask_dir) \
            __asm__ volatile( \
                    "pxor %%mm0, %%mm0 \n\t" \
                    :: \
            ); \
            if(!(mask_mv & b_idx)) { \
                if(bidir) { \
                    __asm__ volatile( \
                        "movd         %a3(%0,%2), %%mm2 \n" \
                        "punpckldq    %a4(%0,%2), %%mm2 \n" /* { ref0[bn], ref1[bn] } */ \
                        "pshufw $0x44, 12(%0,%2), %%mm0 \n" /* { ref0[b], ref0[b] } */ \
                        "pshufw $0x44, 52(%0,%2), %%mm1 \n" /* { ref1[b], ref1[b] } */ \
                        "pshufw $0x4E, %%mm2, %%mm3 \n" \
                        "psubb         %%mm2, %%mm0 \n" /* { ref0[b]!=ref0[bn], ref0[b]!=ref1[bn] } */ \
                        "psubb         %%mm3, %%mm1 \n" /* { ref1[b]!=ref1[bn], ref1[b]!=ref0[bn] } */ \
 \
                        "por           %%mm1, %%mm0 \n" \
                        "movq   %a5(%1,%2,4), %%mm1 \n" \
                        "movq   %a6(%1,%2,4), %%mm2 \n" \
                        "movq          %%mm1, %%mm3 \n" \
                        "movq          %%mm2, %%mm4 \n" \
                        "psubw   48(%1,%2,4), %%mm1 \n" \
                        "psubw   56(%1,%2,4), %%mm2 \n" \
                        "psubw  208(%1,%2,4), %%mm3 \n" \
                        "psubw  216(%1,%2,4), %%mm4 \n" \
                        "packsswb      %%mm2, %%mm1 \n" \
                        "packsswb      %%mm4, %%mm3 \n" \
                        "paddb         %%mm6, %%mm1 \n" \
                        "paddb         %%mm6, %%mm3 \n" \
                        "psubusb       %%mm5, %%mm1 \n" /* abs(mv[b] - mv[bn]) >= limit */ \
                        "psubusb       %%mm5, %%mm3 \n" \
                        "packsswb      %%mm3, %%mm1 \n" \
 \
                        "por           %%mm1, %%mm0 \n" \
                        "movq   %a7(%1,%2,4), %%mm1 \n" \
                        "movq   %a8(%1,%2,4), %%mm2 \n" \
                        "movq          %%mm1, %%mm3 \n" \
                        "movq          %%mm2, %%mm4 \n" \
                        "psubw   48(%1,%2,4), %%mm1 \n" \
                        "psubw   56(%1,%2,4), %%mm2 \n" \
                        "psubw  208(%1,%2,4), %%mm3 \n" \
                        "psubw  216(%1,%2,4), %%mm4 \n" \
                        "packsswb      %%mm2, %%mm1 \n" \
                        "packsswb      %%mm4, %%mm3 \n" \
                        "paddb         %%mm6, %%mm1 \n" \
                        "paddb         %%mm6, %%mm3 \n" \
                        "psubusb       %%mm5, %%mm1 \n" /* abs(mv[b] - mv[bn]) >= limit */ \
                        "psubusb       %%mm5, %%mm3 \n" \
                        "packsswb      %%mm3, %%mm1 \n" \
 \
                        "pshufw $0x4E, %%mm1, %%mm1 \n" \
                        "por           %%mm1, %%mm0 \n" \
                        "pshufw $0x4E, %%mm0, %%mm1 \n" \
                        "pminub        %%mm1, %%mm0 \n" \
                        ::"r"(ref), \
                          "r"(mv), \
                          "r"(b_idx), \
                          "i"(d_idx+12), \
                          "i"(d_idx+52), \
                          "i"(d_idx*4+48), \
                          "i"(d_idx*4+56), \
                          "i"(d_idx*4+208), \
                          "i"(d_idx*4+216) \
                    ); \
                } else { \
                    __asm__ volatile( \
                        "movd   12(%0,%2), %%mm0 \n" \
                        "psubb %a3(%0,%2), %%mm0 \n" /* ref[b] != ref[bn] */ \
                        "movq   48(%1,%2,4), %%mm1 \n" \
                        "movq   56(%1,%2,4), %%mm2 \n" \
                        "psubw %a4(%1,%2,4), %%mm1 \n" \
                        "psubw %a5(%1,%2,4), %%mm2 \n" \
                        "packsswb   %%mm2, %%mm1 \n" \
                        "paddb      %%mm6, %%mm1 \n" \
                        "psubusb    %%mm5, %%mm1 \n" /* abs(mv[b] - mv[bn]) >= limit */ \
                        "packsswb   %%mm1, %%mm1 \n" \
                        "por        %%mm1, %%mm0 \n" \
                        ::"r"(ref), \
                          "r"(mv), \
                          "r"(b_idx), \
                          "i"(d_idx+12), \
                          "i"(d_idx*4+48), \
                          "i"(d_idx*4+56) \
                    ); \
                } \
            } \
            __asm__ volatile( \
                "movd 12(%0,%1), %%mm1 \n" \
                "por %a2(%0,%1), %%mm1 \n" /* nnz[b] || nnz[bn] */ \
                ::"r"(nz), \
                  "r"(b_idx), \
                  "i"(d_idx+12) \
            ); \
            __asm__ volatile( \
                "pminub    %%mm7, %%mm1 \n" \
                "pminub    %%mm7, %%mm0 \n" \
                "psllw        $1, %%mm1 \n" \
                "pxor      %%mm2, %%mm2 \n" \
                "pmaxub    %%mm0, %%mm1 \n" \
                "punpcklbw %%mm2, %%mm1 \n" \
                "movq      %%mm1, %a1(%0,%2) \n" \
                ::"r"(bS), \
                  "i"(32*dir), \
                  "r"(b_idx) \
                :"memory" \
            ); \
        } \
    } while (0)

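/* Loads the packed constants (mm7 = ff_pb_1, mm6 = mv-limit bias,
 * mm5 = 2*mm6) and computes the strengths for both directions. */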
static void h264_loop_filter_strength_mmx2( int16_t bS[2][4][4], uint8_t nnz[40], int8_t ref[2][40], int16_t mv[2][40][2],
                                            int bidir, int edges, int step, int mask_mv0, int mask_mv1, int field ) {
    __asm__ volatile(
        "movq %0, %%mm7 \n"
        "movq %1, %%mm6 \n"
        ::"m"(ff_pb_1), "m"(ff_pb_3)
    );
    /* field macroblocks use the smaller per-component mv limit */
    if(field)
        __asm__ volatile(
            "movq %0, %%mm6 \n"
            ::"m"(ff_pb_3_1)
        );
    __asm__ volatile(
        "movq  %%mm6, %%mm5 \n"
        "paddb %%mm5, %%mm5 \n"
    :);

    // could do a special case for dir==0 && edges==1, but it only reduces the
    // average filter time by 1.2%
    step  <<= 3;
    edges <<= 3;
    h264_loop_filter_strength_iteration_mmx2(bS, nnz, ref, mv, bidir, edges, step, mask_mv1, 1, -8,  0);
    h264_loop_filter_strength_iteration_mmx2(bS, nnz, ref, mv, bidir,    32,    8, mask_mv0, 0, -1, -1);

    /* the dir 0 strengths are produced in transposed order; restore them */
    __asm__ volatile(
        "movq   (%0), %%mm0 \n\t"
        "movq  8(%0), %%mm1 \n\t"
        "movq 16(%0), %%mm2 \n\t"
        "movq 24(%0), %%mm3 \n\t"
        TRANSPOSE4(%%mm0, %%mm1, %%mm2, %%mm3, %%mm4)
        "movq %%mm0,   (%0) \n\t"
        "movq %%mm3,  8(%0) \n\t"
        "movq %%mm4, 16(%0) \n\t"
        "movq %%mm2, 24(%0) \n\t"
        ::"r"(bS[0])
        :"memory"
    );
}

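/* Prototypes for the assembly deblocking filters: LF_FUNC declares the
 * tc0-based variants, LF_IFUNC the intra variants, which take no tc0. */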
#define LF_FUNC(DIR, TYPE, DEPTH, OPT) \
void ff_deblock_ ## DIR ## _ ## TYPE ## _ ## DEPTH ## _ ## OPT (uint8_t *pix, int stride, \
                                                                int alpha, int beta, int8_t *tc0);
#define LF_IFUNC(DIR, TYPE, DEPTH, OPT) \
void ff_deblock_ ## DIR ## _ ## TYPE ## _ ## DEPTH ## _ ## OPT (uint8_t *pix, int stride, \
                                                                int alpha, int beta);

#define LF_FUNCS(type, depth)\
LF_FUNC (h,  chroma,       depth, mmxext)\
LF_IFUNC(h,  chroma_intra, depth, mmxext)\
LF_FUNC (v,  chroma,       depth, mmxext)\
LF_IFUNC(v,  chroma_intra, depth, mmxext)\
LF_FUNC (h,  luma,         depth, mmxext)\
LF_IFUNC(h,  luma_intra,   depth, mmxext)\
LF_FUNC (h,  luma,         depth, sse2)\
LF_IFUNC(h,  luma_intra,   depth, sse2)\
LF_FUNC (v,  luma,         depth, sse2)\
LF_IFUNC(v,  luma_intra,   depth, sse2)\
LF_FUNC (h,  chroma,       depth, sse2)\
LF_IFUNC(h,  chroma_intra, depth, sse2)\
LF_FUNC (v,  chroma,       depth, sse2)\
LF_IFUNC(v,  chroma_intra, depth, sse2)\
LF_FUNC (h,  luma,         depth,  avx)\
LF_IFUNC(h,  luma_intra,   depth,  avx)\
LF_FUNC (v,  luma,         depth,  avx)\
LF_IFUNC(v,  luma_intra,   depth,  avx)\
LF_FUNC (h,  chroma,       depth,  avx)\
LF_IFUNC(h,  chroma_intra, depth,  avx)\
LF_FUNC (v,  chroma,       depth,  avx)\
LF_IFUNC(v,  chroma_intra, depth,  avx)

LF_FUNCS( uint8_t,  8)
LF_FUNCS(uint16_t, 10)

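/* The mmxext vertical luma filter handles 8 pixels per call, so the
 * 16-pixel edge is done in two halves.  tc0 is negative for strips
 * that must not be filtered; (tc0[0] & tc0[1]) >= 0 is false only if
 * both values are negative (the sign bit survives the bitwise AND). */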
LF_FUNC (v8, luma,             8, mmxext)
static void ff_deblock_v_luma_8_mmxext(uint8_t *pix, int stride, int alpha, int beta, int8_t *tc0)
{
    if((tc0[0] & tc0[1]) >= 0)
        ff_deblock_v8_luma_8_mmxext(pix+0, stride, alpha, beta, tc0);
    if((tc0[2] & tc0[3]) >= 0)
        ff_deblock_v8_luma_8_mmxext(pix+8, stride, alpha, beta, tc0+2);
}
LF_IFUNC(v8, luma_intra,        8, mmxext)
static void ff_deblock_v_luma_intra_8_mmxext(uint8_t *pix, int stride, int alpha, int beta)
{
    ff_deblock_v8_luma_intra_8_mmxext(pix+0, stride, alpha, beta);
    ff_deblock_v8_luma_intra_8_mmxext(pix+8, stride, alpha, beta);
}

LF_FUNC (v,  luma,            10, mmxext)
LF_IFUNC(v,  luma_intra,      10, mmxext)

/***********************************/
/* weighted prediction */

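/* Prototypes for the weighted-prediction routines: H264_WEIGHT covers
 * unidirectional weighting, H264_BIWEIGHT bidirectional; W x H is the
 * block size in pixels. */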
#define H264_WEIGHT(W, H, OPT) \
void ff_h264_weight_ ## W ## x ## H ## _ ## OPT(uint8_t *dst, \
    int stride, int log2_denom, int weight, int offset);

#define H264_BIWEIGHT(W, H, OPT) \
void ff_h264_biweight_ ## W ## x ## H ## _ ## OPT(uint8_t *dst, \
    uint8_t *src, int stride, int log2_denom, int weightd, \
    int weights, int offset);

#define H264_BIWEIGHT_MMX(W,H) \
H264_WEIGHT  (W, H, mmx2) \
H264_BIWEIGHT(W, H, mmx2)

#define H264_BIWEIGHT_MMX_SSE(W,H) \
H264_BIWEIGHT_MMX(W, H) \
H264_WEIGHT      (W, H, sse2) \
H264_BIWEIGHT    (W, H, sse2) \
H264_BIWEIGHT    (W, H, ssse3)

H264_BIWEIGHT_MMX_SSE(16, 16)
H264_BIWEIGHT_MMX_SSE(16,  8)
H264_BIWEIGHT_MMX_SSE( 8, 16)
H264_BIWEIGHT_MMX_SSE( 8,  8)
H264_BIWEIGHT_MMX_SSE( 8,  4)
H264_BIWEIGHT_MMX    ( 4,  8)
H264_BIWEIGHT_MMX    ( 4,  4)
H264_BIWEIGHT_MMX    ( 4,  2)

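/* Runtime dispatch: install the fastest implementation of each DSP
 * function that the detected CPU flags and bit depth allow. */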
void ff_h264dsp_init_x86(H264DSPContext *c, const int bit_depth)
{
    int mm_flags = av_get_cpu_flags();

    if (bit_depth == 8) {
        if (mm_flags & AV_CPU_FLAG_MMX2) {
            c->h264_loop_filter_strength= h264_loop_filter_strength_mmx2;
        }
#if HAVE_YASM
        if (mm_flags & AV_CPU_FLAG_MMX) {
            c->h264_idct_dc_add=
            c->h264_idct_add= ff_h264_idct_add_mmx;
            c->h264_idct8_dc_add=
            c->h264_idct8_add= ff_h264_idct8_add_mmx;

            c->h264_idct_add16     = ff_h264_idct_add16_mmx;
            c->h264_idct8_add4     = ff_h264_idct8_add4_mmx;
            c->h264_idct_add8      = ff_h264_idct_add8_mmx;
            c->h264_idct_add16intra= ff_h264_idct_add16intra_mmx;
            c->h264_luma_dc_dequant_idct= ff_h264_luma_dc_dequant_idct_mmx;

            if (mm_flags & AV_CPU_FLAG_MMX2) {
                c->h264_idct_dc_add= ff_h264_idct_dc_add_mmx2;
                c->h264_idct8_dc_add= ff_h264_idct8_dc_add_mmx2;
                c->h264_idct_add16     = ff_h264_idct_add16_mmx2;
                c->h264_idct8_add4     = ff_h264_idct8_add4_mmx2;
                c->h264_idct_add8      = ff_h264_idct_add8_mmx2;
                c->h264_idct_add16intra= ff_h264_idct_add16intra_mmx2;

                c->h264_v_loop_filter_chroma= ff_deblock_v_chroma_8_mmxext;
                c->h264_h_loop_filter_chroma= ff_deblock_h_chroma_8_mmxext;
                c->h264_v_loop_filter_chroma_intra= ff_deblock_v_chroma_intra_8_mmxext;
                c->h264_h_loop_filter_chroma_intra= ff_deblock_h_chroma_intra_8_mmxext;
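                /* x86_64 guarantees SSE2, so the mmxext luma deblock
                 * fallbacks are only useful on 32-bit builds */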
#if ARCH_X86_32
                c->h264_v_loop_filter_luma= ff_deblock_v_luma_8_mmxext;
                c->h264_h_loop_filter_luma= ff_deblock_h_luma_8_mmxext;
                c->h264_v_loop_filter_luma_intra = ff_deblock_v_luma_intra_8_mmxext;
                c->h264_h_loop_filter_luma_intra = ff_deblock_h_luma_intra_8_mmxext;
#endif
                c->weight_h264_pixels_tab[0]= ff_h264_weight_16x16_mmx2;
                c->weight_h264_pixels_tab[1]= ff_h264_weight_16x8_mmx2;
                c->weight_h264_pixels_tab[2]= ff_h264_weight_8x16_mmx2;
                c->weight_h264_pixels_tab[3]= ff_h264_weight_8x8_mmx2;
                c->weight_h264_pixels_tab[4]= ff_h264_weight_8x4_mmx2;
                c->weight_h264_pixels_tab[5]= ff_h264_weight_4x8_mmx2;
                c->weight_h264_pixels_tab[6]= ff_h264_weight_4x4_mmx2;
                c->weight_h264_pixels_tab[7]= ff_h264_weight_4x2_mmx2;

                c->biweight_h264_pixels_tab[0]= ff_h264_biweight_16x16_mmx2;
                c->biweight_h264_pixels_tab[1]= ff_h264_biweight_16x8_mmx2;
                c->biweight_h264_pixels_tab[2]= ff_h264_biweight_8x16_mmx2;
                c->biweight_h264_pixels_tab[3]= ff_h264_biweight_8x8_mmx2;
                c->biweight_h264_pixels_tab[4]= ff_h264_biweight_8x4_mmx2;
                c->biweight_h264_pixels_tab[5]= ff_h264_biweight_4x8_mmx2;
                c->biweight_h264_pixels_tab[6]= ff_h264_biweight_4x4_mmx2;
                c->biweight_h264_pixels_tab[7]= ff_h264_biweight_4x2_mmx2;

                if (mm_flags&AV_CPU_FLAG_SSE2) {
                    c->h264_idct8_add = ff_h264_idct8_add_sse2;
                    c->h264_idct8_add4= ff_h264_idct8_add4_sse2;
                    c->h264_luma_dc_dequant_idct= ff_h264_luma_dc_dequant_idct_sse2;

                    c->weight_h264_pixels_tab[0]= ff_h264_weight_16x16_sse2;
                    c->weight_h264_pixels_tab[1]= ff_h264_weight_16x8_sse2;
                    c->weight_h264_pixels_tab[2]= ff_h264_weight_8x16_sse2;
                    c->weight_h264_pixels_tab[3]= ff_h264_weight_8x8_sse2;
                    c->weight_h264_pixels_tab[4]= ff_h264_weight_8x4_sse2;

                    c->biweight_h264_pixels_tab[0]= ff_h264_biweight_16x16_sse2;
                    c->biweight_h264_pixels_tab[1]= ff_h264_biweight_16x8_sse2;
                    c->biweight_h264_pixels_tab[2]= ff_h264_biweight_8x16_sse2;
                    c->biweight_h264_pixels_tab[3]= ff_h264_biweight_8x8_sse2;
                    c->biweight_h264_pixels_tab[4]= ff_h264_biweight_8x4_sse2;

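                    /* the luma deblock filters require a 16-byte
                     * aligned stack */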
#if HAVE_ALIGNED_STACK
                    c->h264_v_loop_filter_luma = ff_deblock_v_luma_8_sse2;
                    c->h264_h_loop_filter_luma = ff_deblock_h_luma_8_sse2;
                    c->h264_v_loop_filter_luma_intra = ff_deblock_v_luma_intra_8_sse2;
                    c->h264_h_loop_filter_luma_intra = ff_deblock_h_luma_intra_8_sse2;
#endif

                    c->h264_idct_add16 = ff_h264_idct_add16_sse2;
                    c->h264_idct_add8  = ff_h264_idct_add8_sse2;
                    c->h264_idct_add16intra = ff_h264_idct_add16intra_sse2;
                }
                if (mm_flags&AV_CPU_FLAG_SSSE3) {
                    c->biweight_h264_pixels_tab[0]= ff_h264_biweight_16x16_ssse3;
                    c->biweight_h264_pixels_tab[1]= ff_h264_biweight_16x8_ssse3;
                    c->biweight_h264_pixels_tab[2]= ff_h264_biweight_8x16_ssse3;
                    c->biweight_h264_pixels_tab[3]= ff_h264_biweight_8x8_ssse3;
                    c->biweight_h264_pixels_tab[4]= ff_h264_biweight_8x4_ssse3;
                }
                if (mm_flags&AV_CPU_FLAG_AVX) {
#if HAVE_ALIGNED_STACK
                    c->h264_v_loop_filter_luma = ff_deblock_v_luma_8_avx;
                    c->h264_h_loop_filter_luma = ff_deblock_h_luma_8_avx;
                    c->h264_v_loop_filter_luma_intra = ff_deblock_v_luma_intra_8_avx;
                    c->h264_h_loop_filter_luma_intra = ff_deblock_h_luma_intra_8_avx;
#endif
                }
            }
        }
#endif
    } else if (bit_depth == 10) {
409
    if (mm_flags & AV_CPU_FLAG_MMX) {
410
        if (mm_flags & AV_CPU_FLAG_MMX2) {
411
#if ARCH_X86_32
412
            c->h264_v_loop_filter_chroma= ff_deblock_v_chroma_10_mmxext;
413
            c->h264_v_loop_filter_chroma_intra= ff_deblock_v_chroma_intra_10_mmxext;
414
            c->h264_v_loop_filter_luma= ff_deblock_v_luma_10_mmxext;
415
            c->h264_h_loop_filter_luma= ff_deblock_h_luma_10_mmxext;
416
            c->h264_v_loop_filter_luma_intra = ff_deblock_v_luma_intra_10_mmxext;
417
            c->h264_h_loop_filter_luma_intra = ff_deblock_h_luma_intra_10_mmxext;
418
#endif
419
            if (mm_flags&AV_CPU_FLAG_SSE2) {
420
                c->h264_v_loop_filter_chroma= ff_deblock_v_chroma_10_sse2;
421
                c->h264_v_loop_filter_chroma_intra= ff_deblock_v_chroma_intra_10_sse2;
422
#if HAVE_ALIGNED_STACK
423
                c->h264_v_loop_filter_luma = ff_deblock_v_luma_10_sse2;
424
                c->h264_h_loop_filter_luma = ff_deblock_h_luma_10_sse2;
425
                c->h264_v_loop_filter_luma_intra = ff_deblock_v_luma_intra_10_sse2;
426
                c->h264_h_loop_filter_luma_intra = ff_deblock_h_luma_intra_10_sse2;
427
#endif
428
            }
429
            if (mm_flags&AV_CPU_FLAG_AVX) {
430
                c->h264_v_loop_filter_chroma= ff_deblock_v_chroma_10_avx;
431
                c->h264_v_loop_filter_chroma_intra= ff_deblock_v_chroma_intra_10_avx;
432
#if HAVE_ALIGNED_STACK
433
                c->h264_v_loop_filter_luma = ff_deblock_v_luma_10_avx;
434
                c->h264_h_loop_filter_luma = ff_deblock_h_luma_10_avx;
435
                c->h264_v_loop_filter_luma_intra = ff_deblock_v_luma_intra_10_avx;
436
                c->h264_h_loop_filter_luma_intra = ff_deblock_h_luma_intra_10_avx;
437
#endif
438
            }
439
        }
440
    }
441
#endif
442
    }
443
}