
ffmpeg / libavcodec / i386 / vc1dsp_mmx.c @ d3a9c44e


/*
 * VC-1 and WMV3 - DSP functions MMX-optimized
 * Copyright (c) 2007 Christophe GISQUET <christophe.gisquet@free.fr>
 *
 * Permission is hereby granted, free of charge, to any person
 * obtaining a copy of this software and associated documentation
 * files (the "Software"), to deal in the Software without
 * restriction, including without limitation the rights to use,
 * copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following
 * conditions:
 *
 * The above copyright notice and this permission notice shall be
 * included in all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
 * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
 * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
 * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include "dsputil.h"
#include "x86_cpu.h"
/** Add the rounder in mm7 to mm3/mm4 and shift the results right by SHIFT */
#define NORMALIZE_MMX(SHIFT)                                    \
     "paddw     %%mm7, %%mm3           \n\t" /* +bias-r */      \
     "paddw     %%mm7, %%mm4           \n\t" /* +bias-r */      \
     "psraw     "SHIFT", %%mm3         \n\t"                    \
     "psraw     "SHIFT", %%mm4         \n\t"

#define TRANSFER_DO_PACK                        \
     "packuswb  %%mm4, %%mm3           \n\t"    \
     "movq      %%mm3, (%2)            \n\t"

#define TRANSFER_DONT_PACK                      \
     "movq      %%mm3, 0(%2)           \n\t"    \
     "movq      %%mm4, 8(%2)           \n\t"

/** @see MSPEL_FILTER13_CORE for use as UNPACK macro */
#define DO_UNPACK(reg)  "punpcklbw %%mm0, " reg "\n\t"
#define DONT_UNPACK(reg)

/** Load the rounder (32-r or 8-r, computed by the caller) and broadcast it to all four words of mm7 */
#define LOAD_ROUNDER_MMX(ROUND)                 \
     "movd      "ROUND", %%mm7         \n\t"    \
     "punpcklwd %%mm7, %%mm7           \n\t"    \
     "punpckldq %%mm7, %%mm7           \n\t"

#define SHIFT2_LINE(OFF, R0,R1,R2,R3)           \
    "paddw     %%mm"#R2", %%mm"#R1"    \n\t"    \
    "movd      (%1,%4), %%mm"#R0"      \n\t"    \
    "pmullw    %%mm6, %%mm"#R1"        \n\t"    \
    "punpcklbw %%mm0, %%mm"#R0"        \n\t"    \
    "movd      (%1,%3), %%mm"#R3"      \n\t"    \
    "psubw     %%mm"#R0", %%mm"#R1"    \n\t"    \
    "punpcklbw %%mm0, %%mm"#R3"        \n\t"    \
    "paddw     %%mm7, %%mm"#R1"        \n\t"    \
    "psubw     %%mm"#R3", %%mm"#R1"    \n\t"    \
    "psraw     %5, %%mm"#R1"           \n\t"    \
    "movq      %%mm"#R1", "#OFF"(%2)   \n\t"    \
    "add       %3, %1                  \n\t"

DECLARE_ALIGNED_16(static const uint64_t, fact_9) = 0x0009000900090009ULL;

/** Sacrificing mm6 makes it possible to pipeline loads from src */
static void vc1_put_ver_16b_shift2_mmx(int16_t *dst,
                                       const uint8_t *src, long int stride,
                                       int rnd, int64_t shift)
{
    int  w = 3;

    asm volatile(
        LOAD_ROUNDER_MMX("%6")
        "movq      %7, %%mm6               \n\t"
        "1:                                \n\t"
        "movd      (%1), %%mm2             \n\t"
        "add       %3, %1                  \n\t"
        "movd      (%1), %%mm3             \n\t"
        "punpcklbw %%mm0, %%mm2            \n\t"
        "punpcklbw %%mm0, %%mm3            \n\t"
        SHIFT2_LINE(  0, 1, 2, 3, 4)
        SHIFT2_LINE( 24, 2, 3, 4, 1)
        SHIFT2_LINE( 48, 3, 4, 1, 2)
        SHIFT2_LINE( 72, 4, 1, 2, 3)
        SHIFT2_LINE( 96, 1, 2, 3, 4)
        SHIFT2_LINE(120, 2, 3, 4, 1)
        SHIFT2_LINE(144, 3, 4, 1, 2)
        SHIFT2_LINE(168, 4, 1, 2, 3)
        "sub       %8, %1                  \n\t"
        "add       $8, %2                  \n\t"
        "dec       %0                      \n\t"
        "jnz 1b                            \n\t"
        : "+g"(w), "+r"(src), "+r"(dst)
        : "r"(stride), "r"(-2*stride), "m"(shift),
          "m"(rnd), "m"(fact_9), "g"(9*stride-4)
        : "memory"
    );
}
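
#if 0
/* Added illustrative sketch (not part of the original file): a plain-C
 * version of what the asm above computes, assuming the layout its callers
 * use: 12 int16 per output row (rows 24 bytes apart), with the rounder
 * already folded into rnd by the caller. */
static void vc1_put_ver_16b_shift2_c(int16_t *dst, const uint8_t *src,
                                     long int stride, int rnd, int64_t shift)
{
    int x, y;
    for (y = 0; y < 8; y++)
        for (x = 0; x < 12; x++)
            dst[y*12 + x] = (-    src[(y-1)*stride + x]
                             + 9*src[ y   *stride + x]
                             + 9*src[(y+1)*stride + x]
                             -    src[(y+2)*stride + x] + rnd) >> shift;
}
#endif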

/** Constant used to remove the bias, allowing use of signed 16-bit MMX arithmetic */
DECLARE_ALIGNED_16(static const uint64_t, ff_pw_128) = 0x0080008000800080ULL;

/**
 * Data is already unpacked, so some operations can directly be made from
 * memory.
 */
static void vc1_put_hor_16b_shift2_mmx(uint8_t *dst, long int stride,
                                       const int16_t *src, int rnd)
{
    int h = 8;

    src -= 1;
    rnd -= (-1+9+9-1)*1024; /* -1024 bias per input tap; -128 after >>7, removed below via ff_pw_128 */
    asm volatile(
        LOAD_ROUNDER_MMX("%4")
        "movq      %6, %%mm6               \n\t"
        "movq      %5, %%mm5               \n\t"
        "1:                                \n\t"
        "movq      2*0+0(%1), %%mm1        \n\t"
        "movq      2*0+8(%1), %%mm2        \n\t"
        "movq      2*1+0(%1), %%mm3        \n\t"
        "movq      2*1+8(%1), %%mm4        \n\t"
        "paddw     2*3+0(%1), %%mm1        \n\t"
        "paddw     2*3+8(%1), %%mm2        \n\t"
        "paddw     2*2+0(%1), %%mm3        \n\t"
        "paddw     2*2+8(%1), %%mm4        \n\t"
        "pmullw    %%mm5, %%mm3            \n\t"
        "pmullw    %%mm5, %%mm4            \n\t"
        "psubw     %%mm1, %%mm3            \n\t"
        "psubw     %%mm2, %%mm4            \n\t"
        NORMALIZE_MMX("$7")
        /* Remove bias */
        "paddw     %%mm6, %%mm3            \n\t"
        "paddw     %%mm6, %%mm4            \n\t"
        TRANSFER_DO_PACK
        "add       $24, %1                 \n\t"
        "add       %3, %2                  \n\t"
        "dec       %0                      \n\t"
        "jnz 1b                            \n\t"
        : "+g"(h), "+r" (src),  "+r" (dst)
        : "g"(stride), "m"(rnd), "m"(fact_9), "m"(ff_pw_128)
        : "memory"
    );
}
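
/* Added explanation: the (-1,9,9,-1) taps have DC gain 16, so subtracting
 * (-1+9+9-1)*1024 from the rounder amounts to biasing every 16-bit input
 * tap by -1024.  This keeps the value entering psraw within signed 16-bit
 * range (wraparound during the paddw/psubw accumulation is harmless mod
 * 2^16), and leaves -16384>>7 = -128 on each result, which the paddw with
 * ff_pw_128 removes just before packuswb. */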

/**
 * Purely vertical or horizontal 1/2 shift interpolation.
 * Sacrifices mm6 for the *9 factor.
 */
static void vc1_put_shift2_mmx(uint8_t *dst, const uint8_t *src,
                               long int stride, int rnd, long int offset)
{
    int h = 8;

    rnd = 8-rnd;
    asm volatile(
        LOAD_ROUNDER_MMX("%6")
        "movq      %8, %%mm6               \n\t"
        "1:                                \n\t"
        "movd      0(%1   ), %%mm3         \n\t"
        "movd      4(%1   ), %%mm4         \n\t"
        "movd      0(%1,%3), %%mm1         \n\t"
        "movd      4(%1,%3), %%mm2         \n\t"
        "add       %3, %1                  \n\t"
        "punpcklbw %%mm0, %%mm3            \n\t"
        "punpcklbw %%mm0, %%mm4            \n\t"
        "punpcklbw %%mm0, %%mm1            \n\t"
        "punpcklbw %%mm0, %%mm2            \n\t"
        "paddw     %%mm1, %%mm3            \n\t"
        "paddw     %%mm2, %%mm4            \n\t"
        "movd      0(%1,%4), %%mm1         \n\t" /* %4 = -2*offset: leading tap */
        "movd      4(%1,%4), %%mm2         \n\t"
        "pmullw    %%mm6, %%mm3            \n\t" /* 0,9,9,0*/
        "pmullw    %%mm6, %%mm4            \n\t" /* 0,9,9,0*/
        "punpcklbw %%mm0, %%mm1            \n\t"
        "punpcklbw %%mm0, %%mm2            \n\t"
        "psubw     %%mm1, %%mm3            \n\t" /*-1,9,9,0*/
        "psubw     %%mm2, %%mm4            \n\t" /*-1,9,9,0*/
        "movd      0(%1,%3), %%mm1         \n\t"
        "movd      4(%1,%3), %%mm2         \n\t"
        "punpcklbw %%mm0, %%mm1            \n\t"
        "punpcklbw %%mm0, %%mm2            \n\t"
        "psubw     %%mm1, %%mm3            \n\t" /*-1,9,9,-1*/
        "psubw     %%mm2, %%mm4            \n\t" /*-1,9,9,-1*/
        NORMALIZE_MMX("$4")
        TRANSFER_DO_PACK
        "add       %7, %1                  \n\t"
        "add       %5, %2                  \n\t"
        "dec       %0                      \n\t"
        "jnz 1b                            \n\t"
        : "+g"(h), "+r"(src),  "+r"(dst)
        : "r"(offset), "r"(-2*offset), "g"(stride), "m"(rnd),
          "g"(stride-offset), "m"(fact_9)
        : "memory"
    );
}
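
#if 0
/* Added illustrative sketch (not part of the original file): plain-C
 * equivalent of the asm above.  "offset" is the distance between taps,
 * which is how one routine serves both directions (vc1_mspel_mc below
 * passes stride for vertical and 1 for horizontal filtering). */
static void vc1_put_shift2_c(uint8_t *dst, const uint8_t *src,
                             long int stride, int rnd, long int offset)
{
    int x, y;
    rnd = 8 - rnd;
    for (y = 0; y < 8; y++) {
        for (x = 0; x < 8; x++)
            dst[x] = av_clip_uint8((-    src[x -   offset]
                                    + 9*src[x           ]
                                    + 9*src[x +   offset]
                                    -    src[x + 2*offset] + rnd) >> 4);
        src += stride;
        dst += stride;
    }
}
#endif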

/**
 * Filter coefficients made global to allow access by all 1 or 3 quarter shift
 * interpolation functions.
 */
DECLARE_ALIGNED_16(static const uint64_t, fact_53) = 0x0035003500350035ULL;
DECLARE_ALIGNED_16(static const uint64_t, fact_18) = 0x0012001200120012ULL;

/**
 * Core of the 1/4 and 3/4 shift bicubic interpolation.
 *
 * @param UNPACK  Macro unpacking arguments from 8 to 16 bits (can be empty).
 * @param MOVQ    "movd 1" to load and unpack 8-bit data, or "movq 2" if the data read is already unpacked.
 * @param A1      Address of 1st tap (beware of unpacked/packed).
 * @param A2      Address of 2nd tap
 * @param A3      Address of 3rd tap
 * @param A4      Address of 4th tap
 */
#define MSPEL_FILTER13_CORE(UNPACK, MOVQ, A1, A2, A3, A4, POS)  \
     MOVQ "*0+"A1", %%mm1       \n\t"                           \
     MOVQ "*4+"A1", %%mm2       \n\t"                           \
     UNPACK("%%mm1")                                            \
     UNPACK("%%mm2")                                            \
     "pmullw    "POS", %%mm1    \n\t"                           \
     "pmullw    "POS", %%mm2    \n\t"                           \
     MOVQ "*0+"A2", %%mm3       \n\t"                           \
     MOVQ "*4+"A2", %%mm4       \n\t"                           \
     UNPACK("%%mm3")                                            \
     UNPACK("%%mm4")                                            \
     "pmullw    %%mm6, %%mm3    \n\t" /* *18 */                 \
     "pmullw    %%mm6, %%mm4    \n\t" /* *18 */                 \
     "psubw     %%mm1, %%mm3    \n\t" /* 18,-3 */               \
     "psubw     %%mm2, %%mm4    \n\t" /* 18,-3 */               \
     MOVQ "*0+"A4", %%mm1       \n\t"                           \
     MOVQ "*4+"A4", %%mm2       \n\t"                           \
     UNPACK("%%mm1")                                            \
     UNPACK("%%mm2")                                            \
     "psllw     $2, %%mm1       \n\t" /* 4* */                  \
     "psllw     $2, %%mm2       \n\t" /* 4* */                  \
     "psubw     %%mm1, %%mm3    \n\t" /* -4,18,-3 */            \
     "psubw     %%mm2, %%mm4    \n\t" /* -4,18,-3 */            \
     MOVQ "*0+"A3", %%mm1       \n\t"                           \
     MOVQ "*4+"A3", %%mm2       \n\t"                           \
     UNPACK("%%mm1")                                            \
     UNPACK("%%mm2")                                            \
     "pmullw    %%mm5, %%mm1    \n\t" /* *53 */                 \
     "pmullw    %%mm5, %%mm2    \n\t" /* *53 */                 \
     "paddw     %%mm1, %%mm3    \n\t" /* -4,53,18,-3 */         \
     "paddw     %%mm2, %%mm4    \n\t" /* -4,53,18,-3 */

/**
 * Macro to build the vertical 16-bit version of vc1_put_shift[13].
 * Here, offset=src_stride. Parameters passed A1 to A4 must use
 * %3 (src_stride) and %4 (3*src_stride).
 *
 * @param  NAME   Either shift1 or shift3
 * @see MSPEL_FILTER13_CORE for information on A1->A4
 */
#define MSPEL_FILTER13_VER_16B(NAME, A1, A2, A3, A4)                    \
static void                                                             \
vc1_put_ver_16b_ ## NAME ## _mmx(int16_t *dst, const uint8_t *src,      \
                                 long int src_stride,                   \
                                 int rnd, int64_t shift)                \
{                                                                       \
    int h = 8;                                                          \
    src -= src_stride;                                                  \
    asm volatile(                                                       \
        LOAD_ROUNDER_MMX("%5")                                          \
        "movq      %7, %%mm5       \n\t"                                \
        "movq      %8, %%mm6       \n\t"                                \
        ASMALIGN(3)                                                     \
        "1:                        \n\t"                                \
        MSPEL_FILTER13_CORE(DO_UNPACK, "movd  1", A1, A2, A3, A4, "%9") \
        NORMALIZE_MMX("%6")                                             \
        TRANSFER_DONT_PACK                                              \
        /* Last 3 (in fact 4) pixels on the line */                     \
        "movd      8+"A1", %%mm1   \n\t"                                \
        DO_UNPACK("%%mm1")                                              \
        "movq      %%mm1, %%mm3    \n\t"                                \
        "paddw     %%mm1, %%mm1    \n\t"                                \
        "paddw     %%mm3, %%mm1    \n\t" /* 3* */                       \
        "movd      8+"A2", %%mm3   \n\t"                                \
        DO_UNPACK("%%mm3")                                              \
        "pmullw    %%mm6, %%mm3    \n\t" /* *18 */                      \
        "psubw     %%mm1, %%mm3    \n\t" /* 18,-3 */                    \
        "movd      8+"A3", %%mm1   \n\t"                                \
        DO_UNPACK("%%mm1")                                              \
        "pmullw    %%mm5, %%mm1    \n\t" /* *53 */                      \
        "paddw     %%mm1, %%mm3    \n\t" /* 53,18,-3 */                 \
        "movd      8+"A4", %%mm1   \n\t"                                \
        DO_UNPACK("%%mm1")                                              \
        "psllw     $2, %%mm1       \n\t" /* 4* */                       \
        "psubw     %%mm1, %%mm3    \n\t"                                \
        "paddw     %%mm7, %%mm3    \n\t"                                \
        "psraw     %6, %%mm3       \n\t"                                \
        "movq      %%mm3, 16(%2)   \n\t"                                \
        "add       %3, %1          \n\t"                                \
        "add       $24, %2         \n\t"                                \
        "dec       %0              \n\t"                                \
        "jnz 1b                    \n\t"                                \
        : "+g"(h), "+r" (src),  "+r" (dst)                              \
        : "r"(src_stride), "r"(3*src_stride),                           \
          "m"(rnd), "m"(shift),                                         \
          "m"(fact_53), "m"(fact_18), "m"(ff_pw_3)                      \
        : "memory"                                                      \
    );                                                                  \
}
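
/* Added note: each iteration stores 12 int16 per output row (the movq pair
 * from TRANSFER_DONT_PACK plus the extra movq at 16(%2)), and "add $24, %2"
 * keeps rows 24 bytes apart, matching the 12*8 tmp buffer that
 * vc1_mspel_mc allocates for the horizontal pass. */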

/**
 * Macro to build the horizontal 16-bit version of vc1_put_shift[13].
 * Here the data is 16-bit, so the A1 to A4 addresses are simple constant offsets.
 *
 * @param  NAME   Either shift1 or shift3
 * @see MSPEL_FILTER13_CORE for information on A1->A4
 */
#define MSPEL_FILTER13_HOR_16B(NAME, A1, A2, A3, A4)                    \
static void                                                             \
vc1_put_hor_16b_ ## NAME ## _mmx(uint8_t *dst, long int stride,         \
                                 const int16_t *src, int rnd)           \
{                                                                       \
    int h = 8;                                                          \
    src -= 1;                                                           \
    rnd -= (-4+58+13-3)*256; /* -256 bias per input tap; -128 after >>7, removed below via ff_pw_128 */ \
    asm volatile(                                                       \
        LOAD_ROUNDER_MMX("%4")                                          \
        "movq      %6, %%mm6       \n\t"                                \
        "movq      %5, %%mm5       \n\t"                                \
        ASMALIGN(3)                                                     \
        "1:                        \n\t"                                \
        MSPEL_FILTER13_CORE(DONT_UNPACK, "movq 2", A1, A2, A3, A4, "%8")\
        NORMALIZE_MMX("$7")                                             \
        /* Remove bias */                                               \
        "paddw     %7, %%mm3       \n\t"                                \
        "paddw     %7, %%mm4       \n\t"                                \
        TRANSFER_DO_PACK                                                \
        "add       $24, %1         \n\t"                                \
        "add       %3, %2          \n\t"                                \
        "dec       %0              \n\t"                                \
        "jnz 1b                    \n\t"                                \
        : "+g"(h), "+r" (src),  "+r" (dst)                              \
        : "g"(stride), "m"(rnd), "m"(fact_53), "m"(fact_18),            \
          "m"(ff_pw_128), "m"(ff_pw_3)                                  \
        : "memory"                                                      \
    );                                                                  \
}
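
/* Added note: same trick as in vc1_put_hor_16b_shift2_mmx, here with DC
 * gain (-4+58+13-3) = 64: the rounder is pre-biased by -64*256 = -16384,
 * which becomes -128 after the >>7 and is removed by the paddw with
 * ff_pw_128. */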

/**
 * Macro to build the 8-bit, any direction, version of vc1_put_shift[13].
 * Here, offset is the distance between taps (src_stride for vertical,
 * 1 for horizontal filtering). Parameters passed A1 to A4 must use
 * %3 (offset) and %4 (3*offset).
 *
 * @param  NAME   Either shift1 or shift3
 * @see MSPEL_FILTER13_CORE for information on A1->A4
 */
#define MSPEL_FILTER13_8B(NAME, A1, A2, A3, A4)                         \
static void                                                             \
vc1_put_## NAME ## _mmx(uint8_t *dst, const uint8_t *src,               \
                        long int stride, int rnd, long int offset)      \
{                                                                       \
    int h = 8;                                                          \
    src -= offset;                                                      \
    rnd = 32-rnd;                                                       \
    asm volatile (                                                      \
        LOAD_ROUNDER_MMX("%6")                                          \
        "movq      %7, %%mm5       \n\t"                                \
        "movq      %8, %%mm6       \n\t"                                \
        ASMALIGN(3)                                                     \
        "1:                        \n\t"                                \
        MSPEL_FILTER13_CORE(DO_UNPACK, "movd   1", A1, A2, A3, A4, "%9")\
        NORMALIZE_MMX("$6")                                             \
        TRANSFER_DO_PACK                                                \
        "add       %5, %1          \n\t"                                \
        "add       %5, %2          \n\t"                                \
        "dec       %0              \n\t"                                \
        "jnz 1b                    \n\t"                                \
        : "+g"(h), "+r" (src),  "+r" (dst)                              \
        : "r"(offset), "r"(3*offset), "g"(stride), "m"(rnd),            \
          "m"(fact_53), "m"(fact_18), "m"(ff_pw_3)                      \
        : "memory"                                                      \
    );                                                                  \
}

/** 1/4 shift bicubic interpolation */
MSPEL_FILTER13_8B     (shift1, "0(%1,%4  )", "0(%1,%3,2)", "0(%1,%3  )", "0(%1     )")
MSPEL_FILTER13_VER_16B(shift1, "0(%1,%4  )", "0(%1,%3,2)", "0(%1,%3  )", "0(%1     )")
MSPEL_FILTER13_HOR_16B(shift1, "2*3(%1)", "2*2(%1)", "2*1(%1)", "2*0(%1)")

/** 3/4 shift bicubic interpolation */
MSPEL_FILTER13_8B     (shift3, "0(%1     )", "0(%1,%3  )", "0(%1,%3,2)", "0(%1,%4  )")
MSPEL_FILTER13_VER_16B(shift3, "0(%1     )", "0(%1,%3  )", "0(%1,%3,2)", "0(%1,%4  )")
MSPEL_FILTER13_HOR_16B(shift3, "2*0(%1)", "2*1(%1)", "2*2(%1)", "2*3(%1)")

typedef void (*vc1_mspel_mc_filter_ver_16bits)(int16_t *dst, const uint8_t *src, long int src_stride, int rnd, int64_t shift);
typedef void (*vc1_mspel_mc_filter_hor_16bits)(uint8_t *dst, long int dst_stride, const int16_t *src, int rnd);
typedef void (*vc1_mspel_mc_filter_8bits)(uint8_t *dst, const uint8_t *src, long int stride, int rnd, long int offset);

/**
 * Interpolates fractional pel values by applying the proper vertical then
 * horizontal filter.
 *
 * @param  dst     Destination buffer for interpolated pels.
 * @param  src     Source buffer.
 * @param  stride  Stride for both src and dst buffers.
 * @param  hmode   Horizontal filter (expressed in quarter pixels shift).
 * @param  vmode   Vertical filter (expressed in quarter pixels shift).
 * @param  rnd     Rounding bias.
 */
static void vc1_mspel_mc(uint8_t *dst, const uint8_t *src, int stride,
                         int hmode, int vmode, int rnd)
{
    static const vc1_mspel_mc_filter_ver_16bits vc1_put_shift_ver_16bits[] =
         { NULL, vc1_put_ver_16b_shift1_mmx, vc1_put_ver_16b_shift2_mmx, vc1_put_ver_16b_shift3_mmx };
    static const vc1_mspel_mc_filter_hor_16bits vc1_put_shift_hor_16bits[] =
         { NULL, vc1_put_hor_16b_shift1_mmx, vc1_put_hor_16b_shift2_mmx, vc1_put_hor_16b_shift3_mmx };
    static const vc1_mspel_mc_filter_8bits vc1_put_shift_8bits[] =
         { NULL, vc1_put_shift1_mmx, vc1_put_shift2_mmx, vc1_put_shift3_mmx };

    asm volatile(
        "pxor %%mm0, %%mm0         \n\t"
        ::: "memory"
    );

    if (vmode) { /* Vertical filter to apply */
        if (hmode) { /* Horizontal filter to apply, output to tmp */
            static const int shift_value[] = { 0, 5, 1, 5 };
            int              shift = (shift_value[hmode]+shift_value[vmode])>>1;
            int              r;
            DECLARE_ALIGNED_16(int16_t, tmp[12*8]);
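            /* Added note: tmp receives the vertical pass output, 8 rows of
             * 12 int16 (rows 24 bytes apart); 11 of the 12 columns are
             * actually used: 8 output pixels plus 3 extra taps for the
             * 4-tap horizontal pass (hence the src-1 / tmp+1 adjustments
             * below). */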

            r = (1<<(shift-1)) + rnd-1;
            vc1_put_shift_ver_16bits[vmode](tmp, src-1, stride, r, shift);

            vc1_put_shift_hor_16bits[hmode](dst, stride, tmp+1, 64-rnd);
            return;
        }
        else { /* No horizontal filter, output 8 lines to dst */
            vc1_put_shift_8bits[vmode](dst, src, stride, 1-rnd, stride);
            return;
        }
    }

    /* Horizontal mode with no vertical mode */
    vc1_put_shift_8bits[hmode](dst, src, stride, rnd, 1);
}

static void put_vc1_mspel_mc00_mmx(uint8_t *dst, const uint8_t *src, int stride, int rnd) {
    put_pixels8_mmx(dst, src, stride, 8);
}

/** Macro to ease declaration of the bicubic filter interpolation functions */
#define DECLARE_FUNCTION(a, b)                                          \
static void put_vc1_mspel_mc ## a ## b ## _mmx(uint8_t *dst, const uint8_t *src, int stride, int rnd) { \
     vc1_mspel_mc(dst, src, stride, a, b, rnd);                         \
}

DECLARE_FUNCTION(0, 1)
DECLARE_FUNCTION(0, 2)
DECLARE_FUNCTION(0, 3)

DECLARE_FUNCTION(1, 0)
DECLARE_FUNCTION(1, 1)
DECLARE_FUNCTION(1, 2)
DECLARE_FUNCTION(1, 3)

DECLARE_FUNCTION(2, 0)
DECLARE_FUNCTION(2, 1)
DECLARE_FUNCTION(2, 2)
DECLARE_FUNCTION(2, 3)

DECLARE_FUNCTION(3, 0)
DECLARE_FUNCTION(3, 1)
DECLARE_FUNCTION(3, 2)
DECLARE_FUNCTION(3, 3)

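/* Added note: put_vc1_mspel_pixels_tab[] is indexed by hmode + 4*vmode,
 * both quarter-pel shifts, so e.g. put_vc1_mspel_mc01_mmx (hmode 0,
 * vmode 1) lands at index 4. */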
void ff_vc1dsp_init_mmx(DSPContext* dsp, AVCodecContext *avctx) {
    dsp->put_vc1_mspel_pixels_tab[ 0] = put_vc1_mspel_mc00_mmx;
    dsp->put_vc1_mspel_pixels_tab[ 4] = put_vc1_mspel_mc01_mmx;
    dsp->put_vc1_mspel_pixels_tab[ 8] = put_vc1_mspel_mc02_mmx;
    dsp->put_vc1_mspel_pixels_tab[12] = put_vc1_mspel_mc03_mmx;

    dsp->put_vc1_mspel_pixels_tab[ 1] = put_vc1_mspel_mc10_mmx;
    dsp->put_vc1_mspel_pixels_tab[ 5] = put_vc1_mspel_mc11_mmx;
    dsp->put_vc1_mspel_pixels_tab[ 9] = put_vc1_mspel_mc12_mmx;
    dsp->put_vc1_mspel_pixels_tab[13] = put_vc1_mspel_mc13_mmx;

    dsp->put_vc1_mspel_pixels_tab[ 2] = put_vc1_mspel_mc20_mmx;
    dsp->put_vc1_mspel_pixels_tab[ 6] = put_vc1_mspel_mc21_mmx;
    dsp->put_vc1_mspel_pixels_tab[10] = put_vc1_mspel_mc22_mmx;
    dsp->put_vc1_mspel_pixels_tab[14] = put_vc1_mspel_mc23_mmx;

    dsp->put_vc1_mspel_pixels_tab[ 3] = put_vc1_mspel_mc30_mmx;
    dsp->put_vc1_mspel_pixels_tab[ 7] = put_vc1_mspel_mc31_mmx;
    dsp->put_vc1_mspel_pixels_tab[11] = put_vc1_mspel_mc32_mmx;
    dsp->put_vc1_mspel_pixels_tab[15] = put_vc1_mspel_mc33_mmx;
}