ffmpeg / libavcodec / x86 / dsputil_mmx.c @ fe2ff6d2
/*
 * MMX optimized DSP utils
 * Copyright (c) 2000, 2001 Fabrice Bellard
 * Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 *
 * MMX optimization by Nick Kurshev <nickols_k@mail.ru>
 */

#include "libavutil/cpu.h"
#include "libavutil/x86_cpu.h"
#include "libavcodec/dsputil.h"
#include "libavcodec/h264dsp.h"
#include "libavcodec/mpegvideo.h"
#include "libavcodec/simple_idct.h"
#include "libavcodec/ac3dec.h"
#include "dsputil_mmx.h"
#include "idct_xvid.h"

//#undef NDEBUG
//#include <assert.h>

/* pixel operations */
DECLARE_ALIGNED(8,  const uint64_t, ff_bone) = 0x0101010101010101ULL;
DECLARE_ALIGNED(8,  const uint64_t, ff_wtwo) = 0x0002000200020002ULL;

DECLARE_ALIGNED(16, const uint64_t, ff_pdw_80000000)[2] =
{0x8000000080000000ULL, 0x8000000080000000ULL};

DECLARE_ALIGNED(8,  const uint64_t, ff_pw_1  ) = 0x0001000100010001ULL;
DECLARE_ALIGNED(16, const xmm_reg,  ff_pw_3  ) = {0x0003000300030003ULL, 0x0003000300030003ULL};
DECLARE_ALIGNED(16, const xmm_reg,  ff_pw_4  ) = {0x0004000400040004ULL, 0x0004000400040004ULL};
DECLARE_ALIGNED(16, const xmm_reg,  ff_pw_5  ) = {0x0005000500050005ULL, 0x0005000500050005ULL};
DECLARE_ALIGNED(16, const xmm_reg,  ff_pw_8  ) = {0x0008000800080008ULL, 0x0008000800080008ULL};
DECLARE_ALIGNED(16, const xmm_reg,  ff_pw_9  ) = {0x0009000900090009ULL, 0x0009000900090009ULL};
DECLARE_ALIGNED(8,  const uint64_t, ff_pw_15 ) = 0x000F000F000F000FULL;
DECLARE_ALIGNED(16, const xmm_reg,  ff_pw_16 ) = {0x0010001000100010ULL, 0x0010001000100010ULL};
DECLARE_ALIGNED(16, const xmm_reg,  ff_pw_17 ) = {0x0011001100110011ULL, 0x0011001100110011ULL};
DECLARE_ALIGNED(16, const xmm_reg,  ff_pw_18 ) = {0x0012001200120012ULL, 0x0012001200120012ULL};
DECLARE_ALIGNED(8,  const uint64_t, ff_pw_20 ) = 0x0014001400140014ULL;
DECLARE_ALIGNED(16, const xmm_reg,  ff_pw_27 ) = {0x001B001B001B001BULL, 0x001B001B001B001BULL};
DECLARE_ALIGNED(16, const xmm_reg,  ff_pw_28 ) = {0x001C001C001C001CULL, 0x001C001C001C001CULL};
DECLARE_ALIGNED(16, const xmm_reg,  ff_pw_32 ) = {0x0020002000200020ULL, 0x0020002000200020ULL};
DECLARE_ALIGNED(8,  const uint64_t, ff_pw_42 ) = 0x002A002A002A002AULL;
DECLARE_ALIGNED(8,  const uint64_t, ff_pw_53 ) = 0x0035003500350035ULL;
DECLARE_ALIGNED(16, const xmm_reg,  ff_pw_63 ) = {0x003F003F003F003FULL, 0x003F003F003F003FULL};
DECLARE_ALIGNED(16, const xmm_reg,  ff_pw_64 ) = {0x0040004000400040ULL, 0x0040004000400040ULL};
DECLARE_ALIGNED(8,  const uint64_t, ff_pw_96 ) = 0x0060006000600060ULL;
DECLARE_ALIGNED(8,  const uint64_t, ff_pw_128) = 0x0080008000800080ULL;
DECLARE_ALIGNED(8,  const uint64_t, ff_pw_255) = 0x00ff00ff00ff00ffULL;

DECLARE_ALIGNED(16, const xmm_reg,  ff_pb_0  ) = {0x0000000000000000ULL, 0x0000000000000000ULL};
DECLARE_ALIGNED(16, const xmm_reg,  ff_pb_1  ) = {0x0101010101010101ULL, 0x0101010101010101ULL};
DECLARE_ALIGNED(16, const xmm_reg,  ff_pb_3  ) = {0x0303030303030303ULL, 0x0303030303030303ULL};
DECLARE_ALIGNED(16, const xmm_reg,  ff_pb_4  ) = {0x0404040404040404ULL, 0x0404040404040404ULL};
DECLARE_ALIGNED(8,  const uint64_t, ff_pb_7  ) = 0x0707070707070707ULL;
DECLARE_ALIGNED(8,  const uint64_t, ff_pb_1F ) = 0x1F1F1F1F1F1F1F1FULL;
DECLARE_ALIGNED(8,  const uint64_t, ff_pb_3F ) = 0x3F3F3F3F3F3F3F3FULL;
DECLARE_ALIGNED(16, const xmm_reg,  ff_pb_80 ) = {0x8080808080808080ULL, 0x8080808080808080ULL};
DECLARE_ALIGNED(8,  const uint64_t, ff_pb_81 ) = 0x8181818181818181ULL;
DECLARE_ALIGNED(16, const xmm_reg,  ff_pb_A1 ) = {0xA1A1A1A1A1A1A1A1ULL, 0xA1A1A1A1A1A1A1A1ULL};
DECLARE_ALIGNED(16, const xmm_reg,  ff_pb_F8 ) = {0xF8F8F8F8F8F8F8F8ULL, 0xF8F8F8F8F8F8F8F8ULL};
DECLARE_ALIGNED(8,  const uint64_t, ff_pb_FC ) = 0xFCFCFCFCFCFCFCFCULL;
DECLARE_ALIGNED(16, const xmm_reg,  ff_pb_FE ) = {0xFEFEFEFEFEFEFEFEULL, 0xFEFEFEFEFEFEFEFEULL};

DECLARE_ALIGNED(16, const double, ff_pd_1)[2] = { 1.0, 1.0 };
DECLARE_ALIGNED(16, const double, ff_pd_2)[2] = { 2.0, 2.0 };

#define JUMPALIGN() __asm__ volatile (".p2align 3"::)
#define MOVQ_ZERO(regd) __asm__ volatile ("pxor %%" #regd ", %%" #regd ::)

#define MOVQ_BFE(regd) \
    __asm__ volatile ( \
    "pcmpeqd %%" #regd ", %%" #regd " \n\t" \
    "paddb   %%" #regd ", %%" #regd " \n\t" ::)

#ifndef PIC
#define MOVQ_BONE(regd) __asm__ volatile ("movq %0, %%" #regd " \n\t" ::"m"(ff_bone))
#define MOVQ_WTWO(regd) __asm__ volatile ("movq %0, %%" #regd " \n\t" ::"m"(ff_wtwo))
#else
// for shared library it's better to use this way for accessing constants
// pcmpeqd -> -1
#define MOVQ_BONE(regd) \
    __asm__ volatile ( \
    "pcmpeqd  %%" #regd ", %%" #regd " \n\t" \
    "psrlw $15, %%" #regd "            \n\t" \
    "packuswb %%" #regd ", %%" #regd " \n\t" ::)

#define MOVQ_WTWO(regd) \
    __asm__ volatile ( \
    "pcmpeqd %%" #regd ", %%" #regd " \n\t" \
    "psrlw $15, %%" #regd "           \n\t" \
    "psllw $1, %%" #regd "            \n\t" ::)

#endif
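
/* Editor's note (illustration, not part of the original file): the PIC
 * variants above synthesize their constants in registers instead of
 * loading them from memory, which avoids a GOT-relative load in
 * position-independent code.  A scalar model of MOVQ_BONE, assuming four
 * 16-bit lanes in a 64-bit register (all names here are hypothetical):
 */
static inline uint64_t movq_bone_model(void)
{
    uint64_t allones = ~(uint64_t)0;  /* pcmpeqd: every bit set */
    /* psrlw $15 is a per-lane shift; the mask models the lane boundaries */
    uint64_t words = (allones >> 15) & 0x0001000100010001ULL;
    uint64_t bytes = 0;
    int i;
    for (i = 0; i < 4; i++) {
        /* packuswb with src == dst: each 0x0001 word saturates to the
           byte 0x01 and shows up in both halves of the result */
        uint64_t b = (words >> (16 * i)) & 0xFF;
        bytes |= (b << (8 * i)) | (b << (8 * i + 32));
    }
    return bytes; /* == ff_bone, 0x0101010101010101; MOVQ_WTWO analogously
                     shifts the 0x0001 words left by one to get ff_wtwo */
}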

// using regr as temporary and for the output result
// first argument is unmodified and second is trashed
// regfe is supposed to contain 0xfefefefefefefefe
#define PAVGB_MMX_NO_RND(rega, regb, regr, regfe) \
    "movq " #rega ", " #regr "  \n\t"\
    "pand " #regb ", " #regr "  \n\t"\
    "pxor " #rega ", " #regb "  \n\t"\
    "pand " #regfe "," #regb "  \n\t"\
    "psrlq $1, " #regb "        \n\t"\
    "paddb " #regb ", " #regr " \n\t"

#define PAVGB_MMX(rega, regb, regr, regfe) \
    "movq " #rega ", " #regr "  \n\t"\
    "por  " #regb ", " #regr "  \n\t"\
    "pxor " #rega ", " #regb "  \n\t"\
    "pand " #regfe "," #regb "  \n\t"\
    "psrlq $1, " #regb "        \n\t"\
    "psubb " #regb ", " #regr " \n\t"

// mm6 is supposed to contain 0xfefefefefefefefe
#define PAVGBP_MMX_NO_RND(rega, regb, regr, regc, regd, regp) \
    "movq " #rega ", " #regr "  \n\t"\
    "movq " #regc ", " #regp "  \n\t"\
    "pand " #regb ", " #regr "  \n\t"\
    "pand " #regd ", " #regp "  \n\t"\
    "pxor " #rega ", " #regb "  \n\t"\
    "pxor " #regc ", " #regd "  \n\t"\
    "pand %%mm6, " #regb "      \n\t"\
    "pand %%mm6, " #regd "      \n\t"\
    "psrlq $1, " #regb "        \n\t"\
    "psrlq $1, " #regd "        \n\t"\
    "paddb " #regb ", " #regr " \n\t"\
    "paddb " #regd ", " #regp " \n\t"

#define PAVGBP_MMX(rega, regb, regr, regc, regd, regp) \
    "movq " #rega ", " #regr "  \n\t"\
    "movq " #regc ", " #regp "  \n\t"\
    "por  " #regb ", " #regr "  \n\t"\
    "por  " #regd ", " #regp "  \n\t"\
    "pxor " #rega ", " #regb "  \n\t"\
    "pxor " #regc ", " #regd "  \n\t"\
    "pand %%mm6, " #regb "      \n\t"\
    "pand %%mm6, " #regd "      \n\t"\
    "psrlq $1, " #regd "        \n\t"\
    "psrlq $1, " #regb "        \n\t"\
    "psubb " #regb ", " #regr " \n\t"\
    "psubb " #regd ", " #regp " \n\t"

/***********************************/
/* MMX no rounding */
#define DEF(x, y) x ## _no_rnd_ ## y ##_mmx
#define SET_RND MOVQ_WONE
#define PAVGBP(a, b, c, d, e, f) PAVGBP_MMX_NO_RND(a, b, c, d, e, f)
#define PAVGB(a, b, c, e) PAVGB_MMX_NO_RND(a, b, c, e)
#define OP_AVG(a, b, c, e) PAVGB_MMX(a, b, c, e)

#include "dsputil_mmx_rnd_template.c"

#undef DEF
#undef SET_RND
#undef PAVGBP
#undef PAVGB
/***********************************/
/* MMX rounding */

#define DEF(x, y) x ## _ ## y ##_mmx
#define SET_RND MOVQ_WTWO
#define PAVGBP(a, b, c, d, e, f) PAVGBP_MMX(a, b, c, d, e, f)
#define PAVGB(a, b, c, e) PAVGB_MMX(a, b, c, e)

#include "dsputil_mmx_rnd_template.c"

#undef DEF
#undef SET_RND
#undef PAVGBP
#undef PAVGB
#undef OP_AVG

/***********************************/
/* 3Dnow specific */

#define DEF(x) x ## _3dnow
#define PAVGB "pavgusb"
#define OP_AVG PAVGB

#include "dsputil_mmx_avg_template.c"

#undef DEF
#undef PAVGB
#undef OP_AVG

/***********************************/
/* MMX2 specific */

#define DEF(x) x ## _mmx2

/* Introduced only in MMX2 set */
#define PAVGB "pavgb"
#define OP_AVG PAVGB

#include "dsputil_mmx_avg_template.c"

#undef DEF
#undef PAVGB
#undef OP_AVG

#define put_no_rnd_pixels16_mmx put_pixels16_mmx
#define put_no_rnd_pixels8_mmx put_pixels8_mmx
#define put_pixels16_mmx2 put_pixels16_mmx
#define put_pixels8_mmx2 put_pixels8_mmx
#define put_pixels4_mmx2 put_pixels4_mmx
#define put_no_rnd_pixels16_mmx2 put_no_rnd_pixels16_mmx
#define put_no_rnd_pixels8_mmx2 put_no_rnd_pixels8_mmx
#define put_pixels16_3dnow put_pixels16_mmx
#define put_pixels8_3dnow put_pixels8_mmx
#define put_pixels4_3dnow put_pixels4_mmx
#define put_no_rnd_pixels16_3dnow put_no_rnd_pixels16_mmx
#define put_no_rnd_pixels8_3dnow put_no_rnd_pixels8_mmx

/***********************************/
/* standard MMX */

void ff_put_pixels_clamped_mmx(const DCTELEM *block, uint8_t *pixels, int line_size)
{
    const DCTELEM *p;
    uint8_t *pix;

    /* read the pixels */
    p = block;
    pix = pixels;
    /* unrolled loop */
    __asm__ volatile(
        "movq    %3, %%mm0          \n\t"
        "movq   8%3, %%mm1          \n\t"
        "movq  16%3, %%mm2          \n\t"
        "movq  24%3, %%mm3          \n\t"
        "movq  32%3, %%mm4          \n\t"
        "movq  40%3, %%mm5          \n\t"
        "movq  48%3, %%mm6          \n\t"
        "movq  56%3, %%mm7          \n\t"
        "packuswb %%mm1, %%mm0      \n\t"
        "packuswb %%mm3, %%mm2      \n\t"
        "packuswb %%mm5, %%mm4      \n\t"
        "packuswb %%mm7, %%mm6      \n\t"
        "movq %%mm0, (%0)           \n\t"
        "movq %%mm2, (%0, %1)       \n\t"
        "movq %%mm4, (%0, %1, 2)    \n\t"
        "movq %%mm6, (%0, %2)       \n\t"
        ::"r" (pix), "r" ((x86_reg)line_size), "r" ((x86_reg)line_size*3), "m"(*p)
        :"memory");
    pix += line_size*4;
    p   += 32;

    // If this were an exact copy of the code above, the compiler would
    // generate some very strange code, thus the "r" constraint is used here.
    __asm__ volatile(
        "movq   (%3), %%mm0         \n\t"
        "movq  8(%3), %%mm1         \n\t"
        "movq 16(%3), %%mm2         \n\t"
        "movq 24(%3), %%mm3         \n\t"
        "movq 32(%3), %%mm4         \n\t"
        "movq 40(%3), %%mm5         \n\t"
        "movq 48(%3), %%mm6         \n\t"
        "movq 56(%3), %%mm7         \n\t"
        "packuswb %%mm1, %%mm0      \n\t"
        "packuswb %%mm3, %%mm2      \n\t"
        "packuswb %%mm5, %%mm4      \n\t"
        "packuswb %%mm7, %%mm6      \n\t"
        "movq %%mm0, (%0)           \n\t"
        "movq %%mm2, (%0, %1)       \n\t"
        "movq %%mm4, (%0, %1, 2)    \n\t"
        "movq %%mm6, (%0, %2)       \n\t"
        ::"r" (pix), "r" ((x86_reg)line_size), "r" ((x86_reg)line_size*3), "r"(p)
        :"memory");
}
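
/* Editor's reference sketch of what the two asm blocks above compute
 * (not part of the original file): each of the 64 DCT coefficients is
 * saturated to [0,255] by packuswb and stored, 8 rows of 8 pixels. */
static inline void put_pixels_clamped_ref(const DCTELEM *block, uint8_t *pixels, int line_size)
{
    int i, j;
    for (i = 0; i < 8; i++) {
        for (j = 0; j < 8; j++) {
            int v = block[i * 8 + j];
            pixels[j] = v < 0 ? 0 : v > 255 ? 255 : v; /* packuswb saturation */
        }
        pixels += line_size;
    }
}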

DECLARE_ASM_CONST(8, uint8_t, ff_vector128)[8] =
  { 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80 };

#define put_signed_pixels_clamped_mmx_half(off) \
    "movq    "#off"(%2), %%mm1      \n\t"\
    "movq 16+"#off"(%2), %%mm2      \n\t"\
    "movq 32+"#off"(%2), %%mm3      \n\t"\
    "movq 48+"#off"(%2), %%mm4      \n\t"\
    "packsswb  8+"#off"(%2), %%mm1  \n\t"\
    "packsswb 24+"#off"(%2), %%mm2  \n\t"\
    "packsswb 40+"#off"(%2), %%mm3  \n\t"\
    "packsswb 56+"#off"(%2), %%mm4  \n\t"\
    "paddb %%mm0, %%mm1             \n\t"\
    "paddb %%mm0, %%mm2             \n\t"\
    "paddb %%mm0, %%mm3             \n\t"\
    "paddb %%mm0, %%mm4             \n\t"\
    "movq %%mm1, (%0)               \n\t"\
    "movq %%mm2, (%0, %3)           \n\t"\
    "movq %%mm3, (%0, %3, 2)        \n\t"\
    "movq %%mm4, (%0, %1)           \n\t"

void ff_put_signed_pixels_clamped_mmx(const DCTELEM *block, uint8_t *pixels, int line_size)
{
    x86_reg line_skip = line_size;
    x86_reg line_skip3;

    __asm__ volatile (
        "movq "MANGLE(ff_vector128)", %%mm0 \n\t"
        "lea (%3, %3, 2), %1                \n\t"
        put_signed_pixels_clamped_mmx_half(0)
        "lea (%0, %3, 4), %0                \n\t"
        put_signed_pixels_clamped_mmx_half(64)
        :"+&r" (pixels), "=&r" (line_skip3)
        :"r" (block), "r"(line_skip)
        :"memory");
}

void ff_add_pixels_clamped_mmx(const DCTELEM *block, uint8_t *pixels, int line_size)
{
    const DCTELEM *p;
    uint8_t *pix;
    int i;

    /* read the pixels */
    p = block;
    pix = pixels;
    MOVQ_ZERO(mm7);
    i = 4;
    do {
        __asm__ volatile(
            "movq   (%2), %%mm0     \n\t"
            "movq  8(%2), %%mm1     \n\t"
            "movq 16(%2), %%mm2     \n\t"
            "movq 24(%2), %%mm3     \n\t"
            "movq %0, %%mm4         \n\t"
            "movq %1, %%mm6         \n\t"
            "movq %%mm4, %%mm5      \n\t"
            "punpcklbw %%mm7, %%mm4 \n\t"
            "punpckhbw %%mm7, %%mm5 \n\t"
            "paddsw %%mm4, %%mm0    \n\t"
            "paddsw %%mm5, %%mm1    \n\t"
            "movq %%mm6, %%mm5      \n\t"
            "punpcklbw %%mm7, %%mm6 \n\t"
            "punpckhbw %%mm7, %%mm5 \n\t"
            "paddsw %%mm6, %%mm2    \n\t"
            "paddsw %%mm5, %%mm3    \n\t"
            "packuswb %%mm1, %%mm0  \n\t"
            "packuswb %%mm3, %%mm2  \n\t"
            "movq %%mm0, %0         \n\t"
            "movq %%mm2, %1         \n\t"
            :"+m"(*pix), "+m"(*(pix+line_size))
            :"r"(p)
            :"memory");
        pix += line_size*2;
        p   += 16;
    } while (--i);
}
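
/* Editor's reference for the loop above (not part of the original file):
 * the residual block is added to the existing pixels; paddsw followed by
 * packuswb behaves as a clamp of pix + block to [0,255] for the value
 * ranges that occur here. */
static inline void add_pixels_clamped_ref(const DCTELEM *block, uint8_t *pixels, int line_size)
{
    int i, j;
    for (i = 0; i < 8; i++) {
        for (j = 0; j < 8; j++) {
            int v = pixels[j] + block[i * 8 + j];
            pixels[j] = v < 0 ? 0 : v > 255 ? 255 : v;
        }
        pixels += line_size;
    }
}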

static void put_pixels4_mmx(uint8_t *block, const uint8_t *pixels, int line_size, int h)
{
    __asm__ volatile(
        "lea (%3, %3), %%"REG_a"    \n\t"
        ".p2align 3                 \n\t"
        "1:                         \n\t"
        "movd (%1), %%mm0           \n\t"
        "movd (%1, %3), %%mm1       \n\t"
        "movd %%mm0, (%2)           \n\t"
        "movd %%mm1, (%2, %3)       \n\t"
        "add %%"REG_a", %1          \n\t"
        "add %%"REG_a", %2          \n\t"
        "movd (%1), %%mm0           \n\t"
        "movd (%1, %3), %%mm1       \n\t"
        "movd %%mm0, (%2)           \n\t"
        "movd %%mm1, (%2, %3)       \n\t"
        "add %%"REG_a", %1          \n\t"
        "add %%"REG_a", %2          \n\t"
        "subl $4, %0                \n\t"
        "jnz 1b                     \n\t"
        : "+g"(h), "+r" (pixels),  "+r" (block)
        : "r"((x86_reg)line_size)
        : "%"REG_a, "memory"
        );
}

static void put_pixels8_mmx(uint8_t *block, const uint8_t *pixels, int line_size, int h)
{
    __asm__ volatile(
        "lea (%3, %3), %%"REG_a"    \n\t"
        ".p2align 3                 \n\t"
        "1:                         \n\t"
        "movq (%1), %%mm0           \n\t"
        "movq (%1, %3), %%mm1       \n\t"
        "movq %%mm0, (%2)           \n\t"
        "movq %%mm1, (%2, %3)       \n\t"
        "add %%"REG_a", %1          \n\t"
        "add %%"REG_a", %2          \n\t"
        "movq (%1), %%mm0           \n\t"
        "movq (%1, %3), %%mm1       \n\t"
        "movq %%mm0, (%2)           \n\t"
        "movq %%mm1, (%2, %3)       \n\t"
        "add %%"REG_a", %1          \n\t"
        "add %%"REG_a", %2          \n\t"
        "subl $4, %0                \n\t"
        "jnz 1b                     \n\t"
        : "+g"(h), "+r" (pixels),  "+r" (block)
        : "r"((x86_reg)line_size)
        : "%"REG_a, "memory"
        );
}

static void put_pixels16_mmx(uint8_t *block, const uint8_t *pixels, int line_size, int h)
{
    __asm__ volatile(
        "lea (%3, %3), %%"REG_a"    \n\t"
        ".p2align 3                 \n\t"
        "1:                         \n\t"
        "movq (%1), %%mm0           \n\t"
        "movq 8(%1), %%mm4          \n\t"
        "movq (%1, %3), %%mm1       \n\t"
        "movq 8(%1, %3), %%mm5      \n\t"
        "movq %%mm0, (%2)           \n\t"
        "movq %%mm4, 8(%2)          \n\t"
        "movq %%mm1, (%2, %3)       \n\t"
        "movq %%mm5, 8(%2, %3)      \n\t"
        "add %%"REG_a", %1          \n\t"
        "add %%"REG_a", %2          \n\t"
        "movq (%1), %%mm0           \n\t"
        "movq 8(%1), %%mm4          \n\t"
        "movq (%1, %3), %%mm1       \n\t"
        "movq 8(%1, %3), %%mm5      \n\t"
        "movq %%mm0, (%2)           \n\t"
        "movq %%mm4, 8(%2)          \n\t"
        "movq %%mm1, (%2, %3)       \n\t"
        "movq %%mm5, 8(%2, %3)      \n\t"
        "add %%"REG_a", %1          \n\t"
        "add %%"REG_a", %2          \n\t"
        "subl $4, %0                \n\t"
        "jnz 1b                     \n\t"
        : "+g"(h), "+r" (pixels),  "+r" (block)
        : "r"((x86_reg)line_size)
        : "%"REG_a, "memory"
        );
}

static void put_pixels16_sse2(uint8_t *block, const uint8_t *pixels, int line_size, int h)
{
    __asm__ volatile(
        "1:                         \n\t"
        "movdqu (%1), %%xmm0        \n\t"
        "movdqu (%1,%3), %%xmm1     \n\t"
        "movdqu (%1,%3,2), %%xmm2   \n\t"
        "movdqu (%1,%4), %%xmm3     \n\t"
        "movdqa %%xmm0, (%2)        \n\t"
        "movdqa %%xmm1, (%2,%3)     \n\t"
        "movdqa %%xmm2, (%2,%3,2)   \n\t"
        "movdqa %%xmm3, (%2,%4)     \n\t"
        "subl $4, %0                \n\t"
        "lea (%1,%3,4), %1          \n\t"
        "lea (%2,%3,4), %2          \n\t"
        "jnz 1b                     \n\t"
        : "+g"(h), "+r" (pixels),  "+r" (block)
        : "r"((x86_reg)line_size), "r"((x86_reg)3L*line_size)
        : "memory"
        );
}

static void avg_pixels16_sse2(uint8_t *block, const uint8_t *pixels, int line_size, int h)
{
    __asm__ volatile(
        "1:                         \n\t"
        "movdqu (%1), %%xmm0        \n\t"
        "movdqu (%1,%3), %%xmm1     \n\t"
        "movdqu (%1,%3,2), %%xmm2   \n\t"
        "movdqu (%1,%4), %%xmm3     \n\t"
        "pavgb  (%2), %%xmm0        \n\t"
        "pavgb  (%2,%3), %%xmm1     \n\t"
        "pavgb  (%2,%3,2), %%xmm2   \n\t"
        "pavgb  (%2,%4), %%xmm3     \n\t"
        "movdqa %%xmm0, (%2)        \n\t"
        "movdqa %%xmm1, (%2,%3)     \n\t"
        "movdqa %%xmm2, (%2,%3,2)   \n\t"
        "movdqa %%xmm3, (%2,%4)     \n\t"
        "subl $4, %0                \n\t"
        "lea (%1,%3,4), %1          \n\t"
        "lea (%2,%3,4), %2          \n\t"
        "jnz 1b                     \n\t"
        : "+g"(h), "+r" (pixels),  "+r" (block)
        : "r"((x86_reg)line_size), "r"((x86_reg)3L*line_size)
        : "memory"
        );
}
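
/* Editor's note: the SSE2 versions read with movdqu (the source may be
 * unaligned) but store with movdqa, so `block` must be 16-byte aligned.
 * Per byte, pavgb computes the rounded average; scalar sketch (not part
 * of the original file): */
static inline uint8_t pavgb_ref(uint8_t a, uint8_t b)
{
    return (a + b + 1) >> 1;
}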

#define CLEAR_BLOCKS(name,n) \
static void name(DCTELEM *blocks)\
{\
    __asm__ volatile(\
        "pxor %%mm7, %%mm7              \n\t"\
        "mov     %1, %%"REG_a"          \n\t"\
        "1:                             \n\t"\
        "movq %%mm7,   (%0, %%"REG_a")  \n\t"\
        "movq %%mm7,  8(%0, %%"REG_a")  \n\t"\
        "movq %%mm7, 16(%0, %%"REG_a")  \n\t"\
        "movq %%mm7, 24(%0, %%"REG_a")  \n\t"\
        "add $32, %%"REG_a"             \n\t"\
        " js 1b                         \n\t"\
        : : "r" (((uint8_t *)blocks)+128*n),\
            "i" (-128*n)\
        : "%"REG_a\
    );\
}
CLEAR_BLOCKS(clear_blocks_mmx, 6)
CLEAR_BLOCKS(clear_block_mmx, 1)
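
/* Editor's note: functionally the macro above just zeroes n 8x8 blocks of
 * DCTELEMs; the asm indexes from a negative offset so the loop condition
 * is a plain sign test.  Scalar equivalent (illustration, not part of the
 * original file): */
static inline void clear_blocks_ref(DCTELEM *blocks, int n)
{
    int i;
    for (i = 0; i < n * 64; i++)
        blocks[i] = 0;
}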

static void clear_block_sse(DCTELEM *block)
{
    __asm__ volatile(
        "xorps  %%xmm0, %%xmm0  \n"
        "movaps %%xmm0,    (%0) \n"
        "movaps %%xmm0,  16(%0) \n"
        "movaps %%xmm0,  32(%0) \n"
        "movaps %%xmm0,  48(%0) \n"
        "movaps %%xmm0,  64(%0) \n"
        "movaps %%xmm0,  80(%0) \n"
        "movaps %%xmm0,  96(%0) \n"
        "movaps %%xmm0, 112(%0) \n"
        :: "r"(block)
        : "memory"
    );
}

static void clear_blocks_sse(DCTELEM *blocks)
{
    __asm__ volatile(
        "xorps %%xmm0, %%xmm0               \n"
        "mov     %1, %%"REG_a"              \n"
        "1:                                 \n"
        "movaps %%xmm0,    (%0, %%"REG_a")  \n"
        "movaps %%xmm0,  16(%0, %%"REG_a")  \n"
        "movaps %%xmm0,  32(%0, %%"REG_a")  \n"
        "movaps %%xmm0,  48(%0, %%"REG_a")  \n"
        "movaps %%xmm0,  64(%0, %%"REG_a")  \n"
        "movaps %%xmm0,  80(%0, %%"REG_a")  \n"
        "movaps %%xmm0,  96(%0, %%"REG_a")  \n"
        "movaps %%xmm0, 112(%0, %%"REG_a")  \n"
        "add $128, %%"REG_a"                \n"
        " js 1b                             \n"
        : : "r" (((uint8_t *)blocks)+128*6),
            "i" (-128*6)
        : "%"REG_a
    );
}

static void add_bytes_mmx(uint8_t *dst, uint8_t *src, int w){
    x86_reg i=0;
    __asm__ volatile(
        "jmp 2f                 \n\t"
        "1:                     \n\t"
        "movq  (%1, %0), %%mm0  \n\t"
        "movq  (%2, %0), %%mm1  \n\t"
        "paddb %%mm0, %%mm1     \n\t"
        "movq %%mm1, (%2, %0)   \n\t"
        "movq 8(%1, %0), %%mm0  \n\t"
        "movq 8(%2, %0), %%mm1  \n\t"
        "paddb %%mm0, %%mm1     \n\t"
        "movq %%mm1, 8(%2, %0)  \n\t"
        "add $16, %0            \n\t"
        "2:                     \n\t"
        "cmp %3, %0             \n\t"
        " js 1b                 \n\t"
        : "+r" (i)
        : "r"(src), "r"(dst), "r"((x86_reg)w-15)
    );
    for(; i<w; i++)
        dst[i+0] += src[i+0];
}

static void add_bytes_l2_mmx(uint8_t *dst, uint8_t *src1, uint8_t *src2, int w){
    x86_reg i=0;
    __asm__ volatile(
        "jmp 2f                     \n\t"
        "1:                         \n\t"
        "movq   (%2, %0), %%mm0     \n\t"
        "movq  8(%2, %0), %%mm1     \n\t"
        "paddb  (%3, %0), %%mm0     \n\t"
        "paddb 8(%3, %0), %%mm1     \n\t"
        "movq %%mm0,  (%1, %0)      \n\t"
        "movq %%mm1, 8(%1, %0)      \n\t"
        "add $16, %0                \n\t"
        "2:                         \n\t"
        "cmp %4, %0                 \n\t"
        " js 1b                     \n\t"
        : "+r" (i)
        : "r"(dst), "r"(src1), "r"(src2), "r"((x86_reg)w-15)
    );
    for(; i<w; i++)
        dst[i] = src1[i] + src2[i];
}

#if HAVE_7REGS && HAVE_TEN_OPERANDS
static void add_hfyu_median_prediction_cmov(uint8_t *dst, const uint8_t *top, const uint8_t *diff, int w, int *left, int *left_top) {
    x86_reg w2 = -w;
    x86_reg x;
    int l = *left & 0xff;
    int tl = *left_top & 0xff;
    int t;
    __asm__ volatile(
        "mov    %7, %3      \n"
        "1:                 \n"
        "movzbl (%3,%4), %2 \n"
        "mov    %2, %k3     \n"
        "sub   %b1, %b3     \n"
        "add   %b0, %b3     \n"
        "mov    %2, %1      \n"
        "cmp    %0, %2      \n"
        "cmovg  %0, %2      \n"
        "cmovg  %1, %0      \n"
        "cmp   %k3, %0      \n"
        "cmovg %k3, %0      \n"
        "mov    %7, %3      \n"
        "cmp    %2, %0      \n"
        "cmovl  %2, %0      \n"
        "add (%6,%4), %b0   \n"
        "mov   %b0, (%5,%4) \n"
        "inc    %4          \n"
        "jl 1b              \n"
        :"+&q"(l), "+&q"(tl), "=&r"(t), "=&q"(x), "+&r"(w2)
        :"r"(dst+w), "r"(diff+w), "rm"(top+w)
    );
    *left = l;
    *left_top = tl;
}
#endif
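
/* Editor's reference for the cmov loop above (not part of the original
 * file): HuffYUV's median prediction.  Each output byte is the median of
 * left, top and (left + top - topleft), plus the coded difference. */
static inline int mid_pred_ref(int a, int b, int c)
{
    if (a > b) { int t = a; a = b; b = t; } /* now a <= b */
    if (b > c) b = c;                       /* b = min(b, c) */
    return a > b ? a : b;                   /* median of the three */
}

static inline void add_hfyu_median_prediction_ref(uint8_t *dst, const uint8_t *top,
                                                  const uint8_t *diff, int w,
                                                  int *left, int *left_top)
{
    int i;
    uint8_t l = *left, lt = *left_top;
    for (i = 0; i < w; i++) {
        l  = mid_pred_ref(l, top[i], (l + top[i] - lt) & 0xFF) + diff[i];
        lt = top[i];
        dst[i] = l;
    }
    *left     = l;
    *left_top = lt;
}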

#define H263_LOOP_FILTER \
    "pxor %%mm7, %%mm7          \n\t"\
    "movq  %0, %%mm0            \n\t"\
    "movq  %0, %%mm1            \n\t"\
    "movq  %3, %%mm2            \n\t"\
    "movq  %3, %%mm3            \n\t"\
    "punpcklbw %%mm7, %%mm0     \n\t"\
    "punpckhbw %%mm7, %%mm1     \n\t"\
    "punpcklbw %%mm7, %%mm2     \n\t"\
    "punpckhbw %%mm7, %%mm3     \n\t"\
    "psubw %%mm2, %%mm0         \n\t"\
    "psubw %%mm3, %%mm1         \n\t"\
    "movq  %1, %%mm2            \n\t"\
    "movq  %1, %%mm3            \n\t"\
    "movq  %2, %%mm4            \n\t"\
    "movq  %2, %%mm5            \n\t"\
    "punpcklbw %%mm7, %%mm2     \n\t"\
    "punpckhbw %%mm7, %%mm3     \n\t"\
    "punpcklbw %%mm7, %%mm4     \n\t"\
    "punpckhbw %%mm7, %%mm5     \n\t"\
    "psubw %%mm2, %%mm4         \n\t"\
    "psubw %%mm3, %%mm5         \n\t"\
    "psllw $2, %%mm4            \n\t"\
    "psllw $2, %%mm5            \n\t"\
    "paddw %%mm0, %%mm4         \n\t"\
    "paddw %%mm1, %%mm5         \n\t"\
    "pxor %%mm6, %%mm6          \n\t"\
    "pcmpgtw %%mm4, %%mm6       \n\t"\
    "pcmpgtw %%mm5, %%mm7       \n\t"\
    "pxor %%mm6, %%mm4          \n\t"\
    "pxor %%mm7, %%mm5          \n\t"\
    "psubw %%mm6, %%mm4         \n\t"\
    "psubw %%mm7, %%mm5         \n\t"\
    "psrlw $3, %%mm4            \n\t"\
    "psrlw $3, %%mm5            \n\t"\
    "packuswb %%mm5, %%mm4      \n\t"\
    "packsswb %%mm7, %%mm6      \n\t"\
    "pxor %%mm7, %%mm7          \n\t"\
    "movd %4, %%mm2             \n\t"\
    "punpcklbw %%mm2, %%mm2     \n\t"\
    "punpcklbw %%mm2, %%mm2     \n\t"\
    "punpcklbw %%mm2, %%mm2     \n\t"\
    "psubusb %%mm4, %%mm2       \n\t"\
    "movq %%mm2, %%mm3          \n\t"\
    "psubusb %%mm4, %%mm3       \n\t"\
    "psubb %%mm3, %%mm2         \n\t"\
    "movq %1, %%mm3             \n\t"\
    "movq %2, %%mm4             \n\t"\
    "pxor %%mm6, %%mm3          \n\t"\
    "pxor %%mm6, %%mm4          \n\t"\
    "paddusb %%mm2, %%mm3       \n\t"\
    "psubusb %%mm2, %%mm4       \n\t"\
    "pxor %%mm6, %%mm3          \n\t"\
    "pxor %%mm6, %%mm4          \n\t"\
    "paddusb %%mm2, %%mm2       \n\t"\
    "packsswb %%mm1, %%mm0      \n\t"\
    "pcmpgtb %%mm0, %%mm7       \n\t"\
    "pxor %%mm7, %%mm0          \n\t"\
    "psubb %%mm7, %%mm0         \n\t"\
    "movq %%mm0, %%mm1          \n\t"\
    "psubusb %%mm2, %%mm0       \n\t"\
    "psubb %%mm0, %%mm1         \n\t"\
    "pand %5, %%mm1             \n\t"\
    "psrlw $2, %%mm1            \n\t"\
    "pxor %%mm7, %%mm1          \n\t"\
    "psubb %%mm7, %%mm1         \n\t"\
    "movq %0, %%mm5             \n\t"\
    "movq %3, %%mm6             \n\t"\
    "psubb %%mm1, %%mm5         \n\t"\
    "paddb %%mm1, %%mm6         \n\t"
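
/* Editor's reference (scalar form of the filter above, modeled on the C
 * version elsewhere in libavcodec; not part of the original file): for
 * the four pixels p0..p3 across an edge, a delta d is derived, folded
 * against the filter strength, and applied to the two inner pixels; the
 * outer pair receives at most half the correction. */
static inline void h263_loop_filter_ref(uint8_t *p, int stride, int strength)
{
    int p0 = p[-2 * stride], p1 = p[-stride], p2 = p[0], p3 = p[stride];
    int d  = (p0 - p3 + 4 * (p2 - p1)) / 8;
    int d1, ad1, d2;

    if      (d < -2 * strength) d1 = 0;
    else if (d < -strength)     d1 = -2 * strength - d;
    else if (d <  strength)     d1 = d;
    else if (d <  2 * strength) d1 = 2 * strength - d;
    else                        d1 = 0;

    p[-stride] = av_clip_uint8(p1 + d1);
    p[0]       = av_clip_uint8(p2 - d1);

    ad1 = (d1 >= 0 ? d1 : -d1) >> 1;
    d2  = av_clip((p0 - p3) / 4, -ad1, ad1);

    p[-2 * stride] = p0 - d2;
    p[stride]      = p3 + d2;
}
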
static void h263_v_loop_filter_mmx(uint8_t *src, int stride, int qscale){
    if(CONFIG_H263_DECODER || CONFIG_H263_ENCODER) {
    const int strength= ff_h263_loop_filter_strength[qscale];

    __asm__ volatile(

        H263_LOOP_FILTER

        "movq %%mm3, %1     \n\t"
        "movq %%mm4, %2     \n\t"
        "movq %%mm5, %0     \n\t"
        "movq %%mm6, %3     \n\t"
        : "+m" (*(uint64_t*)(src - 2*stride)),
          "+m" (*(uint64_t*)(src - 1*stride)),
          "+m" (*(uint64_t*)(src + 0*stride)),
          "+m" (*(uint64_t*)(src + 1*stride))
        : "g" (2*strength), "m"(ff_pb_FC)
    );
    }
}

static void h263_h_loop_filter_mmx(uint8_t *src, int stride, int qscale){
    if(CONFIG_H263_DECODER || CONFIG_H263_ENCODER) {
    const int strength= ff_h263_loop_filter_strength[qscale];
    DECLARE_ALIGNED(8, uint64_t, temp)[4];
    uint8_t *btemp= (uint8_t*)temp;

    src -= 2;

    transpose4x4(btemp  , src           , 8, stride);
    transpose4x4(btemp+4, src + 4*stride, 8, stride);
    __asm__ volatile(
        H263_LOOP_FILTER // 5 3 4 6

        : "+m" (temp[0]),
          "+m" (temp[1]),
          "+m" (temp[2]),
          "+m" (temp[3])
        : "g" (2*strength), "m"(ff_pb_FC)
    );

    __asm__ volatile(
        "movq %%mm5, %%mm1          \n\t"
        "movq %%mm4, %%mm0          \n\t"
        "punpcklbw %%mm3, %%mm5     \n\t"
        "punpcklbw %%mm6, %%mm4     \n\t"
        "punpckhbw %%mm3, %%mm1     \n\t"
        "punpckhbw %%mm6, %%mm0     \n\t"
        "movq %%mm5, %%mm3          \n\t"
        "movq %%mm1, %%mm6          \n\t"
        "punpcklwd %%mm4, %%mm5     \n\t"
        "punpcklwd %%mm0, %%mm1     \n\t"
        "punpckhwd %%mm4, %%mm3     \n\t"
        "punpckhwd %%mm0, %%mm6     \n\t"
        "movd %%mm5, (%0)           \n\t"
        "punpckhdq %%mm5, %%mm5     \n\t"
        "movd %%mm5, (%0,%2)        \n\t"
        "movd %%mm3, (%0,%2,2)      \n\t"
        "punpckhdq %%mm3, %%mm3     \n\t"
        "movd %%mm3, (%0,%3)        \n\t"
        "movd %%mm1, (%1)           \n\t"
        "punpckhdq %%mm1, %%mm1     \n\t"
        "movd %%mm1, (%1,%2)        \n\t"
        "movd %%mm6, (%1,%2,2)      \n\t"
        "punpckhdq %%mm6, %%mm6     \n\t"
        "movd %%mm6, (%1,%3)        \n\t"
        :: "r" (src),
           "r" (src + 4*stride),
           "r" ((x86_reg)   stride ),
           "r" ((x86_reg)(3*stride))
    );
    }
}

/* Draw the edges of width 'w' of an image of size width x height.
   This MMX version can only handle w==8 or w==16. */
static void draw_edges_mmx(uint8_t *buf, int wrap, int width, int height, int w)
{
    uint8_t *ptr, *last_line;
    int i;

    last_line = buf + (height - 1) * wrap;
    /* left and right */
    ptr = buf;
    if(w==8)
    {
        __asm__ volatile(
            "1:                         \n\t"
            "movd (%0), %%mm0           \n\t"
            "punpcklbw %%mm0, %%mm0     \n\t"
            "punpcklwd %%mm0, %%mm0     \n\t"
            "punpckldq %%mm0, %%mm0     \n\t"
            "movq %%mm0, -8(%0)         \n\t"
            "movq -8(%0, %2), %%mm1     \n\t"
            "punpckhbw %%mm1, %%mm1     \n\t"
            "punpckhwd %%mm1, %%mm1     \n\t"
            "punpckhdq %%mm1, %%mm1     \n\t"
            "movq %%mm1, (%0, %2)       \n\t"
            "add %1, %0                 \n\t"
            "cmp %3, %0                 \n\t"
            " jb 1b                     \n\t"
            : "+r" (ptr)
            : "r" ((x86_reg)wrap), "r" ((x86_reg)width), "r" (ptr + wrap*height)
        );
    }
    else
    {
        __asm__ volatile(
            "1:                         \n\t"
            "movd (%0), %%mm0           \n\t"
            "punpcklbw %%mm0, %%mm0     \n\t"
            "punpcklwd %%mm0, %%mm0     \n\t"
            "punpckldq %%mm0, %%mm0     \n\t"
            "movq %%mm0, -8(%0)         \n\t"
            "movq %%mm0, -16(%0)        \n\t"
            "movq -8(%0, %2), %%mm1     \n\t"
            "punpckhbw %%mm1, %%mm1     \n\t"
            "punpckhwd %%mm1, %%mm1     \n\t"
            "punpckhdq %%mm1, %%mm1     \n\t"
            "movq %%mm1,  (%0, %2)      \n\t"
            "movq %%mm1, 8(%0, %2)      \n\t"
            "add %1, %0                 \n\t"
            "cmp %3, %0                 \n\t"
            " jb 1b                     \n\t"
            : "+r" (ptr)
            : "r" ((x86_reg)wrap), "r" ((x86_reg)width), "r" (ptr + wrap*height)
        );
    }

    for(i=0;i<w;i+=4) {
        /* top and bottom (and hopefully also the corners) */
        ptr= buf - (i + 1) * wrap - w;
        __asm__ volatile(
            "1:                         \n\t"
            "movq (%1, %0), %%mm0       \n\t"
            "movq %%mm0, (%0)           \n\t"
            "movq %%mm0, (%0, %2)       \n\t"
            "movq %%mm0, (%0, %2, 2)    \n\t"
            "movq %%mm0, (%0, %3)       \n\t"
            "add $8, %0                 \n\t"
            "cmp %4, %0                 \n\t"
            " jb 1b                     \n\t"
            : "+r" (ptr)
            : "r" ((x86_reg)buf - (x86_reg)ptr - w), "r" ((x86_reg)-wrap), "r" ((x86_reg)-wrap*3), "r" (ptr+width+2*w)
        );
        ptr= last_line + (i + 1) * wrap - w;
        __asm__ volatile(
            "1:                         \n\t"
            "movq (%1, %0), %%mm0       \n\t"
            "movq %%mm0, (%0)           \n\t"
            "movq %%mm0, (%0, %2)       \n\t"
            "movq %%mm0, (%0, %2, 2)    \n\t"
            "movq %%mm0, (%0, %3)       \n\t"
            "add $8, %0                 \n\t"
            "cmp %4, %0                 \n\t"
            " jb 1b                     \n\t"
            : "+r" (ptr)
            : "r" ((x86_reg)last_line - (x86_reg)ptr - w), "r" ((x86_reg)wrap), "r" ((x86_reg)wrap*3), "r" (ptr+width+2*w)
        );
    }
}
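
/* Editor's sketch of the intent (not part of the original file; assumes
 * <string.h> is reachable through the existing includes): replicate each
 * row's outermost pixels across the w-pixel side borders, then copy the
 * first and last bordered rows outward to fill top, bottom and corners. */
static inline void draw_edges_ref(uint8_t *buf, int wrap, int width, int height, int w)
{
    int i, j;
    for (i = 0; i < height; i++) {
        memset(buf + i * wrap - w, buf[i * wrap], w);                 /* left  */
        memset(buf + i * wrap + width, buf[i * wrap + width - 1], w); /* right */
    }
    for (j = 0; j < w; j++) {
        memcpy(buf - (j + 1) * wrap - w, buf - w, width + 2 * w);     /* top */
        memcpy(buf + (height + j) * wrap - w,
               buf + (height - 1) * wrap - w, width + 2 * w);         /* bottom */
    }
}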

#define PAETH(cpu, abs3)\
static void add_png_paeth_prediction_##cpu(uint8_t *dst, uint8_t *src, uint8_t *top, int w, int bpp)\
{\
    x86_reg i = -bpp;\
    x86_reg end = w-3;\
    __asm__ volatile(\
        "pxor      %%mm7, %%mm7 \n"\
        "movd    (%1,%0), %%mm0 \n"\
        "movd    (%2,%0), %%mm1 \n"\
        "punpcklbw %%mm7, %%mm0 \n"\
        "punpcklbw %%mm7, %%mm1 \n"\
        "add       %4, %0 \n"\
        "1: \n"\
        "movq      %%mm1, %%mm2 \n"\
        "movd    (%2,%0), %%mm1 \n"\
        "movq      %%mm2, %%mm3 \n"\
        "punpcklbw %%mm7, %%mm1 \n"\
        "movq      %%mm2, %%mm4 \n"\
        "psubw     %%mm1, %%mm3 \n"\
        "psubw     %%mm0, %%mm4 \n"\
        "movq      %%mm3, %%mm5 \n"\
        "paddw     %%mm4, %%mm5 \n"\
        abs3\
        "movq      %%mm4, %%mm6 \n"\
        "pminsw    %%mm5, %%mm6 \n"\
        "pcmpgtw   %%mm6, %%mm3 \n"\
        "pcmpgtw   %%mm5, %%mm4 \n"\
        "movq      %%mm4, %%mm6 \n"\
        "pand      %%mm3, %%mm4 \n"\
        "pandn     %%mm3, %%mm6 \n"\
        "pandn     %%mm0, %%mm3 \n"\
        "movd    (%3,%0), %%mm0 \n"\
        "pand      %%mm1, %%mm6 \n"\
        "pand      %%mm4, %%mm2 \n"\
        "punpcklbw %%mm7, %%mm0 \n"\
        "movq      %6,    %%mm5 \n"\
        "paddw     %%mm6, %%mm0 \n"\
        "paddw     %%mm2, %%mm3 \n"\
        "paddw     %%mm3, %%mm0 \n"\
        "pand      %%mm5, %%mm0 \n"\
        "movq      %%mm0, %%mm3 \n"\
        "packuswb  %%mm3, %%mm3 \n"\
        "movd      %%mm3, (%1,%0) \n"\
        "add       %4, %0 \n"\
        "cmp       %5, %0 \n"\
        "jle 1b \n"\
        :"+r"(i)\
        :"r"(dst), "r"(top), "r"(src), "r"((x86_reg)bpp), "g"(end),\
         "m"(ff_pw_255)\
        :"memory"\
    );\
}

#define ABS3_MMX2\
        "psubw     %%mm5, %%mm7 \n"\
        "pmaxsw    %%mm7, %%mm5 \n"\
        "pxor      %%mm6, %%mm6 \n"\
        "pxor      %%mm7, %%mm7 \n"\
        "psubw     %%mm3, %%mm6 \n"\
        "psubw     %%mm4, %%mm7 \n"\
        "pmaxsw    %%mm6, %%mm3 \n"\
        "pmaxsw    %%mm7, %%mm4 \n"\
        "pxor      %%mm7, %%mm7 \n"

#define ABS3_SSSE3\
        "pabsw     %%mm3, %%mm3 \n"\
        "pabsw     %%mm4, %%mm4 \n"\
        "pabsw     %%mm5, %%mm5 \n"

PAETH(mmx2, ABS3_MMX2)
#if HAVE_SSSE3
PAETH(ssse3, ABS3_SSSE3)
#endif
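
/* Editor's reference (not part of the original file): the scalar Paeth
 * predictor from the PNG specification that the vector code above
 * evaluates four lanes at a time. */
static inline int paeth_ref(int left, int top, int topleft)
{
    int p  = left + top - topleft; /* initial estimate */
    int pa = p > left    ? p - left    : left    - p;
    int pb = p > top     ? p - top     : top     - p;
    int pc = p > topleft ? p - topleft : topleft - p;
    if (pa <= pb && pa <= pc) return left;
    if (pb <= pc)             return top;
    return topleft;
}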

#define QPEL_V_LOW(m3,m4,m5,m6, pw_20, pw_3, rnd, in0, in1, in2, in7, out, OP)\
    "paddw " #m4 ", " #m3 "             \n\t" /* x1 */\
    "movq "MANGLE(ff_pw_20)", %%mm4     \n\t" /* 20 */\
    "pmullw " #m3 ", %%mm4              \n\t" /* 20x1 */\
    "movq "#in7", " #m3 "               \n\t" /* d */\
    "movq "#in0", %%mm5                 \n\t" /* D */\
    "paddw " #m3 ", %%mm5               \n\t" /* x4 */\
    "psubw %%mm5, %%mm4                 \n\t" /* 20x1 - x4 */\
    "movq "#in1", %%mm5                 \n\t" /* C */\
    "movq "#in2", %%mm6                 \n\t" /* B */\
    "paddw " #m6 ", %%mm5               \n\t" /* x3 */\
    "paddw " #m5 ", %%mm6               \n\t" /* x2 */\
    "paddw %%mm6, %%mm6                 \n\t" /* 2x2 */\
    "psubw %%mm6, %%mm5                 \n\t" /* -2x2 + x3 */\
    "pmullw "MANGLE(ff_pw_3)", %%mm5    \n\t" /* -6x2 + 3x3 */\
    "paddw " #rnd ", %%mm4              \n\t" /* x2 */\
    "paddw %%mm4, %%mm5                 \n\t" /* 20x1 - 6x2 + 3x3 - x4 */\
    "psraw $5, %%mm5                    \n\t"\
    "packuswb %%mm5, %%mm5              \n\t"\
    OP(%%mm5, out, %%mm7, d)
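
/* Editor's note: QPEL_V_LOW above evaluates one row of the MPEG-4
 * quarter-pel lowpass filter with taps (-1, 3, -6, 20, 20, -6, 3, -1)/32.
 * With x1..x4 the symmetric tap-pair sums (x1 innermost), it reduces to
 * the scalar expression below (illustration, not in the original file): */
static inline int qpel_lowpass_ref(int x1, int x2, int x3, int x4, int rounder)
{
    return (20 * x1 - 6 * x2 + 3 * x3 - x4 + rounder) >> 5; /* then saturate to 8 bits */
}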

#define QPEL_BASE(OPNAME, ROUNDER, RND, OP_MMX2, OP_3DNOW)\
static void OPNAME ## mpeg4_qpel16_h_lowpass_mmx2(uint8_t *dst, uint8_t *src, int dstStride, int srcStride, int h){\
    uint64_t temp;\
\
    __asm__ volatile(\
        "pxor %%mm7, %%mm7                \n\t"\
        "1:                               \n\t"\
        "movq  (%0), %%mm0                \n\t" /* ABCDEFGH */\
        "movq %%mm0, %%mm1                \n\t" /* ABCDEFGH */\
        "movq %%mm0, %%mm2                \n\t" /* ABCDEFGH */\
        "punpcklbw %%mm7, %%mm0           \n\t" /* 0A0B0C0D */\
        "punpckhbw %%mm7, %%mm1           \n\t" /* 0E0F0G0H */\
        "pshufw $0x90, %%mm0, %%mm5       \n\t" /* 0A0A0B0C */\
        "pshufw $0x41, %%mm0, %%mm6       \n\t" /* 0B0A0A0B */\
        "movq %%mm2, %%mm3                \n\t" /* ABCDEFGH */\
        "movq %%mm2, %%mm4                \n\t" /* ABCDEFGH */\
        "psllq $8, %%mm2                  \n\t" /* 0ABCDEFG */\
        "psllq $16, %%mm3                 \n\t" /* 00ABCDEF */\
        "psllq $24, %%mm4                 \n\t" /* 000ABCDE */\
        "punpckhbw %%mm7, %%mm2           \n\t" /* 0D0E0F0G */\
        "punpckhbw %%mm7, %%mm3           \n\t" /* 0C0D0E0F */\
        "punpckhbw %%mm7, %%mm4           \n\t" /* 0B0C0D0E */\
        "paddw %%mm3, %%mm5               \n\t" /* b */\
        "paddw %%mm2, %%mm6               \n\t" /* c */\
        "paddw %%mm5, %%mm5               \n\t" /* 2b */\
        "psubw %%mm5, %%mm6               \n\t" /* c - 2b */\
        "pshufw $0x06, %%mm0, %%mm5       \n\t" /* 0C0B0A0A */\
        "pmullw "MANGLE(ff_pw_3)", %%mm6  \n\t" /* 3c - 6b */\
        "paddw %%mm4, %%mm0               \n\t" /* a */\
        "paddw %%mm1, %%mm5               \n\t" /* d */\
        "pmullw "MANGLE(ff_pw_20)", %%mm0 \n\t" /* 20a */\
        "psubw %%mm5, %%mm0               \n\t" /* 20a - d */\
        "paddw %6, %%mm6                  \n\t"\
        "paddw %%mm6, %%mm0               \n\t" /* 20a - 6b + 3c - d */\
        "psraw $5, %%mm0                  \n\t"\
        "movq %%mm0, %5                   \n\t"\
        /* mm1=EFGH, mm2=DEFG, mm3=CDEF, mm4=BCDE, mm7=0 */\
\
        "movq 5(%0), %%mm0                \n\t" /* FGHIJKLM */\
        "movq %%mm0, %%mm5                \n\t" /* FGHIJKLM */\
        "movq %%mm0, %%mm6                \n\t" /* FGHIJKLM */\
        "psrlq $8, %%mm0                  \n\t" /* GHIJKLM0 */\
        "psrlq $16, %%mm5                 \n\t" /* HIJKLM00 */\
        "punpcklbw %%mm7, %%mm0           \n\t" /* 0G0H0I0J */\
        "punpcklbw %%mm7, %%mm5           \n\t" /* 0H0I0J0K */\
        "paddw %%mm0, %%mm2               \n\t" /* b */\
        "paddw %%mm5, %%mm3               \n\t" /* c */\
        "paddw %%mm2, %%mm2               \n\t" /* 2b */\
        "psubw %%mm2, %%mm3               \n\t" /* c - 2b */\
        "movq %%mm6, %%mm2                \n\t" /* FGHIJKLM */\
        "psrlq $24, %%mm6                 \n\t" /* IJKLM000 */\
        "punpcklbw %%mm7, %%mm2           \n\t" /* 0F0G0H0I */\
        "punpcklbw %%mm7, %%mm6           \n\t" /* 0I0J0K0L */\
        "pmullw "MANGLE(ff_pw_3)", %%mm3  \n\t" /* 3c - 6b */\
        "paddw %%mm2, %%mm1               \n\t" /* a */\
        "paddw %%mm6, %%mm4               \n\t" /* d */\
        "pmullw "MANGLE(ff_pw_20)", %%mm1 \n\t" /* 20a */\
        "psubw %%mm4, %%mm3               \n\t" /* - 6b +3c - d */\
        "paddw %6, %%mm1                  \n\t"\
        "paddw %%mm1, %%mm3               \n\t" /* 20a - 6b +3c - d */\
        "psraw $5, %%mm3                  \n\t"\
        "movq %5, %%mm1                   \n\t"\
        "packuswb %%mm3, %%mm1            \n\t"\
        OP_MMX2(%%mm1, (%1),%%mm4, q)\
        /* mm0= GHIJ, mm2=FGHI, mm5=HIJK, mm6=IJKL, mm7=0 */\
\
        "movq 9(%0), %%mm1                \n\t" /* JKLMNOPQ */\
        "movq %%mm1, %%mm4                \n\t" /* JKLMNOPQ */\
        "movq %%mm1, %%mm3                \n\t" /* JKLMNOPQ */\
        "psrlq $8, %%mm1                  \n\t" /* KLMNOPQ0 */\
        "psrlq $16, %%mm4                 \n\t" /* LMNOPQ00 */\
        "punpcklbw %%mm7, %%mm1           \n\t" /* 0K0L0M0N */\
        "punpcklbw %%mm7, %%mm4           \n\t" /* 0L0M0N0O */\
        "paddw %%mm1, %%mm5               \n\t" /* b */\
        "paddw %%mm4, %%mm0               \n\t" /* c */\
        "paddw %%mm5, %%mm5               \n\t" /* 2b */\
        "psubw %%mm5, %%mm0               \n\t" /* c - 2b */\
        "movq %%mm3, %%mm5                \n\t" /* JKLMNOPQ */\
        "psrlq $24, %%mm3                 \n\t" /* MNOPQ000 */\
        "pmullw "MANGLE(ff_pw_3)", %%mm0  \n\t" /* 3c - 6b */\
        "punpcklbw %%mm7, %%mm3           \n\t" /* 0M0N0O0P */\
        "paddw %%mm3, %%mm2               \n\t" /* d */\
        "psubw %%mm2, %%mm0               \n\t" /* -6b + 3c - d */\
        "movq %%mm5, %%mm2                \n\t" /* JKLMNOPQ */\
        "punpcklbw %%mm7, %%mm2           \n\t" /* 0J0K0L0M */\
        "punpckhbw %%mm7, %%mm5           \n\t" /* 0N0O0P0Q */\
        "paddw %%mm2, %%mm6               \n\t" /* a */\
        "pmullw "MANGLE(ff_pw_20)", %%mm6 \n\t" /* 20a */\
        "paddw %6, %%mm0                  \n\t"\
        "paddw %%mm6, %%mm0               \n\t" /* 20a - 6b + 3c - d */\
        "psraw $5, %%mm0                  \n\t"\
        /* mm1=KLMN, mm2=JKLM, mm3=MNOP, mm4=LMNO, mm5=NOPQ mm7=0 */\
\
        "paddw %%mm5, %%mm3               \n\t" /* a */\
        "pshufw $0xF9, %%mm5, %%mm6       \n\t" /* 0O0P0Q0Q */\
        "paddw %%mm4, %%mm6               \n\t" /* b */\
        "pshufw $0xBE, %%mm5, %%mm4       \n\t" /* 0P0Q0Q0P */\
        "pshufw $0x6F, %%mm5, %%mm5       \n\t" /* 0Q0Q0P0O */\
        "paddw %%mm1, %%mm4               \n\t" /* c */\
        "paddw %%mm2, %%mm5               \n\t" /* d */\
        "paddw %%mm6, %%mm6               \n\t" /* 2b */\
        "psubw %%mm6, %%mm4               \n\t" /* c - 2b */\
        "pmullw "MANGLE(ff_pw_20)", %%mm3 \n\t" /* 20a */\
        "pmullw "MANGLE(ff_pw_3)", %%mm4  \n\t" /* 3c - 6b */\
        "psubw %%mm5, %%mm3               \n\t" /* -6b + 3c - d */\
        "paddw %6, %%mm4                  \n\t"\
        "paddw %%mm3, %%mm4               \n\t" /* 20a - 6b + 3c - d */\
        "psraw $5, %%mm4                  \n\t"\
        "packuswb %%mm4, %%mm0            \n\t"\
        OP_MMX2(%%mm0, 8(%1), %%mm4, q)\
\
        "add %3, %0                       \n\t"\
        "add %4, %1                       \n\t"\
        "decl %2                          \n\t"\
        " jnz 1b                          \n\t"\
        : "+a"(src), "+c"(dst), "+D"(h)\
        : "d"((x86_reg)srcStride), "S"((x86_reg)dstStride), /*"m"(ff_pw_20), "m"(ff_pw_3),*/ "m"(temp), "m"(ROUNDER)\
        : "memory"\
    );\
}\
\
static void OPNAME ## mpeg4_qpel16_h_lowpass_3dnow(uint8_t *dst, uint8_t *src, int dstStride, int srcStride, int h){\
    int i;\
    int16_t temp[16];\
    /* quick HACK, XXX FIXME MUST be optimized */\
    for(i=0; i<h; i++)\
    {\
        temp[ 0]= (src[ 0]+src[ 1])*20 - (src[ 0]+src[ 2])*6 + (src[ 1]+src[ 3])*3 - (src[ 2]+src[ 4]);\
        temp[ 1]= (src[ 1]+src[ 2])*20 - (src[ 0]+src[ 3])*6 + (src[ 0]+src[ 4])*3 - (src[ 1]+src[ 5]);\
        temp[ 2]= (src[ 2]+src[ 3])*20 - (src[ 1]+src[ 4])*6 + (src[ 0]+src[ 5])*3 - (src[ 0]+src[ 6]);\
        temp[ 3]= (src[ 3]+src[ 4])*20 - (src[ 2]+src[ 5])*6 + (src[ 1]+src[ 6])*3 - (src[ 0]+src[ 7]);\
        temp[ 4]= (src[ 4]+src[ 5])*20 - (src[ 3]+src[ 6])*6 + (src[ 2]+src[ 7])*3 - (src[ 1]+src[ 8]);\
        temp[ 5]= (src[ 5]+src[ 6])*20 - (src[ 4]+src[ 7])*6 + (src[ 3]+src[ 8])*3 - (src[ 2]+src[ 9]);\
        temp[ 6]= (src[ 6]+src[ 7])*20 - (src[ 5]+src[ 8])*6 + (src[ 4]+src[ 9])*3 - (src[ 3]+src[10]);\
        temp[ 7]= (src[ 7]+src[ 8])*20 - (src[ 6]+src[ 9])*6 + (src[ 5]+src[10])*3 - (src[ 4]+src[11]);\
        temp[ 8]= (src[ 8]+src[ 9])*20 - (src[ 7]+src[10])*6 + (src[ 6]+src[11])*3 - (src[ 5]+src[12]);\
        temp[ 9]= (src[ 9]+src[10])*20 - (src[ 8]+src[11])*6 + (src[ 7]+src[12])*3 - (src[ 6]+src[13]);\
        temp[10]= (src[10]+src[11])*20 - (src[ 9]+src[12])*6 + (src[ 8]+src[13])*3 - (src[ 7]+src[14]);\
        temp[11]= (src[11]+src[12])*20 - (src[10]+src[13])*6 + (src[ 9]+src[14])*3 - (src[ 8]+src[15]);\
        temp[12]= (src[12]+src[13])*20 - (src[11]+src[14])*6 + (src[10]+src[15])*3 - (src[ 9]+src[16]);\
        temp[13]= (src[13]+src[14])*20 - (src[12]+src[15])*6 + (src[11]+src[16])*3 - (src[10]+src[16]);\
        temp[14]= (src[14]+src[15])*20 - (src[13]+src[16])*6 + (src[12]+src[16])*3 - (src[11]+src[15]);\
        temp[15]= (src[15]+src[16])*20 - (src[14]+src[16])*6 + (src[13]+src[15])*3 - (src[12]+src[14]);\
        __asm__ volatile(\
            "movq (%0), %%mm0           \n\t"\
            "movq 8(%0), %%mm1          \n\t"\
            "paddw %2, %%mm0            \n\t"\
            "paddw %2, %%mm1            \n\t"\
            "psraw $5, %%mm0            \n\t"\
            "psraw $5, %%mm1            \n\t"\
            "packuswb %%mm1, %%mm0      \n\t"\
            OP_3DNOW(%%mm0, (%1), %%mm1, q)\
            "movq 16(%0), %%mm0         \n\t"\
            "movq 24(%0), %%mm1         \n\t"\
            "paddw %2, %%mm0            \n\t"\
            "paddw %2, %%mm1            \n\t"\
            "psraw $5, %%mm0            \n\t"\
            "psraw $5, %%mm1            \n\t"\
            "packuswb %%mm1, %%mm0      \n\t"\
            OP_3DNOW(%%mm0, 8(%1), %%mm1, q)\
            :: "r"(temp), "r"(dst), "m"(ROUNDER)\
            : "memory"\
        );\
        dst+=dstStride;\
        src+=srcStride;\
    }\
}\
\
static void OPNAME ## mpeg4_qpel8_h_lowpass_mmx2(uint8_t *dst, uint8_t *src, int dstStride, int srcStride, int h){\
    __asm__ volatile(\
        "pxor %%mm7, %%mm7                \n\t"\
        "1:                               \n\t"\
        "movq  (%0), %%mm0                \n\t" /* ABCDEFGH */\
        "movq %%mm0, %%mm1                \n\t" /* ABCDEFGH */\
        "movq %%mm0, %%mm2                \n\t" /* ABCDEFGH */\
        "punpcklbw %%mm7, %%mm0           \n\t" /* 0A0B0C0D */\
        "punpckhbw %%mm7, %%mm1           \n\t" /* 0E0F0G0H */\
        "pshufw $0x90, %%mm0, %%mm5       \n\t" /* 0A0A0B0C */\
        "pshufw $0x41, %%mm0, %%mm6       \n\t" /* 0B0A0A0B */\
        "movq %%mm2, %%mm3                \n\t" /* ABCDEFGH */\
        "movq %%mm2, %%mm4                \n\t" /* ABCDEFGH */\
        "psllq $8, %%mm2                  \n\t" /* 0ABCDEFG */\
        "psllq $16, %%mm3                 \n\t" /* 00ABCDEF */\
        "psllq $24, %%mm4                 \n\t" /* 000ABCDE */\
        "punpckhbw %%mm7, %%mm2           \n\t" /* 0D0E0F0G */\
        "punpckhbw %%mm7, %%mm3           \n\t" /* 0C0D0E0F */\
        "punpckhbw %%mm7, %%mm4           \n\t" /* 0B0C0D0E */\
        "paddw %%mm3, %%mm5               \n\t" /* b */\
        "paddw %%mm2, %%mm6               \n\t" /* c */\
        "paddw %%mm5, %%mm5               \n\t" /* 2b */\
        "psubw %%mm5, %%mm6               \n\t" /* c - 2b */\
        "pshufw $0x06, %%mm0, %%mm5       \n\t" /* 0C0B0A0A */\
        "pmullw "MANGLE(ff_pw_3)", %%mm6  \n\t" /* 3c - 6b */\
        "paddw %%mm4, %%mm0               \n\t" /* a */\
        "paddw %%mm1, %%mm5               \n\t" /* d */\
        "pmullw "MANGLE(ff_pw_20)", %%mm0 \n\t" /* 20a */\
        "psubw %%mm5, %%mm0               \n\t" /* 20a - d */\
        "paddw %5, %%mm6                  \n\t"\
        "paddw %%mm6, %%mm0               \n\t" /* 20a - 6b + 3c - d */\
        "psraw $5, %%mm0                  \n\t"\
        /* mm1=EFGH, mm2=DEFG, mm3=CDEF, mm4=BCDE, mm7=0 */\
\
        "movd 5(%0), %%mm5                \n\t" /* FGHI */\
        "punpcklbw %%mm7, %%mm5           \n\t" /* 0F0G0H0I */\
        "pshufw $0xF9, %%mm5, %%mm6       \n\t" /* 0G0H0I0I */\
        "paddw %%mm5, %%mm1               \n\t" /* a */\
        "paddw %%mm6, %%mm2               \n\t" /* b */\
        "pshufw $0xBE, %%mm5, %%mm6       \n\t" /* 0H0I0I0H */\
        "pshufw $0x6F, %%mm5, %%mm5       \n\t" /* 0I0I0H0G */\
        "paddw %%mm6, %%mm3               \n\t" /* c */\
        "paddw %%mm5, %%mm4               \n\t" /* d */\
        "paddw %%mm2, %%mm2               \n\t" /* 2b */\
        "psubw %%mm2, %%mm3               \n\t" /* c - 2b */\
        "pmullw "MANGLE(ff_pw_20)", %%mm1 \n\t" /* 20a */\
        "pmullw "MANGLE(ff_pw_3)", %%mm3  \n\t" /* 3c - 6b */\
        "psubw %%mm4, %%mm3               \n\t" /* -6b + 3c - d */\
        "paddw %5, %%mm1                  \n\t"\
        "paddw %%mm1, %%mm3               \n\t" /* 20a - 6b + 3c - d */\
        "psraw $5, %%mm3                  \n\t"\
        "packuswb %%mm3, %%mm0            \n\t"\
        OP_MMX2(%%mm0, (%1), %%mm4, q)\
\
        "add %3, %0                       \n\t"\
        "add %4, %1                       \n\t"\
        "decl %2                          \n\t"\
        " jnz 1b                          \n\t"\
        : "+a"(src), "+c"(dst), "+d"(h)\
        : "S"((x86_reg)srcStride), "D"((x86_reg)dstStride), /*"m"(ff_pw_20), "m"(ff_pw_3),*/ "m"(ROUNDER)\
        : "memory"\
    );\
}\
\
static void OPNAME ## mpeg4_qpel8_h_lowpass_3dnow(uint8_t *dst, uint8_t *src, int dstStride, int srcStride, int h){\
    int i;\
    int16_t temp[8];\
    /* quick HACK, XXX FIXME MUST be optimized */\
    for(i=0; i<h; i++)\
    {\
        temp[ 0]= (src[ 0]+src[ 1])*20 - (src[ 0]+src[ 2])*6 + (src[ 1]+src[ 3])*3 - (src[ 2]+src[ 4]);\
        temp[ 1]= (src[ 1]+src[ 2])*20 - (src[ 0]+src[ 3])*6 + (src[ 0]+src[ 4])*3 - (src[ 1]+src[ 5]);\
        temp[ 2]= (src[ 2]+src[ 3])*20 - (src[ 1]+src[ 4])*6 + (src[ 0]+src[ 5])*3 - (src[ 0]+src[ 6]);\
        temp[ 3]= (src[ 3]+src[ 4])*20 - (src[ 2]+src[ 5])*6 + (src[ 1]+src[ 6])*3 - (src[ 0]+src[ 7]);\
        temp[ 4]= (src[ 4]+src[ 5])*20 - (src[ 3]+src[ 6])*6 + (src[ 2]+src[ 7])*3 - (src[ 1]+src[ 8]);\
        temp[ 5]= (src[ 5]+src[ 6])*20 - (src[ 4]+src[ 7])*6 + (src[ 3]+src[ 8])*3 - (src[ 2]+src[ 8]);\
        temp[ 6]= (src[ 6]+src[ 7])*20 - (src[ 5]+src[ 8])*6 + (src[ 4]+src[ 8])*3 - (src[ 3]+src[ 7]);\
        temp[ 7]= (src[ 7]+src[ 8])*20 - (src[ 6]+src[ 8])*6 + (src[ 5]+src[ 7])*3 - (src[ 4]+src[ 6]);\
        __asm__ volatile(\
            "movq (%0), %%mm0           \n\t"\
            "movq 8(%0), %%mm1          \n\t"\
            "paddw %2, %%mm0            \n\t"\
            "paddw %2, %%mm1            \n\t"\
            "psraw $5, %%mm0            \n\t"\
            "psraw $5, %%mm1            \n\t"\
            "packuswb %%mm1, %%mm0      \n\t"\
            OP_3DNOW(%%mm0, (%1), %%mm1, q)\
            :: "r"(temp), "r"(dst), "m"(ROUNDER)\
            :"memory"\
        );\
        dst+=dstStride;\
        src+=srcStride;\
    }\
}

#define QPEL_OP(OPNAME, ROUNDER, RND, OP, MMX)\
\
static void OPNAME ## mpeg4_qpel16_v_lowpass_ ## MMX(uint8_t *dst, uint8_t *src, int dstStride, int srcStride){\
    uint64_t temp[17*4];\
    uint64_t *temp_ptr= temp;\
    int count= 17;\
\
    /*FIXME unroll */\
    __asm__ volatile(\
        "pxor %%mm7, %%mm7          \n\t"\
        "1:                         \n\t"\
        "movq (%0), %%mm0           \n\t"\
        "movq (%0), %%mm1           \n\t"\
        "movq 8(%0), %%mm2          \n\t"\
        "movq 8(%0), %%mm3          \n\t"\
        "punpcklbw %%mm7, %%mm0     \n\t"\
        "punpckhbw %%mm7, %%mm1     \n\t"\
        "punpcklbw %%mm7, %%mm2     \n\t"\
        "punpckhbw %%mm7, %%mm3     \n\t"\
        "movq %%mm0, (%1)           \n\t"\
        "movq %%mm1, 17*8(%1)       \n\t"\
        "movq %%mm2, 2*17*8(%1)     \n\t"\
        "movq %%mm3, 3*17*8(%1)     \n\t"\
        "add $8, %1                 \n\t"\
        "add %3, %0                 \n\t"\
        "decl %2                    \n\t"\
        " jnz 1b                    \n\t"\
        : "+r" (src), "+r" (temp_ptr), "+r"(count)\
        : "r" ((x86_reg)srcStride)\
        : "memory"\
    );\
\
    temp_ptr= temp;\
    count=4;\
\
    /*FIXME reorder for speed */\
    __asm__ volatile(\
        /*"pxor %%mm7, %%mm7          \n\t"*/\
        "1:                         \n\t"\
        "movq (%0), %%mm0           \n\t"\
        "movq 8(%0), %%mm1          \n\t"\
        "movq 16(%0), %%mm2         \n\t"\
        "movq 24(%0), %%mm3         \n\t"\
        QPEL_V_LOW(%%mm0, %%mm1, %%mm2, %%mm3, %5, %6, %5, 16(%0),  8(%0),   (%0), 32(%0), (%1), OP)\
        QPEL_V_LOW(%%mm1, %%mm2, %%mm3, %%mm0, %5, %6, %5,  8(%0),   (%0),   (%0), 40(%0), (%1, %3), OP)\
        "add %4, %1                 \n\t"\
        QPEL_V_LOW(%%mm2, %%mm3, %%mm0, %%mm1, %5, %6, %5,   (%0),   (%0),  8(%0), 48(%0), (%1), OP)\
\
        QPEL_V_LOW(%%mm3, %%mm0, %%mm1, %%mm2, %5, %6, %5,   (%0),  8(%0), 16(%0), 56(%0), (%1, %3), OP)\
        "add %4, %1                 \n\t"\
        QPEL_V_LOW(%%mm0, %%mm1, %%mm2, %%mm3, %5, %6, %5,  8(%0), 16(%0), 24(%0), 64(%0), (%1), OP)\
        QPEL_V_LOW(%%mm1, %%mm2, %%mm3, %%mm0, %5, %6, %5, 16(%0), 24(%0), 32(%0), 72(%0), (%1, %3), OP)\
        "add %4, %1                 \n\t"\
        QPEL_V_LOW(%%mm2, %%mm3, %%mm0, %%mm1, %5, %6, %5, 24(%0), 32(%0), 40(%0), 80(%0), (%1), OP)\
        QPEL_V_LOW(%%mm3, %%mm0, %%mm1, %%mm2, %5, %6, %5, 32(%0), 40(%0), 48(%0), 88(%0), (%1, %3), OP)\
        "add %4, %1                 \n\t"\
        QPEL_V_LOW(%%mm0, %%mm1, %%mm2, %%mm3, %5, %6, %5, 40(%0), 48(%0), 56(%0), 96(%0), (%1), OP)\
        QPEL_V_LOW(%%mm1, %%mm2, %%mm3, %%mm0, %5, %6, %5, 48(%0), 56(%0), 64(%0),104(%0), (%1, %3), OP)\
        "add %4, %1                 \n\t"\
        QPEL_V_LOW(%%mm2, %%mm3, %%mm0, %%mm1, %5, %6, %5, 56(%0), 64(%0), 72(%0),112(%0), (%1), OP)\
        QPEL_V_LOW(%%mm3, %%mm0, %%mm1, %%mm2, %5, %6, %5, 64(%0), 72(%0), 80(%0),120(%0), (%1, %3), OP)\
        "add %4, %1                 \n\t"\
        QPEL_V_LOW(%%mm0, %%mm1, %%mm2, %%mm3, %5, %6, %5, 72(%0), 80(%0), 88(%0),128(%0), (%1), OP)\
\
        QPEL_V_LOW(%%mm1, %%mm2, %%mm3, %%mm0, %5, %6, %5, 80(%0), 88(%0), 96(%0),128(%0), (%1, %3), OP)\
        "add %4, %1                 \n\t"\
        QPEL_V_LOW(%%mm2, %%mm3, %%mm0, %%mm1, %5, %6, %5, 88(%0), 96(%0),104(%0),120(%0), (%1), OP)\
        QPEL_V_LOW(%%mm3, %%mm0, %%mm1, %%mm2, %5, %6, %5, 96(%0),104(%0),112(%0),112(%0), (%1, %3), OP)\
\
        "add $136, %0               \n\t"\
        "add %6, %1                 \n\t"\
        "decl %2                    \n\t"\
        " jnz 1b                    \n\t"\
\
        : "+r"(temp_ptr), "+r"(dst), "+g"(count)\
        : "r"((x86_reg)dstStride), "r"(2*(x86_reg)dstStride), /*"m"(ff_pw_20), "m"(ff_pw_3),*/ "m"(ROUNDER), "g"(4-14*(x86_reg)dstStride)\
        :"memory"\
    );\
}\
\
static void OPNAME ## mpeg4_qpel8_v_lowpass_ ## MMX(uint8_t *dst, uint8_t *src, int dstStride, int srcStride){\
    uint64_t temp[9*2];\
    uint64_t *temp_ptr= temp;\
    int count= 9;\
\
    /*FIXME unroll */\
    __asm__ volatile(\
        "pxor %%mm7, %%mm7          \n\t"\
        "1:                         \n\t"\
        "movq (%0), %%mm0           \n\t"\
        "movq (%0), %%mm1           \n\t"\
        "punpcklbw %%mm7, %%mm0     \n\t"\
        "punpckhbw %%mm7, %%mm1     \n\t"\
        "movq %%mm0, (%1)           \n\t"\
        "movq %%mm1, 9*8(%1)        \n\t"\
        "add $8, %1                 \n\t"\
        "add %3, %0                 \n\t"\
        "decl %2                    \n\t"\
        " jnz 1b                    \n\t"\
        : "+r" (src), "+r" (temp_ptr), "+r"(count)\
        : "r" ((x86_reg)srcStride)\
        : "memory"\
    );\
\
    temp_ptr= temp;\
    count=2;\
\
    /*FIXME reorder for speed */\
    __asm__ volatile(\
        /*"pxor %%mm7, %%mm7          \n\t"*/\
        "1:                         \n\t"\
        "movq (%0), %%mm0           \n\t"\
        "movq 8(%0), %%mm1          \n\t"\
        "movq 16(%0), %%mm2         \n\t"\
        "movq 24(%0), %%mm3         \n\t"\
        QPEL_V_LOW(%%mm0, %%mm1, %%mm2, %%mm3, %5, %6, %5, 16(%0),  8(%0),   (%0), 32(%0), (%1), OP)\
        QPEL_V_LOW(%%mm1, %%mm2, %%mm3, %%mm0, %5, %6, %5,  8(%0),   (%0),   (%0), 40(%0), (%1, %3), OP)\
        "add %4, %1                 \n\t"\
        QPEL_V_LOW(%%mm2, %%mm3, %%mm0, %%mm1, %5, %6, %5,   (%0),   (%0),  8(%0), 48(%0), (%1), OP)\
\
        QPEL_V_LOW(%%mm3, %%mm0, %%mm1, %%mm2, %5, %6, %5,   (%0),  8(%0), 16(%0), 56(%0), (%1, %3), OP)\
        "add %4, %1                 \n\t"\
        QPEL_V_LOW(%%mm0, %%mm1, %%mm2, %%mm3, %5, %6, %5,  8(%0), 16(%0), 24(%0), 64(%0), (%1), OP)\
\
        QPEL_V_LOW(%%mm1, %%mm2, %%mm3, %%mm0, %5, %6, %5, 16(%0), 24(%0), 32(%0), 64(%0), (%1, %3), OP)\
        "add %4, %1                 \n\t"\
        QPEL_V_LOW(%%mm2, %%mm3, %%mm0, %%mm1, %5, %6, %5, 24(%0), 32(%0), 40(%0), 56(%0), (%1), OP)\
        QPEL_V_LOW(%%mm3, %%mm0, %%mm1, %%mm2, %5, %6, %5, 32(%0), 40(%0), 48(%0), 48(%0), (%1, %3), OP)\
\
        "add $72, %0                \n\t"\
        "add %6, %1                 \n\t"\
        "decl %2                    \n\t"\
        " jnz 1b                    \n\t"\
\
        : "+r"(temp_ptr), "+r"(dst), "+g"(count)\
        : "r"((x86_reg)dstStride), "r"(2*(x86_reg)dstStride), /*"m"(ff_pw_20), "m"(ff_pw_3),*/ "m"(ROUNDER), "g"(4-6*(x86_reg)dstStride)\
        : "memory"\
    );\
}\
\
static void OPNAME ## qpel8_mc00_ ## MMX (uint8_t *dst, uint8_t *src, int stride){\
    OPNAME ## pixels8_ ## MMX(dst, src, stride, 8);\
}\
\
static void OPNAME ## qpel8_mc10_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t temp[8];\
    uint8_t * const half= (uint8_t*)temp;\
    put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(half, src, 8, stride, 8);\
    OPNAME ## pixels8_l2_ ## MMX(dst, src, half, stride, stride, 8);\
}\
\
static void OPNAME ## qpel8_mc20_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    OPNAME ## mpeg4_qpel8_h_lowpass_ ## MMX(dst, src, stride, stride, 8);\
}\
\
static void OPNAME ## qpel8_mc30_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t temp[8];\
    uint8_t * const half= (uint8_t*)temp;\
    put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(half, src, 8, stride, 8);\
    OPNAME ## pixels8_l2_ ## MMX(dst, src+1, half, stride, stride, 8);\
}\
\
static void OPNAME ## qpel8_mc01_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t temp[8];\
    uint8_t * const half= (uint8_t*)temp;\
    put ## RND ## mpeg4_qpel8_v_lowpass_ ## MMX(half, src, 8, stride);\
    OPNAME ## pixels8_l2_ ## MMX(dst, src, half, stride, stride, 8);\
}\
\
static void OPNAME ## qpel8_mc02_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    OPNAME ## mpeg4_qpel8_v_lowpass_ ## MMX(dst, src, stride, stride);\
}\
\
static void OPNAME ## qpel8_mc03_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t temp[8];\
    uint8_t * const half= (uint8_t*)temp;\
    put ## RND ## mpeg4_qpel8_v_lowpass_ ## MMX(half, src, 8, stride);\
    OPNAME ## pixels8_l2_ ## MMX(dst, src+stride, half, stride, stride, 8);\
}\
static void OPNAME ## qpel8_mc11_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t half[8 + 9];\
    uint8_t * const halfH= ((uint8_t*)half) + 64;\
    uint8_t * const halfHV= ((uint8_t*)half);\
    put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8, stride, 9);\
    put ## RND ## pixels8_l2_ ## MMX(halfH, src, halfH, 8, stride, 9);\
    put ## RND ## mpeg4_qpel8_v_lowpass_ ## MMX(halfHV, halfH, 8, 8);\
    OPNAME ## pixels8_l2_ ## MMX(dst, halfH, halfHV, stride, 8, 8);\
}\
static void OPNAME ## qpel8_mc31_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t half[8 + 9];\
    uint8_t * const halfH= ((uint8_t*)half) + 64;\
    uint8_t * const halfHV= ((uint8_t*)half);\
    put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8, stride, 9);\
    put ## RND ## pixels8_l2_ ## MMX(halfH, src+1, halfH, 8, stride, 9);\
    put ## RND ## mpeg4_qpel8_v_lowpass_ ## MMX(halfHV, halfH, 8, 8);\
    OPNAME ## pixels8_l2_ ## MMX(dst, halfH, halfHV, stride, 8, 8);\
}\
static void OPNAME ## qpel8_mc13_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t half[8 + 9];\
    uint8_t * const halfH= ((uint8_t*)half) + 64;\
    uint8_t * const halfHV= ((uint8_t*)half);\
    put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8, stride, 9);\
    put ## RND ## pixels8_l2_ ## MMX(halfH, src, halfH, 8, stride, 9);\
    put ## RND ## mpeg4_qpel8_v_lowpass_ ## MMX(halfHV, halfH, 8, 8);\
    OPNAME ## pixels8_l2_ ## MMX(dst, halfH+8, halfHV, stride, 8, 8);\
}\
static void OPNAME ## qpel8_mc33_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t half[8 + 9];\
    uint8_t * const halfH= ((uint8_t*)half) + 64;\
    uint8_t * const halfHV= ((uint8_t*)half);\
    put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8, stride, 9);\
    put ## RND ## pixels8_l2_ ## MMX(halfH, src+1, halfH, 8, stride, 9);\
    put ## RND ## mpeg4_qpel8_v_lowpass_ ## MMX(halfHV, halfH, 8, 8);\
    OPNAME ## pixels8_l2_ ## MMX(dst, halfH+8, halfHV, stride, 8, 8);\
}\
static void OPNAME ## qpel8_mc21_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t half[8 + 9];\
    uint8_t * const halfH= ((uint8_t*)half) + 64;\
    uint8_t * const halfHV= ((uint8_t*)half);\
    put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8, stride, 9);\
    put ## RND ## mpeg4_qpel8_v_lowpass_ ## MMX(halfHV, halfH, 8, 8);\
    OPNAME ## pixels8_l2_ ## MMX(dst, halfH, halfHV, stride, 8, 8);\
}\
static void OPNAME ## qpel8_mc23_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t half[8 + 9];\
    uint8_t * const halfH= ((uint8_t*)half) + 64;\
    uint8_t * const halfHV= ((uint8_t*)half);\
    put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8, stride, 9);\
    put ## RND ## mpeg4_qpel8_v_lowpass_ ## MMX(halfHV, halfH, 8, 8);\
    OPNAME ## pixels8_l2_ ## MMX(dst, halfH+8, halfHV, stride, 8, 8);\
}\
static void OPNAME ## qpel8_mc12_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t half[8 + 9];\
    uint8_t * const halfH= ((uint8_t*)half);\
    put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8, stride, 9);\
    put ## RND ## pixels8_l2_ ## MMX(halfH, src, halfH, 8, stride, 9);\
    OPNAME ## mpeg4_qpel8_v_lowpass_ ## MMX(dst, halfH, stride, 8);\
}\
static void OPNAME ## qpel8_mc32_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t half[8 + 9];\
    uint8_t * const halfH= ((uint8_t*)half);\
    put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8, stride, 9);\
    put ## RND ## pixels8_l2_ ## MMX(halfH, src+1, halfH, 8, stride, 9);\
    OPNAME ## mpeg4_qpel8_v_lowpass_ ## MMX(dst, halfH, stride, 8);\
}\
static void OPNAME ## qpel8_mc22_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t half[9];\
    uint8_t * const halfH= ((uint8_t*)half);\
    put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8, stride, 9);\
    OPNAME ## mpeg4_qpel8_v_lowpass_ ## MMX(dst, halfH, stride, 8);\
}\
static void OPNAME ## qpel16_mc00_ ## MMX (uint8_t *dst, uint8_t *src, int stride){\
    OPNAME ## pixels16_ ## MMX(dst, src, stride, 16);\
}\
\
static void OPNAME ## qpel16_mc10_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t temp[32];\
    uint8_t * const half= (uint8_t*)temp;\
    put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(half, src, 16, stride, 16);\
    OPNAME ## pixels16_l2_ ## MMX(dst, src, half, stride, stride, 16);\
}\
\
static void OPNAME ## qpel16_mc20_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    OPNAME ## mpeg4_qpel16_h_lowpass_ ## MMX(dst, src, stride, stride, 16);\
}\
\
static void OPNAME ## qpel16_mc30_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
1500 |
uint64_t temp[32];\
|
1501 |
uint8_t * const half= (uint8_t*)temp;\
|
1502 |
put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(half, src, 16, stride, 16);\ |
1503 |
OPNAME ## pixels16_l2_ ## MMX(dst, src+1, half, stride, stride, 16);\ |
1504 |
}\ |
1505 |
\ |
1506 |
static void OPNAME ## qpel16_mc01_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\ |
1507 |
uint64_t temp[32];\
|
1508 |
uint8_t * const half= (uint8_t*)temp;\
|
1509 |
put ## RND ## mpeg4_qpel16_v_lowpass_ ## MMX(half, src, 16, stride);\ |
1510 |
OPNAME ## pixels16_l2_ ## MMX(dst, src, half, stride, stride, 16);\ |
1511 |
}\ |
1512 |
\ |
1513 |
static void OPNAME ## qpel16_mc02_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\ |
1514 |
OPNAME ## mpeg4_qpel16_v_lowpass_ ## MMX(dst, src, stride, stride);\ |
1515 |
}\ |
1516 |
\ |
1517 |
static void OPNAME ## qpel16_mc03_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\ |
1518 |
uint64_t temp[32];\
|
1519 |
uint8_t * const half= (uint8_t*)temp;\
|
1520 |
put ## RND ## mpeg4_qpel16_v_lowpass_ ## MMX(half, src, 16, stride);\ |
1521 |
OPNAME ## pixels16_l2_ ## MMX(dst, src+stride, half, stride, stride, 16);\ |
1522 |
}\ |
1523 |
static void OPNAME ## qpel16_mc11_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\ |
1524 |
uint64_t half[16*2 + 17*2];\ |
1525 |
uint8_t * const halfH= ((uint8_t*)half) + 256;\ |
1526 |
uint8_t * const halfHV= ((uint8_t*)half);\
|
1527 |
put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16, stride, 17);\ |
1528 |
put ## RND ## pixels16_l2_ ## MMX(halfH, src, halfH, 16, stride, 17);\ |
1529 |
put ## RND ## mpeg4_qpel16_v_lowpass_ ## MMX(halfHV, halfH, 16, 16);\ |
1530 |
OPNAME ## pixels16_l2_ ## MMX(dst, halfH, halfHV, stride, 16, 16);\ |
1531 |
}\ |
1532 |
static void OPNAME ## qpel16_mc31_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\ |
1533 |
uint64_t half[16*2 + 17*2];\ |
1534 |
uint8_t * const halfH= ((uint8_t*)half) + 256;\ |
1535 |
uint8_t * const halfHV= ((uint8_t*)half);\
|
1536 |
put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16, stride, 17);\ |
1537 |
put ## RND ## pixels16_l2_ ## MMX(halfH, src+1, halfH, 16, stride, 17);\ |
1538 |
put ## RND ## mpeg4_qpel16_v_lowpass_ ## MMX(halfHV, halfH, 16, 16);\ |
1539 |
OPNAME ## pixels16_l2_ ## MMX(dst, halfH, halfHV, stride, 16, 16);\ |
1540 |
}\ |
1541 |
static void OPNAME ## qpel16_mc13_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\ |
1542 |
uint64_t half[16*2 + 17*2];\ |
1543 |
uint8_t * const halfH= ((uint8_t*)half) + 256;\ |
1544 |
uint8_t * const halfHV= ((uint8_t*)half);\
|
1545 |
put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16, stride, 17);\ |
1546 |
put ## RND ## pixels16_l2_ ## MMX(halfH, src, halfH, 16, stride, 17);\ |
1547 |
put ## RND ## mpeg4_qpel16_v_lowpass_ ## MMX(halfHV, halfH, 16, 16);\ |
1548 |
OPNAME ## pixels16_l2_ ## MMX(dst, halfH+16, halfHV, stride, 16, 16);\ |
1549 |
}\ |
1550 |
static void OPNAME ## qpel16_mc33_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\ |
1551 |
uint64_t half[16*2 + 17*2];\ |
1552 |
uint8_t * const halfH= ((uint8_t*)half) + 256;\ |
1553 |
uint8_t * const halfHV= ((uint8_t*)half);\
|
1554 |
put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16, stride, 17);\ |
1555 |
put ## RND ## pixels16_l2_ ## MMX(halfH, src+1, halfH, 16, stride, 17);\ |
1556 |
put ## RND ## mpeg4_qpel16_v_lowpass_ ## MMX(halfHV, halfH, 16, 16);\ |
1557 |
OPNAME ## pixels16_l2_ ## MMX(dst, halfH+16, halfHV, stride, 16, 16);\ |
1558 |
}\ |
1559 |
static void OPNAME ## qpel16_mc21_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\ |
1560 |
uint64_t half[16*2 + 17*2];\ |
1561 |
uint8_t * const halfH= ((uint8_t*)half) + 256;\ |
1562 |
uint8_t * const halfHV= ((uint8_t*)half);\
|
1563 |
put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16, stride, 17);\ |
1564 |
put ## RND ## mpeg4_qpel16_v_lowpass_ ## MMX(halfHV, halfH, 16, 16);\ |
1565 |
OPNAME ## pixels16_l2_ ## MMX(dst, halfH, halfHV, stride, 16, 16);\ |
1566 |
}\ |
1567 |
static void OPNAME ## qpel16_mc23_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\ |
1568 |
uint64_t half[16*2 + 17*2];\ |
1569 |
uint8_t * const halfH= ((uint8_t*)half) + 256;\ |
1570 |
uint8_t * const halfHV= ((uint8_t*)half);\
|
1571 |
put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16, stride, 17);\ |
1572 |
put ## RND ## mpeg4_qpel16_v_lowpass_ ## MMX(halfHV, halfH, 16, 16);\ |
1573 |
OPNAME ## pixels16_l2_ ## MMX(dst, halfH+16, halfHV, stride, 16, 16);\ |
1574 |
}\ |
1575 |
static void OPNAME ## qpel16_mc12_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\ |
1576 |
uint64_t half[17*2];\ |
1577 |
uint8_t * const halfH= ((uint8_t*)half);\
|
1578 |
put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16, stride, 17);\ |
1579 |
put ## RND ## pixels16_l2_ ## MMX(halfH, src, halfH, 16, stride, 17);\ |
1580 |
OPNAME ## mpeg4_qpel16_v_lowpass_ ## MMX(dst, halfH, stride, 16);\ |
1581 |
}\ |
1582 |
static void OPNAME ## qpel16_mc32_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\ |
1583 |
uint64_t half[17*2];\ |
1584 |
uint8_t * const halfH= ((uint8_t*)half);\
|
1585 |
put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16, stride, 17);\ |
1586 |
put ## RND ## pixels16_l2_ ## MMX(halfH, src+1, halfH, 16, stride, 17);\ |
1587 |
OPNAME ## mpeg4_qpel16_v_lowpass_ ## MMX(dst, halfH, stride, 16);\ |
1588 |
}\ |
1589 |
static void OPNAME ## qpel16_mc22_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\ |
1590 |
uint64_t half[17*2];\ |
1591 |
uint8_t * const halfH= ((uint8_t*)half);\
|
1592 |
put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16, stride, 17);\ |
1593 |
OPNAME ## mpeg4_qpel16_v_lowpass_ ## MMX(dst, halfH, stride, 16);\ |
1594 |
} |
1595 |
|
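/* Store/average primitives plugged into the qpel templates above: PUT_OP is
 * a plain store, while the AVG variants blend the new result with the bytes
 * already in the destination (pavgusb on 3DNow!, pavgb on MMX2). */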
#define PUT_OP(a,b,temp, size) "mov" #size " " #a ", " #b " \n\t"
#define AVG_3DNOW_OP(a,b,temp, size) \
"mov" #size " " #b ", " #temp "   \n\t"\
"pavgusb " #temp ", " #a "        \n\t"\
"mov" #size " " #a ", " #b "      \n\t"
#define AVG_MMX2_OP(a,b,temp, size) \
"mov" #size " " #b ", " #temp "   \n\t"\
"pavgb " #temp ", " #a "          \n\t"\
"mov" #size " " #a ", " #b "      \n\t"

QPEL_BASE(put_       , ff_pw_16, _       , PUT_OP, PUT_OP)
QPEL_BASE(avg_       , ff_pw_16, _       , AVG_MMX2_OP, AVG_3DNOW_OP)
QPEL_BASE(put_no_rnd_, ff_pw_15, _no_rnd_, PUT_OP, PUT_OP)
QPEL_OP(put_       , ff_pw_16, _       , PUT_OP, 3dnow)
QPEL_OP(avg_       , ff_pw_16, _       , AVG_3DNOW_OP, 3dnow)
QPEL_OP(put_no_rnd_, ff_pw_15, _no_rnd_, PUT_OP, 3dnow)
QPEL_OP(put_       , ff_pw_16, _       , PUT_OP, mmx2)
QPEL_OP(avg_       , ff_pw_16, _       , AVG_MMX2_OP, mmx2)
QPEL_OP(put_no_rnd_, ff_pw_15, _no_rnd_, PUT_OP, mmx2)

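/* The "2tap" functions below trade accuracy for speed: each quarter-pel
 * position is approximated with a 2-point half-pel average or a 3-point
 * blend (the _l3_ helpers) instead of the spec-compliant interpolation
 * filters. */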
/***********************************/
/* bilinear qpel: not compliant with any spec, only for -lavdopts fast */

#define QPEL_2TAP_XY(OPNAME, SIZE, MMX, XY, HPEL)\
static void OPNAME ## 2tap_qpel ## SIZE ## _mc ## XY ## _ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    OPNAME ## pixels ## SIZE ## HPEL(dst, src, stride, SIZE);\
}
#define QPEL_2TAP_L3(OPNAME, SIZE, MMX, XY, S0, S1, S2)\
static void OPNAME ## 2tap_qpel ## SIZE ## _mc ## XY ## _ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    OPNAME ## 2tap_qpel ## SIZE ## _l3_ ## MMX(dst, src+S0, stride, SIZE, S1, S2);\
}

#define QPEL_2TAP(OPNAME, SIZE, MMX)\
QPEL_2TAP_XY(OPNAME, SIZE, MMX, 20, _x2_ ## MMX)\
QPEL_2TAP_XY(OPNAME, SIZE, MMX, 02, _y2_ ## MMX)\
QPEL_2TAP_XY(OPNAME, SIZE, MMX, 22, _xy2_mmx)\
static const qpel_mc_func OPNAME ## 2tap_qpel ## SIZE ## _mc00_ ## MMX =\
                          OPNAME ## qpel ## SIZE ## _mc00_ ## MMX;\
static const qpel_mc_func OPNAME ## 2tap_qpel ## SIZE ## _mc21_ ## MMX =\
                          OPNAME ## 2tap_qpel ## SIZE ## _mc20_ ## MMX;\
static const qpel_mc_func OPNAME ## 2tap_qpel ## SIZE ## _mc12_ ## MMX =\
                          OPNAME ## 2tap_qpel ## SIZE ## _mc02_ ## MMX;\
static void OPNAME ## 2tap_qpel ## SIZE ## _mc32_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    OPNAME ## pixels ## SIZE ## _y2_ ## MMX(dst, src+1, stride, SIZE);\
}\
static void OPNAME ## 2tap_qpel ## SIZE ## _mc23_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    OPNAME ## pixels ## SIZE ## _x2_ ## MMX(dst, src+stride, stride, SIZE);\
}\
QPEL_2TAP_L3(OPNAME, SIZE, MMX, 10, 0,         1,       0)\
QPEL_2TAP_L3(OPNAME, SIZE, MMX, 30, 1,        -1,       0)\
QPEL_2TAP_L3(OPNAME, SIZE, MMX, 01, 0,         stride,  0)\
QPEL_2TAP_L3(OPNAME, SIZE, MMX, 03, stride,   -stride,  0)\
QPEL_2TAP_L3(OPNAME, SIZE, MMX, 11, 0,         stride,  1)\
QPEL_2TAP_L3(OPNAME, SIZE, MMX, 31, 1,         stride, -1)\
QPEL_2TAP_L3(OPNAME, SIZE, MMX, 13, stride,   -stride,  1)\
QPEL_2TAP_L3(OPNAME, SIZE, MMX, 33, stride+1, -stride, -1)\

QPEL_2TAP(put_, 16, mmx2)
QPEL_2TAP(avg_, 16, mmx2)
QPEL_2TAP(put_,  8, mmx2)
QPEL_2TAP(avg_,  8, mmx2)
QPEL_2TAP(put_, 16, 3dnow)
QPEL_2TAP(avg_, 16, 3dnow)
QPEL_2TAP(put_,  8, 3dnow)
QPEL_2TAP(avg_,  8, 3dnow)


#if 0
static void just_return(void) { return; }
#endif

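/* Edge emulation: the C wrapper below clamps the source position and works
 * out which rows/columns can actually be copied; the per-CPU assembler core
 * (ff_emu_edge_core_*) then copies that region and replicates the border
 * pixels into buf. */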
#if HAVE_YASM
typedef void emu_edge_core_func (uint8_t *buf, const uint8_t *src,
                                 x86_reg linesize, x86_reg start_y,
                                 x86_reg end_y, x86_reg block_h,
                                 x86_reg start_x, x86_reg end_x,
                                 x86_reg block_w);
extern emu_edge_core_func ff_emu_edge_core_mmx;
extern emu_edge_core_func ff_emu_edge_core_sse;

static av_always_inline
void emulated_edge_mc(uint8_t *buf, const uint8_t *src, int linesize,
                      int block_w, int block_h,
                      int src_x, int src_y, int w, int h,
                      emu_edge_core_func *core_fn)
{
    int start_y, start_x, end_y, end_x, src_y_add=0;

    if(src_y>= h){
        src_y_add = h-1-src_y;
        src_y=h-1;
    }else if(src_y<=-block_h){
        src_y_add = 1-block_h-src_y;
        src_y=1-block_h;
    }
    if(src_x>= w){
        src+= (w-1-src_x);
        src_x=w-1;
    }else if(src_x<=-block_w){
        src+= (1-block_w-src_x);
        src_x=1-block_w;
    }

    start_y= FFMAX(0, -src_y);
    start_x= FFMAX(0, -src_x);
    end_y= FFMIN(block_h, h-src_y);
    end_x= FFMIN(block_w, w-src_x);
    assert(start_x < end_x && block_w > 0);
    assert(start_y < end_y && block_h > 0);

    // fill in the to-be-copied part plus all above/below
    src += (src_y_add+start_y)*linesize + start_x;
    buf += start_x;
    core_fn(buf, src, linesize, start_y, end_y, block_h, start_x, end_x, block_w);
}

#if ARCH_X86_32
static av_noinline
void emulated_edge_mc_mmx(uint8_t *buf, const uint8_t *src, int linesize,
                          int block_w, int block_h,
                          int src_x, int src_y, int w, int h)
{
    emulated_edge_mc(buf, src, linesize, block_w, block_h, src_x, src_y,
                     w, h, &ff_emu_edge_core_mmx);
}
#endif
static av_noinline
void emulated_edge_mc_sse(uint8_t *buf, const uint8_t *src, int linesize,
                          int block_w, int block_h,
                          int src_x, int src_y, int w, int h)
{
    emulated_edge_mc(buf, src, linesize, block_w, block_h, src_x, src_y,
                     w, h, &ff_emu_edge_core_sse);
}
#endif /* HAVE_YASM */

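/* gmc() implements global motion compensation for 8-pixel-wide blocks
 * (w = 8). Blocks with a non-constant full-pel offset, or with more than
 * 16 bits of sub-pel precision, fall back to ff_gmc_c(); the rest are
 * bilinearly interpolated four pixels at a time with MMX. */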
typedef void emulated_edge_mc_func (uint8_t *dst, const uint8_t *src,
                                    int linesize, int block_w, int block_h,
                                    int src_x, int src_y, int w, int h);

static av_always_inline
void gmc(uint8_t *dst, uint8_t *src, int stride, int h, int ox, int oy,
         int dxx, int dxy, int dyx, int dyy, int shift, int r, int width, int height,
         emulated_edge_mc_func *emu_edge_fn)
{
    const int w = 8;
    const int ix = ox>>(16+shift);
    const int iy = oy>>(16+shift);
    const int oxs = ox>>4;
    const int oys = oy>>4;
    const int dxxs = dxx>>4;
    const int dxys = dxy>>4;
    const int dyxs = dyx>>4;
    const int dyys = dyy>>4;
    const uint16_t r4[4] = {r,r,r,r};
    const uint16_t dxy4[4] = {dxys,dxys,dxys,dxys};
    const uint16_t dyy4[4] = {dyys,dyys,dyys,dyys};
    const uint64_t shift2 = 2*shift;
    uint8_t edge_buf[(h+1)*stride];
    int x, y;

    const int dxw = (dxx-(1<<(16+shift)))*(w-1);
    const int dyh = (dyy-(1<<(16+shift)))*(h-1);
    const int dxh = dxy*(h-1);
    const int dyw = dyx*(w-1);
    if( // non-constant fullpel offset (3% of blocks)
        ((ox^(ox+dxw)) | (ox^(ox+dxh)) | (ox^(ox+dxw+dxh)) |
         (oy^(oy+dyw)) | (oy^(oy+dyh)) | (oy^(oy+dyw+dyh))) >> (16+shift)
        // uses more than 16 bits of subpel mv (only at huge resolution)
        || (dxx|dxy|dyx|dyy)&15 )
    {
        //FIXME could still use mmx for some of the rows
        ff_gmc_c(dst, src, stride, h, ox, oy, dxx, dxy, dyx, dyy, shift, r, width, height);
        return;
    }

    src += ix + iy*stride;
    if( (unsigned)ix >= width-w ||
        (unsigned)iy >= height-h )
    {
        emu_edge_fn(edge_buf, src, stride, w+1, h+1, ix, iy, width, height);
        src = edge_buf;
    }

    __asm__ volatile(
        "movd         %0, %%mm6 \n\t"
        "pxor      %%mm7, %%mm7 \n\t"
        "punpcklwd %%mm6, %%mm6 \n\t"
        "punpcklwd %%mm6, %%mm6 \n\t"
        :: "r"(1<<shift)
    );

    for(x=0; x<w; x+=4){
        uint16_t dx4[4] = { oxs - dxys + dxxs*(x+0),
                            oxs - dxys + dxxs*(x+1),
                            oxs - dxys + dxxs*(x+2),
                            oxs - dxys + dxxs*(x+3) };
        uint16_t dy4[4] = { oys - dyys + dyxs*(x+0),
                            oys - dyys + dyxs*(x+1),
                            oys - dyys + dyxs*(x+2),
                            oys - dyys + dyxs*(x+3) };

        for(y=0; y<h; y++){
            __asm__ volatile(
                "movq %0,  %%mm4  \n\t"
                "movq %1,  %%mm5  \n\t"
                "paddw %2, %%mm4  \n\t"
                "paddw %3, %%mm5  \n\t"
                "movq %%mm4, %0   \n\t"
                "movq %%mm5, %1   \n\t"
                "psrlw $12, %%mm4 \n\t"
                "psrlw $12, %%mm5 \n\t"
                : "+m"(*dx4), "+m"(*dy4)
                : "m"(*dxy4), "m"(*dyy4)
            );

            __asm__ volatile(
                "movq %%mm6, %%mm2      \n\t"
                "movq %%mm6, %%mm1      \n\t"
                "psubw %%mm4, %%mm2     \n\t"
                "psubw %%mm5, %%mm1     \n\t"
                "movq %%mm2, %%mm0      \n\t"
                "movq %%mm4, %%mm3      \n\t"
                "pmullw %%mm1, %%mm0    \n\t" // (s-dx)*(s-dy)
                "pmullw %%mm5, %%mm3    \n\t" // dx*dy
                "pmullw %%mm5, %%mm2    \n\t" // (s-dx)*dy
                "pmullw %%mm4, %%mm1    \n\t" // dx*(s-dy)

                "movd %4, %%mm5         \n\t"
                "movd %3, %%mm4         \n\t"
                "punpcklbw %%mm7, %%mm5 \n\t"
                "punpcklbw %%mm7, %%mm4 \n\t"
                "pmullw %%mm5, %%mm3    \n\t" // src[1,1] * dx*dy
                "pmullw %%mm4, %%mm2    \n\t" // src[0,1] * (s-dx)*dy

                "movd %2, %%mm5         \n\t"
                "movd %1, %%mm4         \n\t"
                "punpcklbw %%mm7, %%mm5 \n\t"
                "punpcklbw %%mm7, %%mm4 \n\t"
                "pmullw %%mm5, %%mm1    \n\t" // src[1,0] * dx*(s-dy)
                "pmullw %%mm4, %%mm0    \n\t" // src[0,0] * (s-dx)*(s-dy)
                "paddw %5, %%mm1        \n\t"
                "paddw %%mm3, %%mm2     \n\t"
                "paddw %%mm1, %%mm0     \n\t"
                "paddw %%mm2, %%mm0     \n\t"

                "psrlw %6, %%mm0        \n\t"
                "packuswb %%mm0, %%mm0  \n\t"
                "movd %%mm0, %0         \n\t"

                : "=m"(dst[x+y*stride])
                : "m"(src[0]), "m"(src[1]),
                  "m"(src[stride]), "m"(src[stride+1]),
                  "m"(*r4), "m"(shift2)
            );
            src += stride;
        }
        src += 4-h*stride;
    }
}

#if HAVE_YASM
#if ARCH_X86_32
static void gmc_mmx(uint8_t *dst, uint8_t *src, int stride, int h, int ox, int oy,
                    int dxx, int dxy, int dyx, int dyy, int shift, int r, int width, int height)
{
    gmc(dst, src, stride, h, ox, oy, dxx, dxy, dyx, dyy, shift, r,
        width, height, &emulated_edge_mc_mmx);
}
#endif
static void gmc_sse(uint8_t *dst, uint8_t *src, int stride, int h, int ox, int oy,
                    int dxx, int dxy, int dyx, int dyy, int shift, int r, int width, int height)
{
    gmc(dst, src, stride, h, ox, oy, dxx, dxy, dyx, dyy, shift, r,
        width, height, &emulated_edge_mc_sse);
}
#else
static void gmc_mmx(uint8_t *dst, uint8_t *src, int stride, int h, int ox, int oy,
                    int dxx, int dxy, int dyx, int dyy, int shift, int r, int width, int height)
{
    gmc(dst, src, stride, h, ox, oy, dxx, dxy, dyx, dyy, shift, r,
        width, height, &ff_emulated_edge_mc);
}
#endif

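/* Expands to one prefetch helper per CPU flavor: walk the block line by
 * line and issue the given prefetch instruction for each row. */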
#define PREFETCH(name, op) \
static void name(void *mem, int stride, int h){\
    const uint8_t *p= mem;\
    do{\
        __asm__ volatile(#op" %0" :: "m"(*p));\
        p+= stride;\
    }while(--h);\
}
PREFETCH(prefetch_mmx2,  prefetcht0)
PREFETCH(prefetch_3dnow, prefetch)
#undef PREFETCH

#include "h264_qpel_mmx.c" |
1894 |
|
1895 |
void ff_put_h264_chroma_mc8_mmx_rnd (uint8_t *dst, uint8_t *src,
|
1896 |
int stride, int h, int x, int y); |
1897 |
void ff_put_vc1_chroma_mc8_mmx_nornd (uint8_t *dst, uint8_t *src,
|
1898 |
int stride, int h, int x, int y); |
1899 |
void ff_put_rv40_chroma_mc8_mmx (uint8_t *dst, uint8_t *src,
|
1900 |
int stride, int h, int x, int y); |
1901 |
void ff_avg_h264_chroma_mc8_mmx2_rnd (uint8_t *dst, uint8_t *src,
|
1902 |
int stride, int h, int x, int y); |
1903 |
void ff_avg_vc1_chroma_mc8_mmx2_nornd (uint8_t *dst, uint8_t *src,
|
1904 |
int stride, int h, int x, int y); |
1905 |
void ff_avg_rv40_chroma_mc8_mmx2 (uint8_t *dst, uint8_t *src,
|
1906 |
int stride, int h, int x, int y); |
1907 |
void ff_avg_h264_chroma_mc8_3dnow_rnd (uint8_t *dst, uint8_t *src,
|
1908 |
int stride, int h, int x, int y); |
1909 |
void ff_avg_vc1_chroma_mc8_3dnow_nornd(uint8_t *dst, uint8_t *src,
|
1910 |
int stride, int h, int x, int y); |
1911 |
void ff_avg_rv40_chroma_mc8_3dnow (uint8_t *dst, uint8_t *src,
|
1912 |
int stride, int h, int x, int y); |
1913 |
|
1914 |
void ff_put_h264_chroma_mc4_mmx (uint8_t *dst, uint8_t *src,
|
1915 |
int stride, int h, int x, int y); |
1916 |
void ff_put_rv40_chroma_mc4_mmx (uint8_t *dst, uint8_t *src,
|
1917 |
int stride, int h, int x, int y); |
1918 |
void ff_avg_h264_chroma_mc4_mmx2 (uint8_t *dst, uint8_t *src,
|
1919 |
int stride, int h, int x, int y); |
1920 |
void ff_avg_rv40_chroma_mc4_mmx2 (uint8_t *dst, uint8_t *src,
|
1921 |
int stride, int h, int x, int y); |
1922 |
void ff_avg_h264_chroma_mc4_3dnow (uint8_t *dst, uint8_t *src,
|
1923 |
int stride, int h, int x, int y); |
1924 |
void ff_avg_rv40_chroma_mc4_3dnow (uint8_t *dst, uint8_t *src,
|
1925 |
int stride, int h, int x, int y); |
1926 |
|
1927 |
void ff_put_h264_chroma_mc2_mmx2 (uint8_t *dst, uint8_t *src,
|
1928 |
int stride, int h, int x, int y); |
1929 |
void ff_avg_h264_chroma_mc2_mmx2 (uint8_t *dst, uint8_t *src,
|
1930 |
int stride, int h, int x, int y); |
1931 |
|
1932 |
void ff_put_h264_chroma_mc8_ssse3_rnd (uint8_t *dst, uint8_t *src,
|
1933 |
int stride, int h, int x, int y); |
1934 |
void ff_put_vc1_chroma_mc8_ssse3_nornd(uint8_t *dst, uint8_t *src,
|
1935 |
int stride, int h, int x, int y); |
1936 |
void ff_put_h264_chroma_mc4_ssse3 (uint8_t *dst, uint8_t *src,
|
1937 |
int stride, int h, int x, int y); |
1938 |
|
1939 |
void ff_avg_h264_chroma_mc8_ssse3_rnd (uint8_t *dst, uint8_t *src,
|
1940 |
int stride, int h, int x, int y); |
1941 |
void ff_avg_vc1_chroma_mc8_ssse3_nornd(uint8_t *dst, uint8_t *src,
|
1942 |
int stride, int h, int x, int y); |
1943 |
void ff_avg_h264_chroma_mc4_ssse3 (uint8_t *dst, uint8_t *src,
|
1944 |
int stride, int h, int x, int y); |
1945 |
|
1946 |
|
1947 |
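/* The mc00 (zero subpel offset) cases of the CAVS and VC-1 motion
 * compensation are plain block copies, so they simply forward to the
 * MMX/MMX2 copy and average routines. */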
/* CAVS specific */
void ff_put_cavs_qpel8_mc00_mmx2(uint8_t *dst, uint8_t *src, int stride) {
    put_pixels8_mmx(dst, src, stride, 8);
}
void ff_avg_cavs_qpel8_mc00_mmx2(uint8_t *dst, uint8_t *src, int stride) {
    avg_pixels8_mmx(dst, src, stride, 8);
}
void ff_put_cavs_qpel16_mc00_mmx2(uint8_t *dst, uint8_t *src, int stride) {
    put_pixels16_mmx(dst, src, stride, 16);
}
void ff_avg_cavs_qpel16_mc00_mmx2(uint8_t *dst, uint8_t *src, int stride) {
    avg_pixels16_mmx(dst, src, stride, 16);
}

/* VC1 specific */
void ff_put_vc1_mspel_mc00_mmx(uint8_t *dst, const uint8_t *src, int stride, int rnd) {
    put_pixels8_mmx(dst, src, stride, 8);
}
void ff_avg_vc1_mspel_mc00_mmx2(uint8_t *dst, const uint8_t *src, int stride, int rnd) {
    avg_pixels8_mmx2(dst, src, stride, 8);
}

/* XXX: those functions should be removed ASAP when all IDCTs are
   converted */
#if CONFIG_GPL
static void ff_libmpeg2mmx_idct_put(uint8_t *dest, int line_size, DCTELEM *block)
{
    ff_mmx_idct (block);
    ff_put_pixels_clamped_mmx(block, dest, line_size);
}
static void ff_libmpeg2mmx_idct_add(uint8_t *dest, int line_size, DCTELEM *block)
{
    ff_mmx_idct (block);
    ff_add_pixels_clamped_mmx(block, dest, line_size);
}
static void ff_libmpeg2mmx2_idct_put(uint8_t *dest, int line_size, DCTELEM *block)
{
    ff_mmxext_idct (block);
    ff_put_pixels_clamped_mmx(block, dest, line_size);
}
static void ff_libmpeg2mmx2_idct_add(uint8_t *dest, int line_size, DCTELEM *block)
{
    ff_mmxext_idct (block);
    ff_add_pixels_clamped_mmx(block, dest, line_size);
}
#endif
static void ff_idct_xvid_mmx_put(uint8_t *dest, int line_size, DCTELEM *block)
{
    ff_idct_xvid_mmx (block);
    ff_put_pixels_clamped_mmx(block, dest, line_size);
}
static void ff_idct_xvid_mmx_add(uint8_t *dest, int line_size, DCTELEM *block)
{
    ff_idct_xvid_mmx (block);
    ff_add_pixels_clamped_mmx(block, dest, line_size);
}
static void ff_idct_xvid_mmx2_put(uint8_t *dest, int line_size, DCTELEM *block)
{
    ff_idct_xvid_mmx2 (block);
    ff_put_pixels_clamped_mmx(block, dest, line_size);
}
static void ff_idct_xvid_mmx2_add(uint8_t *dest, int line_size, DCTELEM *block)
{
    ff_idct_xvid_mmx2 (block);
    ff_add_pixels_clamped_mmx(block, dest, line_size);
}

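/* Vorbis inverse channel coupling: mag/ang carry magnitude and angle
 * vectors, and the SIMD below reconstructs the two channels branchlessly
 * with comparison-generated sign masks (see the a/m formulas in the
 * instruction comments). */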
static void vorbis_inverse_coupling_3dnow(float *mag, float *ang, int blocksize)
{
    int i;
    __asm__ volatile("pxor %%mm7, %%mm7":);
    for(i=0; i<blocksize; i+=2) {
        __asm__ volatile(
            "movq       %0, %%mm0 \n\t"
            "movq       %1, %%mm1 \n\t"
            "movq    %%mm0, %%mm2 \n\t"
            "movq    %%mm1, %%mm3 \n\t"
            "pfcmpge %%mm7, %%mm2 \n\t" // m <= 0.0
            "pfcmpge %%mm7, %%mm3 \n\t" // a <= 0.0
            "pslld     $31, %%mm2 \n\t" // keep only the sign bit
            "pxor    %%mm2, %%mm1 \n\t"
            "movq    %%mm3, %%mm4 \n\t"
            "pand    %%mm1, %%mm3 \n\t"
            "pandn   %%mm1, %%mm4 \n\t"
            "pfadd   %%mm0, %%mm3 \n\t" // a = m + ((a<0) & (a ^ sign(m)))
            "pfsub   %%mm4, %%mm0 \n\t" // m = m + ((a>0) & (a ^ sign(m)))
            "movq    %%mm3, %1    \n\t"
            "movq    %%mm0, %0    \n\t"
            :"+m"(mag[i]), "+m"(ang[i])
            ::"memory"
        );
    }
    __asm__ volatile("femms");
}
static void vorbis_inverse_coupling_sse(float *mag, float *ang, int blocksize)
{
    int i;

    __asm__ volatile(
        "movaps  %0, %%xmm5 \n\t"
        ::"m"(ff_pdw_80000000[0])
    );
    for(i=0; i<blocksize; i+=4) {
        __asm__ volatile(
            "movaps      %0, %%xmm0 \n\t"
            "movaps      %1, %%xmm1 \n\t"
            "xorps   %%xmm2, %%xmm2 \n\t"
            "xorps   %%xmm3, %%xmm3 \n\t"
            "cmpleps %%xmm0, %%xmm2 \n\t" // m <= 0.0
            "cmpleps %%xmm1, %%xmm3 \n\t" // a <= 0.0
            "andps   %%xmm5, %%xmm2 \n\t" // keep only the sign bit
            "xorps   %%xmm2, %%xmm1 \n\t"
            "movaps  %%xmm3, %%xmm4 \n\t"
            "andps   %%xmm1, %%xmm3 \n\t"
            "andnps  %%xmm1, %%xmm4 \n\t"
            "addps   %%xmm0, %%xmm3 \n\t" // a = m + ((a<0) & (a ^ sign(m)))
            "subps   %%xmm4, %%xmm0 \n\t" // m = m + ((a>0) & (a ^ sign(m)))
            "movaps  %%xmm3, %1     \n\t"
            "movaps  %%xmm0, %0     \n\t"
            :"+m"(mag[i]), "+m"(ang[i])
            ::"memory"
        );
    }
}

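/* AC-3 downmix: IF1()/IF0() switch individual instructions on or off when
 * the MIX5/MIX_MISC templates are expanded, so one body serves both the
 * mono and stereo output paths. MIX5 is a fast path for the common
 * 5-channel-to-stereo/mono matrices; MIX_MISC handles arbitrary ones. */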
#define IF1(x) x
#define IF0(x)

#define MIX5(mono,stereo)\
    __asm__ volatile(\
        "movss          0(%2), %%xmm5 \n"\
        "movss          8(%2), %%xmm6 \n"\
        "movss         24(%2), %%xmm7 \n"\
        "shufps    $0, %%xmm5, %%xmm5 \n"\
        "shufps    $0, %%xmm6, %%xmm6 \n"\
        "shufps    $0, %%xmm7, %%xmm7 \n"\
        "1: \n"\
        "movaps       (%0,%1), %%xmm0 \n"\
        "movaps  0x400(%0,%1), %%xmm1 \n"\
        "movaps  0x800(%0,%1), %%xmm2 \n"\
        "movaps  0xc00(%0,%1), %%xmm3 \n"\
        "movaps 0x1000(%0,%1), %%xmm4 \n"\
        "mulps         %%xmm5, %%xmm0 \n"\
        "mulps         %%xmm6, %%xmm1 \n"\
        "mulps         %%xmm5, %%xmm2 \n"\
        "mulps         %%xmm7, %%xmm3 \n"\
        "mulps         %%xmm7, %%xmm4 \n"\
 stereo("addps         %%xmm1, %%xmm0 \n")\
        "addps         %%xmm1, %%xmm2 \n"\
        "addps         %%xmm3, %%xmm0 \n"\
        "addps         %%xmm4, %%xmm2 \n"\
   mono("addps         %%xmm2, %%xmm0 \n")\
        "movaps  %%xmm0,      (%0,%1) \n"\
 stereo("movaps  %%xmm2, 0x400(%0,%1) \n")\
        "add $16, %0 \n"\
        "jl 1b \n"\
        :"+&r"(i)\
        :"r"(samples[0]+len), "r"(matrix)\
        :XMM_CLOBBERS("%xmm0", "%xmm1", "%xmm2", "%xmm3", \
                      "%xmm4", "%xmm5", "%xmm6", "%xmm7",)\
         "memory"\
    );

#define MIX_MISC(stereo)\
    __asm__ volatile(\
        "1: \n"\
        "movaps  (%3,%0), %%xmm0 \n"\
 stereo("movaps   %%xmm0, %%xmm1 \n")\
        "mulps    %%xmm4, %%xmm0 \n"\
 stereo("mulps    %%xmm5, %%xmm1 \n")\
        "lea 1024(%3,%0), %1 \n"\
        "mov %5, %2 \n"\
        "2: \n"\
        "movaps   (%1),   %%xmm2 \n"\
 stereo("movaps   %%xmm2, %%xmm3 \n")\
        "mulps   (%4,%2), %%xmm2 \n"\
 stereo("mulps 16(%4,%2), %%xmm3 \n")\
        "addps    %%xmm2, %%xmm0 \n"\
 stereo("addps    %%xmm3, %%xmm1 \n")\
        "add $1024, %1 \n"\
        "add $32, %2 \n"\
        "jl 2b \n"\
        "movaps %%xmm0,     (%3,%0) \n"\
 stereo("movaps %%xmm1, 1024(%3,%0) \n")\
        "add $16, %0 \n"\
        "jl 1b \n"\
        :"+&r"(i), "=&r"(j), "=&r"(k)\
        :"r"(samples[0]+len), "r"(matrix_simd+in_ch), "g"((intptr_t)-32*(in_ch-1))\
        :"memory"\
    );

static void ac3_downmix_sse(float (*samples)[256], float (*matrix)[2], int out_ch, int in_ch, int len)
{
    int (*matrix_cmp)[2] = (int(*)[2])matrix;
    intptr_t i,j,k;

    i = -len*sizeof(float);
    if(in_ch == 5 && out_ch == 2 && !(matrix_cmp[0][1]|matrix_cmp[2][0]|matrix_cmp[3][1]|matrix_cmp[4][0]|(matrix_cmp[1][0]^matrix_cmp[1][1])|(matrix_cmp[0][0]^matrix_cmp[2][1]))) {
        MIX5(IF0,IF1);
    } else if(in_ch == 5 && out_ch == 1 && matrix_cmp[0][0]==matrix_cmp[2][0] && matrix_cmp[3][0]==matrix_cmp[4][0]) {
        MIX5(IF1,IF0);
    } else {
        DECLARE_ALIGNED(16, float, matrix_simd)[AC3_MAX_CHANNELS][2][4];
        j = 2*in_ch*sizeof(float);
        __asm__ volatile(
            "1: \n"
            "sub $8, %0 \n"
            "movss     (%2,%0), %%xmm4 \n"
            "movss    4(%2,%0), %%xmm5 \n"
            "shufps $0, %%xmm4, %%xmm4 \n"
            "shufps $0, %%xmm5, %%xmm5 \n"
            "movaps %%xmm4,   (%1,%0,4) \n"
            "movaps %%xmm5, 16(%1,%0,4) \n"
            "jg 1b \n"
            :"+&r"(j)
            :"r"(matrix_simd), "r"(matrix)
            :"memory"
        );
        if(out_ch == 2) {
            MIX_MISC(IF1);
        } else {
            MIX_MISC(IF0);
        }
    }
}

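/* Element-wise float vector primitives for the audio codecs; they index
 * with raw byte offsets so the loop counter doubles as the address
 * displacement. */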
static void vector_fmul_3dnow(float *dst, const float *src0, const float *src1, int len){
    x86_reg i = (len-4)*4;
    __asm__ volatile(
        "1: \n\t"
        "movq    (%2,%0), %%mm0 \n\t"
        "movq   8(%2,%0), %%mm1 \n\t"
        "pfmul   (%3,%0), %%mm0 \n\t"
        "pfmul  8(%3,%0), %%mm1 \n\t"
        "movq  %%mm0,  (%1,%0)  \n\t"
        "movq  %%mm1, 8(%1,%0)  \n\t"
        "sub  $16, %0 \n\t"
        "jge  1b      \n\t"
        "femms        \n\t"
        :"+r"(i)
        :"r"(dst), "r"(src0), "r"(src1)
        :"memory"
    );
}
static void vector_fmul_sse(float *dst, const float *src0, const float *src1, int len){
    x86_reg i = (len-8)*4;
    __asm__ volatile(
        "1: \n\t"
        "movaps   (%2,%0), %%xmm0 \n\t"
        "movaps 16(%2,%0), %%xmm1 \n\t"
        "mulps    (%3,%0), %%xmm0 \n\t"
        "mulps  16(%3,%0), %%xmm1 \n\t"
        "movaps %%xmm0,   (%1,%0) \n\t"
        "movaps %%xmm1, 16(%1,%0) \n\t"
        "sub  $32, %0 \n\t"
        "jge  1b      \n\t"
        :"+r"(i)
        :"r"(dst), "r"(src0), "r"(src1)
        :"memory"
    );
}

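/* vector_fmul_reverse: dst[i] = src0[i] * src1[len-1-i]; pswapd (3DNow!ext)
 * and shufps $0x1b (SSE) reverse the element order in-register. */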
static void vector_fmul_reverse_3dnow2(float *dst, const float *src0, const float *src1, int len){
    x86_reg i = len*4-16;
    __asm__ volatile(
        "1: \n\t"
        "pswapd    8(%1), %%mm0 \n\t"
        "pswapd     (%1), %%mm1 \n\t"
        "pfmul   (%3,%0), %%mm0 \n\t"
        "pfmul  8(%3,%0), %%mm1 \n\t"
        "movq  %%mm0,  (%2,%0)  \n\t"
        "movq  %%mm1, 8(%2,%0)  \n\t"
        "add   $16, %1 \n\t"
        "sub   $16, %0 \n\t"
        "jge   1b      \n\t"
        :"+r"(i), "+r"(src1)
        :"r"(dst), "r"(src0)
    );
    __asm__ volatile("femms");
}
static void vector_fmul_reverse_sse(float *dst, const float *src0, const float *src1, int len){
    x86_reg i = len*4-32;
    __asm__ volatile(
        "1: \n\t"
        "movaps        16(%1), %%xmm0 \n\t"
        "movaps          (%1), %%xmm1 \n\t"
        "shufps $0x1b, %%xmm0, %%xmm0 \n\t"
        "shufps $0x1b, %%xmm1, %%xmm1 \n\t"
        "mulps        (%3,%0), %%xmm0 \n\t"
        "mulps      16(%3,%0), %%xmm1 \n\t"
        "movaps %%xmm0,   (%2,%0)     \n\t"
        "movaps %%xmm1, 16(%2,%0)     \n\t"
        "add    $32, %1 \n\t"
        "sub    $32, %0 \n\t"
        "jge    1b      \n\t"
        :"+r"(i), "+r"(src1)
        :"r"(dst), "r"(src0)
    );
}

static void vector_fmul_add_3dnow(float *dst, const float *src0, const float *src1,
                                  const float *src2, int len){
    x86_reg i = (len-4)*4;
    __asm__ volatile(
        "1: \n\t"
        "movq    (%2,%0), %%mm0 \n\t"
        "movq   8(%2,%0), %%mm1 \n\t"
        "pfmul   (%3,%0), %%mm0 \n\t"
        "pfmul  8(%3,%0), %%mm1 \n\t"
        "pfadd   (%4,%0), %%mm0 \n\t"
        "pfadd  8(%4,%0), %%mm1 \n\t"
        "movq  %%mm0,  (%1,%0)  \n\t"
        "movq  %%mm1, 8(%1,%0)  \n\t"
        "sub  $16, %0 \n\t"
        "jge  1b      \n\t"
        :"+r"(i)
        :"r"(dst), "r"(src0), "r"(src1), "r"(src2)
        :"memory"
    );
    __asm__ volatile("femms");
}
static void vector_fmul_add_sse(float *dst, const float *src0, const float *src1,
                                const float *src2, int len){
    x86_reg i = (len-8)*4;
    __asm__ volatile(
        "1: \n\t"
        "movaps   (%2,%0), %%xmm0 \n\t"
        "movaps 16(%2,%0), %%xmm1 \n\t"
        "mulps    (%3,%0), %%xmm0 \n\t"
        "mulps  16(%3,%0), %%xmm1 \n\t"
        "addps    (%4,%0), %%xmm0 \n\t"
        "addps  16(%4,%0), %%xmm1 \n\t"
        "movaps %%xmm0,   (%1,%0) \n\t"
        "movaps %%xmm1, 16(%1,%0) \n\t"
        "sub  $32, %0 \n\t"
        "jge  1b      \n\t"
        :"+r"(i)
        :"r"(dst), "r"(src0), "r"(src1), "r"(src2)
        :"memory"
    );
}

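/* Overlap-add windowing (used by the MDCT-based audio decoders): the loop
 * walks the buffers from both ends at once (i forward, j backward) and
 * stores two mirrored output chunks per iteration, which is why six free
 * registers are required. */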
#if HAVE_6REGS
static void vector_fmul_window_3dnow2(float *dst, const float *src0, const float *src1,
                                      const float *win, int len){
    x86_reg i = -len*4;
    x86_reg j = len*4-8;
    __asm__ volatile(
        "1: \n"
        "pswapd  (%5,%1), %%mm1 \n"
        "movq    (%5,%0), %%mm0 \n"
        "pswapd  (%4,%1), %%mm5 \n"
        "movq    (%3,%0), %%mm4 \n"
        "movq      %%mm0, %%mm2 \n"
        "movq      %%mm1, %%mm3 \n"
        "pfmul     %%mm4, %%mm2 \n" // src0[len+i]*win[len+i]
        "pfmul     %%mm5, %%mm3 \n" // src1[ j]*win[len+j]
        "pfmul     %%mm4, %%mm1 \n" // src0[len+i]*win[len+j]
        "pfmul     %%mm5, %%mm0 \n" // src1[ j]*win[len+i]
        "pfadd     %%mm3, %%mm2 \n"
        "pfsub     %%mm0, %%mm1 \n"
        "pswapd    %%mm2, %%mm2 \n"
        "movq      %%mm1, (%2,%0) \n"
        "movq      %%mm2, (%2,%1) \n"
        "sub $8, %1 \n"
        "add $8, %0 \n"
        "jl 1b \n"
        "femms \n"
        :"+r"(i), "+r"(j)
        :"r"(dst+len), "r"(src0+len), "r"(src1), "r"(win+len)
    );
}

static void vector_fmul_window_sse(float *dst, const float *src0, const float *src1,
                                   const float *win, int len){
    x86_reg i = -len*4;
    x86_reg j = len*4-16;
    __asm__ volatile(
        "1: \n"
        "movaps       (%5,%1), %%xmm1 \n"
        "movaps       (%5,%0), %%xmm0 \n"
        "movaps       (%4,%1), %%xmm5 \n"
        "movaps       (%3,%0), %%xmm4 \n"
        "shufps $0x1b, %%xmm1, %%xmm1 \n"
        "shufps $0x1b, %%xmm5, %%xmm5 \n"
        "movaps        %%xmm0, %%xmm2 \n"
        "movaps        %%xmm1, %%xmm3 \n"
        "mulps         %%xmm4, %%xmm2 \n" // src0[len+i]*win[len+i]
        "mulps         %%xmm5, %%xmm3 \n" // src1[ j]*win[len+j]
        "mulps         %%xmm4, %%xmm1 \n" // src0[len+i]*win[len+j]
        "mulps         %%xmm5, %%xmm0 \n" // src1[ j]*win[len+i]
        "addps         %%xmm3, %%xmm2 \n"
        "subps         %%xmm0, %%xmm1 \n"
        "shufps $0x1b, %%xmm2, %%xmm2 \n"
        "movaps        %%xmm1, (%2,%0) \n"
        "movaps        %%xmm2, (%2,%1) \n"
        "sub $16, %1 \n"
        "add $16, %0 \n"
        "jl 1b \n"
        :"+r"(i), "+r"(j)
        :"r"(dst+len), "r"(src0+len), "r"(src1), "r"(win+len)
    );
}
#endif /* HAVE_6REGS */

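/* Clamp every float in src to [min, max]: the two bounds are broadcast to
 * xmm4/xmm5 once, then 16 floats are clipped per iteration with
 * maxps/minps. */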
static void vector_clipf_sse(float *dst, const float *src, float min, float max,
                             int len)
{
    x86_reg i = (len-16)*4;
    __asm__ volatile(
        "movss  %3, %%xmm4 \n"
        "movss  %4, %%xmm5 \n"
        "shufps $0, %%xmm4, %%xmm4 \n"
        "shufps $0, %%xmm5, %%xmm5 \n"
        "1: \n\t"
        "movaps   (%2,%0), %%xmm0 \n\t" // 3/1 on intel
        "movaps 16(%2,%0), %%xmm1 \n\t"
        "movaps 32(%2,%0), %%xmm2 \n\t"
        "movaps 48(%2,%0), %%xmm3 \n\t"
        "maxps     %%xmm4, %%xmm0 \n\t"
        "maxps     %%xmm4, %%xmm1 \n\t"
        "maxps     %%xmm4, %%xmm2 \n\t"
        "maxps     %%xmm4, %%xmm3 \n\t"
        "minps     %%xmm5, %%xmm0 \n\t"
        "minps     %%xmm5, %%xmm1 \n\t"
        "minps     %%xmm5, %%xmm2 \n\t"
        "minps     %%xmm5, %%xmm3 \n\t"
        "movaps %%xmm0,   (%1,%0) \n\t"
        "movaps %%xmm1, 16(%1,%0) \n\t"
        "movaps %%xmm2, 32(%1,%0) \n\t"
        "movaps %%xmm3, 48(%1,%0) \n\t"
        "sub  $64, %0 \n\t"
        "jge  1b      \n\t"
        :"+&r"(i)
        :"r"(dst), "r"(src), "m"(min), "m"(max)
        :"memory"
    );
}

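/* Prototypes for routines implemented in other x86 files (yasm or
 * per-codec DSP sources). */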
void ff_vp3_idct_mmx(int16_t *input_data);
void ff_vp3_idct_put_mmx(uint8_t *dest, int line_size, DCTELEM *block);
void ff_vp3_idct_add_mmx(uint8_t *dest, int line_size, DCTELEM *block);

void ff_vp3_idct_dc_add_mmx2(uint8_t *dest, int line_size, const DCTELEM *block);

void ff_vp3_v_loop_filter_mmx2(uint8_t *src, int stride, int *bounding_values);
void ff_vp3_h_loop_filter_mmx2(uint8_t *src, int stride, int *bounding_values);

void ff_vp3_idct_sse2(int16_t *input_data);
void ff_vp3_idct_put_sse2(uint8_t *dest, int line_size, DCTELEM *block);
void ff_vp3_idct_add_sse2(uint8_t *dest, int line_size, DCTELEM *block);

int32_t ff_scalarproduct_int16_mmx2(const int16_t *v1, const int16_t *v2, int order, int shift);
int32_t ff_scalarproduct_int16_sse2(const int16_t *v1, const int16_t *v2, int order, int shift);
int32_t ff_scalarproduct_and_madd_int16_mmx2(int16_t *v1, const int16_t *v2, const int16_t *v3, int order, int mul);
int32_t ff_scalarproduct_and_madd_int16_sse2(int16_t *v1, const int16_t *v2, const int16_t *v3, int order, int mul);
int32_t ff_scalarproduct_and_madd_int16_ssse3(int16_t *v1, const int16_t *v2, const int16_t *v3, int order, int mul);
void ff_add_hfyu_median_prediction_mmx2(uint8_t *dst, const uint8_t *top, const uint8_t *diff, int w, int *left, int *left_top);
int  ff_add_hfyu_left_prediction_ssse3(uint8_t *dst, const uint8_t *src, int w, int left);
int  ff_add_hfyu_left_prediction_sse4(uint8_t *dst, const uint8_t *src, int w, int left);

float ff_scalarproduct_float_sse(const float *v1, const float *v2, int order);

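/* Runtime dispatch: query the CPU once, optionally override the flags via
 * avctx->dsp_mask, then fill the DSPContext tables with the best
 * MMX/MMX2/3DNow!/SSE/SSE2/SSSE3 implementation available. */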
void dsputil_init_mmx(DSPContext* c, AVCodecContext *avctx)
{
    int mm_flags = av_get_cpu_flags();

    if (avctx->dsp_mask) {
        if (avctx->dsp_mask & AV_CPU_FLAG_FORCE)
            mm_flags |= (avctx->dsp_mask & 0xffff);
        else
            mm_flags &= ~(avctx->dsp_mask & 0xffff);
    }

#if 0
    av_log(avctx, AV_LOG_INFO, "libavcodec: CPU flags:");
    if (mm_flags & AV_CPU_FLAG_MMX)
        av_log(avctx, AV_LOG_INFO, " mmx");
    if (mm_flags & AV_CPU_FLAG_MMX2)
        av_log(avctx, AV_LOG_INFO, " mmx2");
    if (mm_flags & AV_CPU_FLAG_3DNOW)
        av_log(avctx, AV_LOG_INFO, " 3dnow");
    if (mm_flags & AV_CPU_FLAG_SSE)
        av_log(avctx, AV_LOG_INFO, " sse");
    if (mm_flags & AV_CPU_FLAG_SSE2)
        av_log(avctx, AV_LOG_INFO, " sse2");
    av_log(avctx, AV_LOG_INFO, "\n");
#endif

    if (mm_flags & AV_CPU_FLAG_MMX) {
        const int idct_algo= avctx->idct_algo;

        if(avctx->lowres==0){
            if(idct_algo==FF_IDCT_AUTO || idct_algo==FF_IDCT_SIMPLEMMX){
                c->idct_put= ff_simple_idct_put_mmx;
                c->idct_add= ff_simple_idct_add_mmx;
                c->idct    = ff_simple_idct_mmx;
                c->idct_permutation_type= FF_SIMPLE_IDCT_PERM;
#if CONFIG_GPL
            }else if(idct_algo==FF_IDCT_LIBMPEG2MMX){
                if(mm_flags & AV_CPU_FLAG_MMX2){
                    c->idct_put= ff_libmpeg2mmx2_idct_put;
                    c->idct_add= ff_libmpeg2mmx2_idct_add;
                    c->idct    = ff_mmxext_idct;
                }else{
                    c->idct_put= ff_libmpeg2mmx_idct_put;
                    c->idct_add= ff_libmpeg2mmx_idct_add;
                    c->idct    = ff_mmx_idct;
                }
                c->idct_permutation_type= FF_LIBMPEG2_IDCT_PERM;
#endif
            }else if((CONFIG_VP3_DECODER || CONFIG_VP5_DECODER || CONFIG_VP6_DECODER) &&
                     idct_algo==FF_IDCT_VP3 && HAVE_YASM){
                if(mm_flags & AV_CPU_FLAG_SSE2){
                    c->idct_put= ff_vp3_idct_put_sse2;
                    c->idct_add= ff_vp3_idct_add_sse2;
                    c->idct    = ff_vp3_idct_sse2;
                    c->idct_permutation_type= FF_TRANSPOSE_IDCT_PERM;
                }else{
                    c->idct_put= ff_vp3_idct_put_mmx;
                    c->idct_add= ff_vp3_idct_add_mmx;
                    c->idct    = ff_vp3_idct_mmx;
                    c->idct_permutation_type= FF_PARTTRANS_IDCT_PERM;
                }
            }else if(idct_algo==FF_IDCT_CAVS){
                c->idct_permutation_type= FF_TRANSPOSE_IDCT_PERM;
            }else if(idct_algo==FF_IDCT_XVIDMMX){
                if(mm_flags & AV_CPU_FLAG_SSE2){
                    c->idct_put= ff_idct_xvid_sse2_put;
                    c->idct_add= ff_idct_xvid_sse2_add;
                    c->idct    = ff_idct_xvid_sse2;
                    c->idct_permutation_type= FF_SSE2_IDCT_PERM;
                }else if(mm_flags & AV_CPU_FLAG_MMX2){
                    c->idct_put= ff_idct_xvid_mmx2_put;
                    c->idct_add= ff_idct_xvid_mmx2_add;
                    c->idct    = ff_idct_xvid_mmx2;
                }else{
                    c->idct_put= ff_idct_xvid_mmx_put;
                    c->idct_add= ff_idct_xvid_mmx_add;
                    c->idct    = ff_idct_xvid_mmx;
                }
            }
        }

        c->put_pixels_clamped = ff_put_pixels_clamped_mmx;
        c->put_signed_pixels_clamped = ff_put_signed_pixels_clamped_mmx;
        c->add_pixels_clamped = ff_add_pixels_clamped_mmx;
        c->clear_block  = clear_block_mmx;
        c->clear_blocks = clear_blocks_mmx;
        if ((mm_flags & AV_CPU_FLAG_SSE) &&
            !(CONFIG_MPEG_XVMC_DECODER && avctx->xvmc_acceleration > 1)){
            /* XvMCCreateBlocks() may not allocate 16-byte aligned blocks */
            c->clear_block  = clear_block_sse;
            c->clear_blocks = clear_blocks_sse;
        }

#define SET_HPEL_FUNCS(PFX, IDX, SIZE, CPU) \
        c->PFX ## _pixels_tab[IDX][0] = PFX ## _pixels ## SIZE ## _     ## CPU; \
        c->PFX ## _pixels_tab[IDX][1] = PFX ## _pixels ## SIZE ## _x2_  ## CPU; \
        c->PFX ## _pixels_tab[IDX][2] = PFX ## _pixels ## SIZE ## _y2_  ## CPU; \
        c->PFX ## _pixels_tab[IDX][3] = PFX ## _pixels ## SIZE ## _xy2_ ## CPU

        SET_HPEL_FUNCS(put, 0, 16, mmx);
        SET_HPEL_FUNCS(put_no_rnd, 0, 16, mmx);
        SET_HPEL_FUNCS(avg, 0, 16, mmx);
        SET_HPEL_FUNCS(avg_no_rnd, 0, 16, mmx);
        SET_HPEL_FUNCS(put, 1, 8, mmx);
        SET_HPEL_FUNCS(put_no_rnd, 1, 8, mmx);
        SET_HPEL_FUNCS(avg, 1, 8, mmx);
        SET_HPEL_FUNCS(avg_no_rnd, 1, 8, mmx);

#if ARCH_X86_32 || !HAVE_YASM
        c->gmc= gmc_mmx;
#endif
#if ARCH_X86_32 && HAVE_YASM
        c->emulated_edge_mc = emulated_edge_mc_mmx;
#endif

        c->add_bytes= add_bytes_mmx;
        c->add_bytes_l2= add_bytes_l2_mmx;

        c->draw_edges = draw_edges_mmx;

        if (CONFIG_H263_DECODER || CONFIG_H263_ENCODER) {
            c->h263_v_loop_filter= h263_v_loop_filter_mmx;
            c->h263_h_loop_filter= h263_h_loop_filter_mmx;
        }

#if HAVE_YASM
        c->put_h264_chroma_pixels_tab[0]= ff_put_h264_chroma_mc8_mmx_rnd;
        c->put_h264_chroma_pixels_tab[1]= ff_put_h264_chroma_mc4_mmx;
        c->put_no_rnd_vc1_chroma_pixels_tab[0]= ff_put_vc1_chroma_mc8_mmx_nornd;

        c->put_rv40_chroma_pixels_tab[0]= ff_put_rv40_chroma_mc8_mmx;
        c->put_rv40_chroma_pixels_tab[1]= ff_put_rv40_chroma_mc4_mmx;
#endif

        if (mm_flags & AV_CPU_FLAG_MMX2) {
            c->prefetch = prefetch_mmx2;

            c->put_pixels_tab[0][1] = put_pixels16_x2_mmx2;
            c->put_pixels_tab[0][2] = put_pixels16_y2_mmx2;

            c->avg_pixels_tab[0][0] = avg_pixels16_mmx2;
            c->avg_pixels_tab[0][1] = avg_pixels16_x2_mmx2;
            c->avg_pixels_tab[0][2] = avg_pixels16_y2_mmx2;

            c->put_pixels_tab[1][1] = put_pixels8_x2_mmx2;
            c->put_pixels_tab[1][2] = put_pixels8_y2_mmx2;

            c->avg_pixels_tab[1][0] = avg_pixels8_mmx2;
            c->avg_pixels_tab[1][1] = avg_pixels8_x2_mmx2;
            c->avg_pixels_tab[1][2] = avg_pixels8_y2_mmx2;

            if(!(avctx->flags & CODEC_FLAG_BITEXACT)){
                c->put_no_rnd_pixels_tab[0][1] = put_no_rnd_pixels16_x2_mmx2;
                c->put_no_rnd_pixels_tab[0][2] = put_no_rnd_pixels16_y2_mmx2;
                c->put_no_rnd_pixels_tab[1][1] = put_no_rnd_pixels8_x2_mmx2;
                c->put_no_rnd_pixels_tab[1][2] = put_no_rnd_pixels8_y2_mmx2;
                c->avg_pixels_tab[0][3] = avg_pixels16_xy2_mmx2;
                c->avg_pixels_tab[1][3] = avg_pixels8_xy2_mmx2;

                if (CONFIG_VP3_DECODER && HAVE_YASM) {
                    c->vp3_v_loop_filter= ff_vp3_v_loop_filter_mmx2;
                    c->vp3_h_loop_filter= ff_vp3_h_loop_filter_mmx2;
                }
            }
            if (CONFIG_VP3_DECODER && HAVE_YASM) {
                c->vp3_idct_dc_add = ff_vp3_idct_dc_add_mmx2;
            }

            if (CONFIG_VP3_DECODER
                && (avctx->codec_id == CODEC_ID_VP3 || avctx->codec_id == CODEC_ID_THEORA)) {
                c->put_no_rnd_pixels_tab[1][1] = put_no_rnd_pixels8_x2_exact_mmx2;
                c->put_no_rnd_pixels_tab[1][2] = put_no_rnd_pixels8_y2_exact_mmx2;
            }

#define SET_QPEL_FUNCS(PFX, IDX, SIZE, CPU) \
            c->PFX ## _pixels_tab[IDX][ 0] = PFX ## SIZE ## _mc00_ ## CPU; \
            c->PFX ## _pixels_tab[IDX][ 1] = PFX ## SIZE ## _mc10_ ## CPU; \
            c->PFX ## _pixels_tab[IDX][ 2] = PFX ## SIZE ## _mc20_ ## CPU; \
            c->PFX ## _pixels_tab[IDX][ 3] = PFX ## SIZE ## _mc30_ ## CPU; \
            c->PFX ## _pixels_tab[IDX][ 4] = PFX ## SIZE ## _mc01_ ## CPU; \
            c->PFX ## _pixels_tab[IDX][ 5] = PFX ## SIZE ## _mc11_ ## CPU; \
            c->PFX ## _pixels_tab[IDX][ 6] = PFX ## SIZE ## _mc21_ ## CPU; \
            c->PFX ## _pixels_tab[IDX][ 7] = PFX ## SIZE ## _mc31_ ## CPU; \
            c->PFX ## _pixels_tab[IDX][ 8] = PFX ## SIZE ## _mc02_ ## CPU; \
            c->PFX ## _pixels_tab[IDX][ 9] = PFX ## SIZE ## _mc12_ ## CPU; \
            c->PFX ## _pixels_tab[IDX][10] = PFX ## SIZE ## _mc22_ ## CPU; \
            c->PFX ## _pixels_tab[IDX][11] = PFX ## SIZE ## _mc32_ ## CPU; \
            c->PFX ## _pixels_tab[IDX][12] = PFX ## SIZE ## _mc03_ ## CPU; \
            c->PFX ## _pixels_tab[IDX][13] = PFX ## SIZE ## _mc13_ ## CPU; \
            c->PFX ## _pixels_tab[IDX][14] = PFX ## SIZE ## _mc23_ ## CPU; \
            c->PFX ## _pixels_tab[IDX][15] = PFX ## SIZE ## _mc33_ ## CPU

            SET_QPEL_FUNCS(put_qpel, 0, 16, mmx2);
            SET_QPEL_FUNCS(put_qpel, 1, 8, mmx2);
            SET_QPEL_FUNCS(put_no_rnd_qpel, 0, 16, mmx2);
            SET_QPEL_FUNCS(put_no_rnd_qpel, 1, 8, mmx2);
            SET_QPEL_FUNCS(avg_qpel, 0, 16, mmx2);
            SET_QPEL_FUNCS(avg_qpel, 1, 8, mmx2);

            SET_QPEL_FUNCS(put_h264_qpel, 0, 16, mmx2);
            SET_QPEL_FUNCS(put_h264_qpel, 1, 8, mmx2);
            SET_QPEL_FUNCS(put_h264_qpel, 2, 4, mmx2);
            SET_QPEL_FUNCS(avg_h264_qpel, 0, 16, mmx2);
            SET_QPEL_FUNCS(avg_h264_qpel, 1, 8, mmx2);
            SET_QPEL_FUNCS(avg_h264_qpel, 2, 4, mmx2);

            SET_QPEL_FUNCS(put_2tap_qpel, 0, 16, mmx2);
            SET_QPEL_FUNCS(put_2tap_qpel, 1, 8, mmx2);
            SET_QPEL_FUNCS(avg_2tap_qpel, 0, 16, mmx2);
            SET_QPEL_FUNCS(avg_2tap_qpel, 1, 8, mmx2);

#if HAVE_YASM
            c->avg_rv40_chroma_pixels_tab[0]= ff_avg_rv40_chroma_mc8_mmx2;
            c->avg_rv40_chroma_pixels_tab[1]= ff_avg_rv40_chroma_mc4_mmx2;

            c->avg_no_rnd_vc1_chroma_pixels_tab[0]= ff_avg_vc1_chroma_mc8_mmx2_nornd;

            c->avg_h264_chroma_pixels_tab[0]= ff_avg_h264_chroma_mc8_mmx2_rnd;
            c->avg_h264_chroma_pixels_tab[1]= ff_avg_h264_chroma_mc4_mmx2;
            c->avg_h264_chroma_pixels_tab[2]= ff_avg_h264_chroma_mc2_mmx2;
            c->put_h264_chroma_pixels_tab[2]= ff_put_h264_chroma_mc2_mmx2;

            c->add_hfyu_median_prediction = ff_add_hfyu_median_prediction_mmx2;
#endif
#if HAVE_7REGS && HAVE_TEN_OPERANDS
            if( mm_flags&AV_CPU_FLAG_3DNOW )
                c->add_hfyu_median_prediction = add_hfyu_median_prediction_cmov;
#endif

            if (CONFIG_VC1_DECODER)
                ff_vc1dsp_init_mmx(c, avctx);

            c->add_png_paeth_prediction= add_png_paeth_prediction_mmx2;
        } else if (mm_flags & AV_CPU_FLAG_3DNOW) {
            c->prefetch = prefetch_3dnow;

            c->put_pixels_tab[0][1] = put_pixels16_x2_3dnow;
            c->put_pixels_tab[0][2] = put_pixels16_y2_3dnow;

            c->avg_pixels_tab[0][0] = avg_pixels16_3dnow;
            c->avg_pixels_tab[0][1] = avg_pixels16_x2_3dnow;
            c->avg_pixels_tab[0][2] = avg_pixels16_y2_3dnow;

            c->put_pixels_tab[1][1] = put_pixels8_x2_3dnow;
            c->put_pixels_tab[1][2] = put_pixels8_y2_3dnow;

            c->avg_pixels_tab[1][0] = avg_pixels8_3dnow;
            c->avg_pixels_tab[1][1] = avg_pixels8_x2_3dnow;
            c->avg_pixels_tab[1][2] = avg_pixels8_y2_3dnow;

            if(!(avctx->flags & CODEC_FLAG_BITEXACT)){
                c->put_no_rnd_pixels_tab[0][1] = put_no_rnd_pixels16_x2_3dnow;
                c->put_no_rnd_pixels_tab[0][2] = put_no_rnd_pixels16_y2_3dnow;
                c->put_no_rnd_pixels_tab[1][1] = put_no_rnd_pixels8_x2_3dnow;
                c->put_no_rnd_pixels_tab[1][2] = put_no_rnd_pixels8_y2_3dnow;
                c->avg_pixels_tab[0][3] = avg_pixels16_xy2_3dnow;
                c->avg_pixels_tab[1][3] = avg_pixels8_xy2_3dnow;
            }

            if (CONFIG_VP3_DECODER
                && (avctx->codec_id == CODEC_ID_VP3 || avctx->codec_id == CODEC_ID_THEORA)) {
                c->put_no_rnd_pixels_tab[1][1] = put_no_rnd_pixels8_x2_exact_3dnow;
                c->put_no_rnd_pixels_tab[1][2] = put_no_rnd_pixels8_y2_exact_3dnow;
            }

            SET_QPEL_FUNCS(put_qpel, 0, 16, 3dnow);
            SET_QPEL_FUNCS(put_qpel, 1, 8, 3dnow);
            SET_QPEL_FUNCS(put_no_rnd_qpel, 0, 16, 3dnow);
            SET_QPEL_FUNCS(put_no_rnd_qpel, 1, 8, 3dnow);
            SET_QPEL_FUNCS(avg_qpel, 0, 16, 3dnow);
            SET_QPEL_FUNCS(avg_qpel, 1, 8, 3dnow);

            SET_QPEL_FUNCS(put_h264_qpel, 0, 16, 3dnow);
            SET_QPEL_FUNCS(put_h264_qpel, 1, 8, 3dnow);
            SET_QPEL_FUNCS(put_h264_qpel, 2, 4, 3dnow);
            SET_QPEL_FUNCS(avg_h264_qpel, 0, 16, 3dnow);
            SET_QPEL_FUNCS(avg_h264_qpel, 1, 8, 3dnow);
            SET_QPEL_FUNCS(avg_h264_qpel, 2, 4, 3dnow);

            SET_QPEL_FUNCS(put_2tap_qpel, 0, 16, 3dnow);
            SET_QPEL_FUNCS(put_2tap_qpel, 1, 8, 3dnow);
            SET_QPEL_FUNCS(avg_2tap_qpel, 0, 16, 3dnow);
            SET_QPEL_FUNCS(avg_2tap_qpel, 1, 8, 3dnow);

#if HAVE_YASM
            c->avg_h264_chroma_pixels_tab[0]= ff_avg_h264_chroma_mc8_3dnow_rnd;
            c->avg_h264_chroma_pixels_tab[1]= ff_avg_h264_chroma_mc4_3dnow;

            c->avg_no_rnd_vc1_chroma_pixels_tab[0]= ff_avg_vc1_chroma_mc8_3dnow_nornd;

            c->avg_rv40_chroma_pixels_tab[0]= ff_avg_rv40_chroma_mc8_3dnow;
            c->avg_rv40_chroma_pixels_tab[1]= ff_avg_rv40_chroma_mc4_3dnow;
#endif
        }


#define H264_QPEL_FUNCS(x, y, CPU)\
        c->put_h264_qpel_pixels_tab[0][x+y*4] = put_h264_qpel16_mc##x##y##_##CPU;\
        c->put_h264_qpel_pixels_tab[1][x+y*4] = put_h264_qpel8_mc##x##y##_##CPU;\
        c->avg_h264_qpel_pixels_tab[0][x+y*4] = avg_h264_qpel16_mc##x##y##_##CPU;\
        c->avg_h264_qpel_pixels_tab[1][x+y*4] = avg_h264_qpel8_mc##x##y##_##CPU;
        if((mm_flags & AV_CPU_FLAG_SSE2) && !(mm_flags & AV_CPU_FLAG_3DNOW)){
            // these functions are slower than mmx on AMD, but faster on Intel
            c->put_pixels_tab[0][0]        = put_pixels16_sse2;
            c->put_no_rnd_pixels_tab[0][0] = put_pixels16_sse2;
            c->avg_pixels_tab[0][0]        = avg_pixels16_sse2;
            H264_QPEL_FUNCS(0, 0, sse2);
        }
        if(mm_flags & AV_CPU_FLAG_SSE2){
            H264_QPEL_FUNCS(0, 1, sse2);
            H264_QPEL_FUNCS(0, 2, sse2);
            H264_QPEL_FUNCS(0, 3, sse2);
            H264_QPEL_FUNCS(1, 1, sse2);
            H264_QPEL_FUNCS(1, 2, sse2);
            H264_QPEL_FUNCS(1, 3, sse2);
            H264_QPEL_FUNCS(2, 1, sse2);
            H264_QPEL_FUNCS(2, 2, sse2);
            H264_QPEL_FUNCS(2, 3, sse2);
            H264_QPEL_FUNCS(3, 1, sse2);
            H264_QPEL_FUNCS(3, 2, sse2);
            H264_QPEL_FUNCS(3, 3, sse2);
        }
#if HAVE_SSSE3
        if(mm_flags & AV_CPU_FLAG_SSSE3){
            H264_QPEL_FUNCS(1, 0, ssse3);
            H264_QPEL_FUNCS(1, 1, ssse3);
            H264_QPEL_FUNCS(1, 2, ssse3);
            H264_QPEL_FUNCS(1, 3, ssse3);
            H264_QPEL_FUNCS(2, 0, ssse3);
            H264_QPEL_FUNCS(2, 1, ssse3);
            H264_QPEL_FUNCS(2, 2, ssse3);
            H264_QPEL_FUNCS(2, 3, ssse3);
            H264_QPEL_FUNCS(3, 0, ssse3);
            H264_QPEL_FUNCS(3, 1, ssse3);
            H264_QPEL_FUNCS(3, 2, ssse3);
            H264_QPEL_FUNCS(3, 3, ssse3);
            c->add_png_paeth_prediction= add_png_paeth_prediction_ssse3;
#if HAVE_YASM
            c->put_no_rnd_vc1_chroma_pixels_tab[0]= ff_put_vc1_chroma_mc8_ssse3_nornd;
            c->avg_no_rnd_vc1_chroma_pixels_tab[0]= ff_avg_vc1_chroma_mc8_ssse3_nornd;
            c->put_h264_chroma_pixels_tab[0]= ff_put_h264_chroma_mc8_ssse3_rnd;
            c->avg_h264_chroma_pixels_tab[0]= ff_avg_h264_chroma_mc8_ssse3_rnd;
            c->put_h264_chroma_pixels_tab[1]= ff_put_h264_chroma_mc4_ssse3;
            c->avg_h264_chroma_pixels_tab[1]= ff_avg_h264_chroma_mc4_ssse3;
            c->add_hfyu_left_prediction = ff_add_hfyu_left_prediction_ssse3;
            if (mm_flags & AV_CPU_FLAG_SSE4) // not really sse4, just slow on Conroe
                c->add_hfyu_left_prediction = ff_add_hfyu_left_prediction_sse4;
#endif
        }
#endif

        if(mm_flags & AV_CPU_FLAG_3DNOW){
            c->vorbis_inverse_coupling = vorbis_inverse_coupling_3dnow;
            c->vector_fmul = vector_fmul_3dnow;
        }
        if(mm_flags & AV_CPU_FLAG_3DNOWEXT){
            c->vector_fmul_reverse = vector_fmul_reverse_3dnow2;
#if HAVE_6REGS
            c->vector_fmul_window = vector_fmul_window_3dnow2;
#endif
        }
        if(mm_flags & AV_CPU_FLAG_MMX2){
#if HAVE_YASM
            c->scalarproduct_int16 = ff_scalarproduct_int16_mmx2;
            c->scalarproduct_and_madd_int16 = ff_scalarproduct_and_madd_int16_mmx2;
#endif
        }
        if(mm_flags & AV_CPU_FLAG_SSE){
            c->vorbis_inverse_coupling = vorbis_inverse_coupling_sse;
            c->ac3_downmix = ac3_downmix_sse;
            c->vector_fmul = vector_fmul_sse;
            c->vector_fmul_reverse = vector_fmul_reverse_sse;
            c->vector_fmul_add = vector_fmul_add_sse;
#if HAVE_6REGS
            c->vector_fmul_window = vector_fmul_window_sse;
#endif
            c->vector_clipf = vector_clipf_sse;
#if HAVE_YASM
            c->scalarproduct_float = ff_scalarproduct_float_sse;
#endif
        }
        if(mm_flags & AV_CPU_FLAG_3DNOW)
            c->vector_fmul_add = vector_fmul_add_3dnow; // faster than sse
        if(mm_flags & AV_CPU_FLAG_SSE2){
#if HAVE_YASM
            c->scalarproduct_int16 = ff_scalarproduct_int16_sse2;
            c->scalarproduct_and_madd_int16 = ff_scalarproduct_and_madd_int16_sse2;

            c->emulated_edge_mc = emulated_edge_mc_sse;
            c->gmc= gmc_sse;
#endif
        }
        if((mm_flags & AV_CPU_FLAG_SSSE3) && !(mm_flags & (AV_CPU_FLAG_SSE42|AV_CPU_FLAG_3DNOW)) && HAVE_YASM) // cachesplit
            c->scalarproduct_and_madd_int16 = ff_scalarproduct_and_madd_int16_ssse3;
    }

    if (CONFIG_ENCODERS)
        dsputilenc_init_mmx(c, avctx);

#if 0
    // for speed testing
    get_pixels = just_return;
    put_pixels_clamped = just_return;
    add_pixels_clamped = just_return;

    pix_abs16x16 = just_return;
    pix_abs16x16_x2 = just_return;
    pix_abs16x16_y2 = just_return;
    pix_abs16x16_xy2 = just_return;

    put_pixels_tab[0] = just_return;
    put_pixels_tab[1] = just_return;
    put_pixels_tab[2] = just_return;
    put_pixels_tab[3] = just_return;

    put_no_rnd_pixels_tab[0] = just_return;
    put_no_rnd_pixels_tab[1] = just_return;
    put_no_rnd_pixels_tab[2] = just_return;
    put_no_rnd_pixels_tab[3] = just_return;

    avg_pixels_tab[0] = just_return;
    avg_pixels_tab[1] = just_return;
    avg_pixels_tab[2] = just_return;
    avg_pixels_tab[3] = just_return;

    avg_no_rnd_pixels_tab[0] = just_return;
    avg_no_rnd_pixels_tab[1] = just_return;
    avg_no_rnd_pixels_tab[2] = just_return;
    avg_no_rnd_pixels_tab[3] = just_return;

    //av_fdct = just_return;
    //ff_idct = just_return;
#endif
}