/* ffmpeg/libavcodec/ppc/fdct_altivec.c, this file is part of the
 * AltiVec optimized library for the FFMPEG Multimedia System
 * Copyright (C) 2003 James Klicman <james@klicman.org>
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */


#include "../common.h"
#include "../dsputil.h"
#include "dsputil_altivec.h"
#include "gcc_fixes.h"

#define vs16(v) ((vector signed short)(v))
#define vs32(v) ((vector signed int)(v))
#define vu8(v)  ((vector unsigned char)(v))
#define vu16(v) ((vector unsigned short)(v))
#define vu32(v) ((vector unsigned int)(v))


#define C1 0.98078525066375732421875000 /* cos(1*PI/16) */
#define C2 0.92387950420379638671875000 /* cos(2*PI/16) */
#define C3 0.83146959543228149414062500 /* cos(3*PI/16) */
#define C4 0.70710676908493041992187500 /* cos(4*PI/16) */
#define C5 0.55557024478912353515625000 /* cos(5*PI/16) */
#define C6 0.38268342614173889160156250 /* cos(6*PI/16) */
#define C7 0.19509032368659973144531250 /* cos(7*PI/16) */
#define SQRT_2 1.41421353816986083984375000 /* sqrt(2) */
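
/* The constants above appear to be written out as the full decimal
 * expansion of the nearest single-precision value, keeping the vector
 * literals below bit-exact across compilers. */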


#define W0 -(2 * C2)
#define W1 (2 * C6)
#define W2 (SQRT_2 * C6)
#define W3 (SQRT_2 * C3)
#define W4 (SQRT_2 * (-C1 + C3 + C5 - C7))
#define W5 (SQRT_2 * ( C1 + C3 - C5 + C7))
#define W6 (SQRT_2 * ( C1 + C3 + C5 - C7))
#define W7 (SQRT_2 * ( C1 + C3 - C5 - C7))
#define W8 (SQRT_2 * ( C7 - C3))
#define W9 (SQRT_2 * (-C1 - C3))
#define WA (SQRT_2 * (-C3 - C5))
#define WB (SQRT_2 * ( C5 - C3))
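
/* W0..WB are the combined rotation/scale factors of the 8-point DCT
 * butterfly used by FDCTROW/FDCTCOL below.  They are packed four per
 * vector in fdctconsts so each one can be fetched with a single
 * vec_splat from a register. */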


static vector float fdctconsts[3] = {
    (vector float)AVV( W0, W1, W2, W3 ),
    (vector float)AVV( W4, W5, W6, W7 ),
    (vector float)AVV( W8, W9, WA, WB )
};
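
/* AVV comes from gcc_fixes.h and hides the vector-literal syntax
 * differences between GCC releases. */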

#define LD_W0 vec_splat(cnsts0, 0)
#define LD_W1 vec_splat(cnsts0, 1)
#define LD_W2 vec_splat(cnsts0, 2)
#define LD_W3 vec_splat(cnsts0, 3)
#define LD_W4 vec_splat(cnsts1, 0)
#define LD_W5 vec_splat(cnsts1, 1)
#define LD_W6 vec_splat(cnsts1, 2)
#define LD_W7 vec_splat(cnsts1, 3)
#define LD_W8 vec_splat(cnsts2, 0)
#define LD_W9 vec_splat(cnsts2, 1)
#define LD_WA vec_splat(cnsts2, 2)
#define LD_WB vec_splat(cnsts2, 3)
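
/* Each LD_Wn expands to a single vspltw, replicating constant n across
 * all four lanes; cnsts0..cnsts2 stay live in registers for the whole
 * transform instead of being reloaded from memory. */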


#define FDCTROW(b0,b1,b2,b3,b4,b5,b6,b7) /* {{{ */ \
    x0 = vec_add(b0, b7);           /* x0 = b0 + b7; */ \
    x7 = vec_sub(b0, b7);           /* x7 = b0 - b7; */ \
    x1 = vec_add(b1, b6);           /* x1 = b1 + b6; */ \
    x6 = vec_sub(b1, b6);           /* x6 = b1 - b6; */ \
    x2 = vec_add(b2, b5);           /* x2 = b2 + b5; */ \
    x5 = vec_sub(b2, b5);           /* x5 = b2 - b5; */ \
    x3 = vec_add(b3, b4);           /* x3 = b3 + b4; */ \
    x4 = vec_sub(b3, b4);           /* x4 = b3 - b4; */ \
    \
    b7 = vec_add(x0, x3);           /* b7 = x0 + x3; */ \
    b1 = vec_add(x1, x2);           /* b1 = x1 + x2; */ \
    b0 = vec_add(b7, b1);           /* b0 = b7 + b1; */ \
    b4 = vec_sub(b7, b1);           /* b4 = b7 - b1; */ \
    \
    b2 = vec_sub(x0, x3);           /* b2 = x0 - x3; */ \
    b6 = vec_sub(x1, x2);           /* b6 = x1 - x2; */ \
    b5 = vec_add(b6, b2);           /* b5 = b6 + b2; */ \
    cnst = LD_W2; \
    b5 = vec_madd(cnst, b5, mzero); /* b5 = b5 * W2; */ \
    cnst = LD_W1; \
    b2 = vec_madd(cnst, b2, b5);    /* b2 = b5 + b2 * W1; */ \
    cnst = LD_W0; \
    b6 = vec_madd(cnst, b6, b5);    /* b6 = b5 + b6 * W0; */ \
    \
    x0 = vec_add(x4, x7);           /* x0 = x4 + x7; */ \
    x1 = vec_add(x5, x6);           /* x1 = x5 + x6; */ \
    x2 = vec_add(x4, x6);           /* x2 = x4 + x6; */ \
    x3 = vec_add(x5, x7);           /* x3 = x5 + x7; */ \
    x8 = vec_add(x2, x3);           /* x8 = x2 + x3; */ \
    cnst = LD_W3; \
    x8 = vec_madd(cnst, x8, mzero); /* x8 = x8 * W3; */ \
    \
    cnst = LD_W8; \
    x0 = vec_madd(cnst, x0, mzero); /* x0 *= W8; */ \
    cnst = LD_W9; \
    x1 = vec_madd(cnst, x1, mzero); /* x1 *= W9; */ \
    cnst = LD_WA; \
    x2 = vec_madd(cnst, x2, x8);    /* x2 = x2 * WA + x8; */ \
    cnst = LD_WB; \
    x3 = vec_madd(cnst, x3, x8);    /* x3 = x3 * WB + x8; */ \
    \
    cnst = LD_W4; \
    b7 = vec_madd(cnst, x4, x0);    /* b7 = x4 * W4 + x0; */ \
    cnst = LD_W5; \
    b5 = vec_madd(cnst, x5, x1);    /* b5 = x5 * W5 + x1; */ \
    cnst = LD_W6; \
    b3 = vec_madd(cnst, x6, x1);    /* b3 = x6 * W6 + x1; */ \
    cnst = LD_W7; \
    b1 = vec_madd(cnst, x7, x0);    /* b1 = x7 * W7 + x0; */ \
    \
    b7 = vec_add(b7, x2);           /* b7 = b7 + x2; */ \
    b5 = vec_add(b5, x3);           /* b5 = b5 + x3; */ \
    b3 = vec_add(b3, x2);           /* b3 = b3 + x2; */ \
    b1 = vec_add(b1, x3);           /* b1 = b1 + x3; */ \
    /* }}} */
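
/* FDCTROW performs four independent 8-point 1-D DCTs at once, one in
 * each vector lane, so two invocations cover all eight rows of the
 * block.  vec_madd(cnst, v, mzero) serves as a plain multiply: mzero is
 * -0.0, the only addend that leaves every float value, including signed
 * zeros, unchanged. */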

#define FDCTCOL(b0,b1,b2,b3,b4,b5,b6,b7) /* {{{ */ \
    x0 = vec_add(b0, b7);           /* x0 = b0 + b7; */ \
    x7 = vec_sub(b0, b7);           /* x7 = b0 - b7; */ \
    x1 = vec_add(b1, b6);           /* x1 = b1 + b6; */ \
    x6 = vec_sub(b1, b6);           /* x6 = b1 - b6; */ \
    x2 = vec_add(b2, b5);           /* x2 = b2 + b5; */ \
    x5 = vec_sub(b2, b5);           /* x5 = b2 - b5; */ \
    x3 = vec_add(b3, b4);           /* x3 = b3 + b4; */ \
    x4 = vec_sub(b3, b4);           /* x4 = b3 - b4; */ \
    \
    b7 = vec_add(x0, x3);           /* b7 = x0 + x3; */ \
    b1 = vec_add(x1, x2);           /* b1 = x1 + x2; */ \
    b0 = vec_add(b7, b1);           /* b0 = b7 + b1; */ \
    b4 = vec_sub(b7, b1);           /* b4 = b7 - b1; */ \
    \
    b2 = vec_sub(x0, x3);           /* b2 = x0 - x3; */ \
    b6 = vec_sub(x1, x2);           /* b6 = x1 - x2; */ \
    b5 = vec_add(b6, b2);           /* b5 = b6 + b2; */ \
    cnst = LD_W2; \
    b5 = vec_madd(cnst, b5, mzero); /* b5 = b5 * W2; */ \
    cnst = LD_W1; \
    b2 = vec_madd(cnst, b2, b5);    /* b2 = b5 + b2 * W1; */ \
    cnst = LD_W0; \
    b6 = vec_madd(cnst, b6, b5);    /* b6 = b5 + b6 * W0; */ \
    \
    x0 = vec_add(x4, x7);           /* x0 = x4 + x7; */ \
    x1 = vec_add(x5, x6);           /* x1 = x5 + x6; */ \
    x2 = vec_add(x4, x6);           /* x2 = x4 + x6; */ \
    x3 = vec_add(x5, x7);           /* x3 = x5 + x7; */ \
    x8 = vec_add(x2, x3);           /* x8 = x2 + x3; */ \
    cnst = LD_W3; \
    x8 = vec_madd(cnst, x8, mzero); /* x8 = x8 * W3; */ \
    \
    cnst = LD_W8; \
    x0 = vec_madd(cnst, x0, mzero); /* x0 *= W8; */ \
    cnst = LD_W9; \
    x1 = vec_madd(cnst, x1, mzero); /* x1 *= W9; */ \
    cnst = LD_WA; \
    x2 = vec_madd(cnst, x2, x8);    /* x2 = x2 * WA + x8; */ \
    cnst = LD_WB; \
    x3 = vec_madd(cnst, x3, x8);    /* x3 = x3 * WB + x8; */ \
    \
    cnst = LD_W4; \
    b7 = vec_madd(cnst, x4, x0);    /* b7 = x4 * W4 + x0; */ \
    cnst = LD_W5; \
    b5 = vec_madd(cnst, x5, x1);    /* b5 = x5 * W5 + x1; */ \
    cnst = LD_W6; \
    b3 = vec_madd(cnst, x6, x1);    /* b3 = x6 * W6 + x1; */ \
    cnst = LD_W7; \
    b1 = vec_madd(cnst, x7, x0);    /* b1 = x7 * W7 + x0; */ \
    \
    b7 = vec_add(b7, x2);           /* b7 += x2; */ \
    b5 = vec_add(b5, x3);           /* b5 += x3; */ \
    b3 = vec_add(b3, x2);           /* b3 += x2; */ \
    b1 = vec_add(b1, x3);           /* b1 += x3; */ \
    /* }}} */
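
/* FDCTCOL is arithmetically identical to FDCTROW; it repeats the same
 * butterfly for the column pass after the second transpose. */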



/* two dimensional discrete cosine transform: an 8x8 forward DCT computed
 * in place on the block of 16-bit coefficients */

void fdct_altivec(int16_t *block)
{
    POWERPC_PERF_DECLARE(altivec_fdct, 1);
#ifdef ALTIVEC_USE_REFERENCE_C_CODE
    POWERPC_PERF_START_COUNT(altivec_fdct, 1);
    void ff_jpeg_fdct_islow(int16_t *block);
    ff_jpeg_fdct_islow(block);
    POWERPC_PERF_STOP_COUNT(altivec_fdct, 1);
#else /* ALTIVEC_USE_REFERENCE_C_CODE */
    vector signed short *bp;
    vector float *cp;
    vector float b00, b10, b20, b30, b40, b50, b60, b70;
    vector float b01, b11, b21, b31, b41, b51, b61, b71;
    vector float mzero, cnst, cnsts0, cnsts1, cnsts2;
    vector float x0, x1, x2, x3, x4, x5, x6, x7, x8;
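
    /* Once widened to float, each row of eight coefficients occupies two
     * vectors: b<n>0 holds elements 0-3 of row n, b<n>1 elements 4-7. */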

    POWERPC_PERF_START_COUNT(altivec_fdct, 1);


    /* setup constants {{{ */
    /* mzero = -0.0 */
    vu32(mzero) = vec_splat_u32(-1);
    vu32(mzero) = vec_sl(vu32(mzero), vu32(mzero));
    cp = fdctconsts;
    cnsts0 = vec_ld(0, cp); cp++;
    cnsts1 = vec_ld(0, cp); cp++;
    cnsts2 = vec_ld(0, cp);
    /* }}} */
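
    /* The mzero trick above: vec_splat_u32(-1) fills each 32-bit lane
     * with 0xFFFFFFFF, and shifting every lane left by itself (vec_sl
     * honours only the low five bits of the count, i.e. 31) leaves
     * 0x80000000, the bit pattern of -0.0f. */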


    /* 8x8 matrix transpose (vector short[8]) {{{ */
#define MERGE_S16(hl,a,b) vec_merge##hl(vs16(a), vs16(b))

    bp = (vector signed short*)block;
    vs16(b00) = vec_ld(0, bp);
    vs16(b40) = vec_ld(16*4, bp);
    vs16(b01) = MERGE_S16(h, b00, b40);
    vs16(b11) = MERGE_S16(l, b00, b40);
    bp++;
    vs16(b10) = vec_ld(0, bp);
    vs16(b50) = vec_ld(16*4, bp);
    vs16(b21) = MERGE_S16(h, b10, b50);
    vs16(b31) = MERGE_S16(l, b10, b50);
    bp++;
    vs16(b20) = vec_ld(0, bp);
    vs16(b60) = vec_ld(16*4, bp);
    vs16(b41) = MERGE_S16(h, b20, b60);
    vs16(b51) = MERGE_S16(l, b20, b60);
    bp++;
    vs16(b30) = vec_ld(0, bp);
    vs16(b70) = vec_ld(16*4, bp);
    vs16(b61) = MERGE_S16(h, b30, b70);
    vs16(b71) = MERGE_S16(l, b30, b70);

    vs16(x0) = MERGE_S16(h, b01, b41);
    vs16(x1) = MERGE_S16(l, b01, b41);
    vs16(x2) = MERGE_S16(h, b11, b51);
    vs16(x3) = MERGE_S16(l, b11, b51);
    vs16(x4) = MERGE_S16(h, b21, b61);
    vs16(x5) = MERGE_S16(l, b21, b61);
    vs16(x6) = MERGE_S16(h, b31, b71);
    vs16(x7) = MERGE_S16(l, b31, b71);

    vs16(b00) = MERGE_S16(h, x0, x4);
    vs16(b10) = MERGE_S16(l, x0, x4);
    vs16(b20) = MERGE_S16(h, x1, x5);
    vs16(b30) = MERGE_S16(l, x1, x5);
    vs16(b40) = MERGE_S16(h, x2, x6);
    vs16(b50) = MERGE_S16(l, x2, x6);
    vs16(b60) = MERGE_S16(h, x3, x7);
    vs16(b70) = MERGE_S16(l, x3, x7);

#undef MERGE_S16
    /* }}} */
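
    /* Three rounds of vec_mergeh/vec_mergel on rows four apart interleave
     * the 16-bit elements into a full 8x8 transpose, after which each bN0
     * vector holds one column of the original block. */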


    /* Some of the initial calculations can be done as vector short before
     * conversion to vector float.  The following code section takes
     * advantage of this. */
#if 1
    /* fdct rows {{{ */
    vs16(x0) = vec_add(vs16(b00), vs16(b70));
    vs16(x7) = vec_sub(vs16(b00), vs16(b70));
    vs16(x1) = vec_add(vs16(b10), vs16(b60));
    vs16(x6) = vec_sub(vs16(b10), vs16(b60));
    vs16(x2) = vec_add(vs16(b20), vs16(b50));
    vs16(x5) = vec_sub(vs16(b20), vs16(b50));
    vs16(x3) = vec_add(vs16(b30), vs16(b40));
    vs16(x4) = vec_sub(vs16(b30), vs16(b40));

    vs16(b70) = vec_add(vs16(x0), vs16(x3));
    vs16(b10) = vec_add(vs16(x1), vs16(x2));

    vs16(b00) = vec_add(vs16(b70), vs16(b10));
    vs16(b40) = vec_sub(vs16(b70), vs16(b10));

#define CTF0(n) \
    vs32(b##n##1) = vec_unpackl(vs16(b##n##0)); \
    vs32(b##n##0) = vec_unpackh(vs16(b##n##0)); \
    b##n##1 = vec_ctf(vs32(b##n##1), 0); \
    b##n##0 = vec_ctf(vs32(b##n##0), 0);

    CTF0(0);
    CTF0(4);

    vs16(b20) = vec_sub(vs16(x0), vs16(x3));
    vs16(b60) = vec_sub(vs16(x1), vs16(x2));

    CTF0(2);
    CTF0(6);

#undef CTF0
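
    /* The butterfly stages above involve only additions and subtractions,
     * which stay well inside 16 bits for the sample-difference data this
     * fdct is fed, so they run on vector short and are only then widened
     * (vec_unpackh/vec_unpackl) and converted to float (vec_ctf). */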

    x0 = vec_add(b60, b20);
    x1 = vec_add(b61, b21);

    cnst = LD_W2;
    x0 = vec_madd(cnst, x0, mzero);
    x1 = vec_madd(cnst, x1, mzero);
    cnst = LD_W1;
    b20 = vec_madd(cnst, b20, x0);
    b21 = vec_madd(cnst, b21, x1);
    cnst = LD_W0;
    b60 = vec_madd(cnst, b60, x0);
    b61 = vec_madd(cnst, b61, x1);

#define CTFX(x,b) \
    vs32(b##0) = vec_unpackh(vs16(x)); \
    vs32(b##1) = vec_unpackl(vs16(x)); \
    b##0 = vec_ctf(vs32(b##0), 0); \
    b##1 = vec_ctf(vs32(b##1), 0);

    CTFX(x4, b7);
    CTFX(x5, b5);
    CTFX(x6, b3);
    CTFX(x7, b1);

#undef CTFX


    x0 = vec_add(b70, b10);
    x1 = vec_add(b50, b30);
    x2 = vec_add(b70, b30);
    x3 = vec_add(b50, b10);
    x8 = vec_add(x2, x3);
    cnst = LD_W3;
    x8 = vec_madd(cnst, x8, mzero);

    cnst = LD_W8;
    x0 = vec_madd(cnst, x0, mzero);
    cnst = LD_W9;
    x1 = vec_madd(cnst, x1, mzero);
    cnst = LD_WA;
    x2 = vec_madd(cnst, x2, x8);
    cnst = LD_WB;
    x3 = vec_madd(cnst, x3, x8);

    cnst = LD_W4;
    b70 = vec_madd(cnst, b70, x0);
    cnst = LD_W5;
    b50 = vec_madd(cnst, b50, x1);
    cnst = LD_W6;
    b30 = vec_madd(cnst, b30, x1);
    cnst = LD_W7;
    b10 = vec_madd(cnst, b10, x0);

    b70 = vec_add(b70, x2);
    b50 = vec_add(b50, x3);
    b30 = vec_add(b30, x2);
    b10 = vec_add(b10, x3);
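
    /* Repeat the same odd-part arithmetic for the b*1 vectors, i.e.
     * elements 4-7 of each row. */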
    x0 = vec_add(b71, b11);
    x1 = vec_add(b51, b31);
    x2 = vec_add(b71, b31);
    x3 = vec_add(b51, b11);
    x8 = vec_add(x2, x3);
    cnst = LD_W3;
    x8 = vec_madd(cnst, x8, mzero);

    cnst = LD_W8;
    x0 = vec_madd(cnst, x0, mzero);
    cnst = LD_W9;
    x1 = vec_madd(cnst, x1, mzero);
    cnst = LD_WA;
    x2 = vec_madd(cnst, x2, x8);
    cnst = LD_WB;
    x3 = vec_madd(cnst, x3, x8);

    cnst = LD_W4;
    b71 = vec_madd(cnst, b71, x0);
    cnst = LD_W5;
    b51 = vec_madd(cnst, b51, x1);
    cnst = LD_W6;
    b31 = vec_madd(cnst, b31, x1);
    cnst = LD_W7;
    b11 = vec_madd(cnst, b11, x0);

    b71 = vec_add(b71, x2);
    b51 = vec_add(b51, x3);
    b31 = vec_add(b31, x2);
    b11 = vec_add(b11, x3);
    /* }}} */
#else
    /* convert to float {{{ */
#define CTF(n) \
    vs32(b##n##1) = vec_unpackl(vs16(b##n##0)); \
    vs32(b##n##0) = vec_unpackh(vs16(b##n##0)); \
    b##n##1 = vec_ctf(vs32(b##n##1), 0); \
    b##n##0 = vec_ctf(vs32(b##n##0), 0);

    CTF(0);
    CTF(1);
    CTF(2);
    CTF(3);
    CTF(4);
    CTF(5);
    CTF(6);
    CTF(7);

#undef CTF
    /* }}} */

    FDCTROW(b00, b10, b20, b30, b40, b50, b60, b70);
    FDCTROW(b01, b11, b21, b31, b41, b51, b61, b71);
#endif
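
    /* The #else branch is the straightforward variant: widen everything
     * to float first, then run FDCTROW twice.  The #if 1 path above does
     * the first butterfly stages in 16-bit integer arithmetic instead. */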


    /* 8x8 matrix transpose (vector float[8][2]) {{{ */
    x0 = vec_mergel(b00, b20);
    x1 = vec_mergeh(b00, b20);
    x2 = vec_mergel(b10, b30);
    x3 = vec_mergeh(b10, b30);

    b00 = vec_mergeh(x1, x3);
    b10 = vec_mergel(x1, x3);
    b20 = vec_mergeh(x0, x2);
    b30 = vec_mergel(x0, x2);

    x4 = vec_mergel(b41, b61);
    x5 = vec_mergeh(b41, b61);
    x6 = vec_mergel(b51, b71);
    x7 = vec_mergeh(b51, b71);

    b41 = vec_mergeh(x5, x7);
    b51 = vec_mergel(x5, x7);
    b61 = vec_mergeh(x4, x6);
    b71 = vec_mergel(x4, x6);

    x0 = vec_mergel(b01, b21);
    x1 = vec_mergeh(b01, b21);
    x2 = vec_mergel(b11, b31);
    x3 = vec_mergeh(b11, b31);

    x4 = vec_mergel(b40, b60);
    x5 = vec_mergeh(b40, b60);
    x6 = vec_mergel(b50, b70);
    x7 = vec_mergeh(b50, b70);

    b40 = vec_mergeh(x1, x3);
    b50 = vec_mergel(x1, x3);
    b60 = vec_mergeh(x0, x2);
    b70 = vec_mergel(x0, x2);

    b01 = vec_mergeh(x5, x7);
    b11 = vec_mergel(x5, x7);
    b21 = vec_mergeh(x4, x6);
    b31 = vec_mergel(x4, x6);
    /* }}} */
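
    /* Each 4x4 quadrant is transposed with two rounds of 32-bit
     * vec_mergeh/vec_mergel, and the two off-diagonal quadrants (the b*1
     * halves of rows 0-3 and the b*0 halves of rows 4-7) swap places in
     * the process. */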

    FDCTCOL(b00, b10, b20, b30, b40, b50, b60, b70);
    FDCTCOL(b01, b11, b21, b31, b41, b51, b61, b71);


    /* round, convert back to short {{{ */
#define CTS(n) \
    b##n##0 = vec_round(b##n##0); \
    b##n##1 = vec_round(b##n##1); \
    vs32(b##n##0) = vec_cts(b##n##0, 0); \
    vs32(b##n##1) = vec_cts(b##n##1, 0); \
    vs16(b##n##0) = vec_pack(vs32(b##n##0), vs32(b##n##1)); \
    vec_st(vs16(b##n##0), 0, bp);
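
    /* vec_round rounds to the nearest integral float, vec_cts converts
     * to 32-bit integers, and vec_pack narrows the two halves back into
     * one vector of eight int16 results, stored over the corresponding
     * input row. */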

    bp = (vector signed short*)block;
    CTS(0); bp++;
    CTS(1); bp++;
    CTS(2); bp++;
    CTS(3); bp++;
    CTS(4); bp++;
    CTS(5); bp++;
    CTS(6); bp++;
    CTS(7);

#undef CTS
    /* }}} */

    POWERPC_PERF_STOP_COUNT(altivec_fdct, 1);
#endif /* ALTIVEC_USE_REFERENCE_C_CODE */
}

/* vim:set foldmethod=marker foldlevel=0: */