ffmpeg / libavcodec / ppc / mpegvideo_altivec.c @ cb231c48

/*
 * Copyright (c) 2002 Dieter Shirley
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */

#include <stdlib.h>
#include <stdio.h>
#include "../dsputil.h"
#include "../mpegvideo.h"

#include "gcc_fixes.h"

#include "dsputil_altivec.h"

// Swaps two variables (used for altivec registers)
#define SWAP(a,b) \
do { \
    __typeof__(a) swap_temp=a; \
    a=b; \
    b=swap_temp; \
} while (0)

// transposes a matrix consisting of four vectors with four elements each
#define TRANSPOSE4(a,b,c,d) \
do { \
  __typeof__(a) _trans_ach = vec_mergeh(a, c); \
  __typeof__(a) _trans_acl = vec_mergel(a, c); \
  __typeof__(a) _trans_bdh = vec_mergeh(b, d); \
  __typeof__(a) _trans_bdl = vec_mergel(b, d); \
 \
  a = vec_mergeh(_trans_ach, _trans_bdh); \
  b = vec_mergel(_trans_ach, _trans_bdh); \
  c = vec_mergeh(_trans_acl, _trans_bdl); \
  d = vec_mergel(_trans_acl, _trans_bdl); \
} while (0)

#define TRANSPOSE8(a,b,c,d,e,f,g,h) \
do { \
    __typeof__(a)  _A1, _B1, _C1, _D1, _E1, _F1, _G1, _H1; \
    __typeof__(a)  _A2, _B2, _C2, _D2, _E2, _F2, _G2, _H2; \
 \
    _A1 = vec_mergeh (a, e); \
    _B1 = vec_mergel (a, e); \
    _C1 = vec_mergeh (b, f); \
    _D1 = vec_mergel (b, f); \
    _E1 = vec_mergeh (c, g); \
    _F1 = vec_mergel (c, g); \
    _G1 = vec_mergeh (d, h); \
    _H1 = vec_mergel (d, h); \
 \
    _A2 = vec_mergeh (_A1, _E1); \
    _B2 = vec_mergel (_A1, _E1); \
    _C2 = vec_mergeh (_B1, _F1); \
    _D2 = vec_mergel (_B1, _F1); \
    _E2 = vec_mergeh (_C1, _G1); \
    _F2 = vec_mergel (_C1, _G1); \
    _G2 = vec_mergeh (_D1, _H1); \
    _H2 = vec_mergel (_D1, _H1); \
 \
    a = vec_mergeh (_A2, _E2); \
    b = vec_mergel (_A2, _E2); \
    c = vec_mergeh (_B2, _F2); \
    d = vec_mergel (_B2, _F2); \
    e = vec_mergeh (_C2, _G2); \
    f = vec_mergel (_C2, _G2); \
    g = vec_mergeh (_D2, _H2); \
    h = vec_mergel (_D2, _H2); \
} while (0)
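
// Both TRANSPOSE macros above rely purely on vec_mergeh/vec_mergel: each
// merge pass interleaves two vectors, and log2(n) passes yield a full
// n x n transpose. As an illustrative sketch (not part of the original
// code), for four vectors holding rows {a0 a1 a2 a3}, {b0 b1 b2 b3}, ...:
//   pass 1: mergeh(a,c) = {a0 c0 a1 c1},  mergel(a,c) = {a2 c2 a3 c3}
//           mergeh(b,d) = {b0 d0 b1 d1},  mergel(b,d) = {b2 d2 b3 d3}
//   pass 2: mergeh(ac_h, bd_h) = {a0 b0 c0 d0}, i.e. the first column.
// TRANSPOSE8 applies the same idea to eight short vectors in three passes.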

// Loads a four-byte value (int or float) from the target address
// into every element in the target vector.  Only works if the
// target address is four-byte aligned (which it always should be).
#define LOAD4(vec, address) \
{ \
    __typeof__(vec)* _load_addr = (__typeof__(vec)*)(address); \
    vector unsigned char _perm_vec = vec_lvsl(0,(address)); \
    vec = vec_ld(0, _load_addr); \
    vec = vec_perm(vec, vec, _perm_vec); \
    vec = vec_splat(vec, 0); \
}

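// A hypothetical usage sketch (illustration only, not from this file):
//     static int x = 42;
//     vector signed int v;
//     LOAD4(v, &x);          // v now holds {42, 42, 42, 42}
// vec_lvsl(0, address) builds the permute control that rotates the loaded
// quadword so the addressed word lands in element 0, and vec_splat then
// broadcasts element 0 into all four elements.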

#ifdef CONFIG_DARWIN
#define FOUROF(a) (a)
#else
// slower, for dumb non-apple GCC
#define FOUROF(a) {a,a,a,a}
#endif
int dct_quantize_altivec(MpegEncContext* s,
                        DCTELEM* data, int n,
                        int qscale, int* overflow)
{
    int lastNonZero;
    vector float row0, row1, row2, row3, row4, row5, row6, row7;
    vector float alt0, alt1, alt2, alt3, alt4, alt5, alt6, alt7;
    const vector float zero = (const vector float)FOUROF(0.);

    // Load the data into the row/alt vectors
    {
        vector signed short data0, data1, data2, data3, data4, data5, data6, data7;

        data0 = vec_ld(0, data);
        data1 = vec_ld(16, data);
        data2 = vec_ld(32, data);
        data3 = vec_ld(48, data);
        data4 = vec_ld(64, data);
        data5 = vec_ld(80, data);
        data6 = vec_ld(96, data);
        data7 = vec_ld(112, data);

        // Transpose the data before we start
        TRANSPOSE8(data0, data1, data2, data3, data4, data5, data6, data7);

        // load the data into floating point vectors.  We load
        // the high half of each row into the main row vectors
        // and the low half into the alt vectors.
        row0 = vec_ctf(vec_unpackh(data0), 0);
        alt0 = vec_ctf(vec_unpackl(data0), 0);
        row1 = vec_ctf(vec_unpackh(data1), 0);
        alt1 = vec_ctf(vec_unpackl(data1), 0);
        row2 = vec_ctf(vec_unpackh(data2), 0);
        alt2 = vec_ctf(vec_unpackl(data2), 0);
        row3 = vec_ctf(vec_unpackh(data3), 0);
        alt3 = vec_ctf(vec_unpackl(data3), 0);
        row4 = vec_ctf(vec_unpackh(data4), 0);
        alt4 = vec_ctf(vec_unpackl(data4), 0);
        row5 = vec_ctf(vec_unpackh(data5), 0);
        alt5 = vec_ctf(vec_unpackl(data5), 0);
        row6 = vec_ctf(vec_unpackh(data6), 0);
        alt6 = vec_ctf(vec_unpackl(data6), 0);
        row7 = vec_ctf(vec_unpackh(data7), 0);
        alt7 = vec_ctf(vec_unpackl(data7), 0);
    }
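
    // (Layout note: after the TRANSPOSE8 above, each dataN vector holds one
    //  column of the original block as eight 16-bit values; vec_unpackh and
    //  vec_unpackl widen them to 32 bits, so rowN receives elements 0..3 and
    //  altN elements 4..7 of that column before vec_ctf converts to float.)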

    // The following block could exist as a separate AltiVec DCT
    // function.  However, if we put it inline, the DCT data can remain
    // in the vector local variables, as floats, which we'll use during the
    // quantize step...
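    // (The FIX_* constants and the dataptr[] comments quoted below follow
    //  the integer LL&M DCT as found in jpeglib's jfdctint.c; here the
    //  arithmetic is float, so each multiply-then-add pair collapses into
    //  one vec_madd. For example, instead of computing
    //      z3 = z3 * (-1.961570560); z3 = z3 + z5;
    //  the code issues the single fused operation
    //      z3 = vec_madd(z3, vec_1_961570560, z5);
    //  with the sign folded into the constant.)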
    {
        const vector float vec_0_298631336 = (vector float)FOUROF(0.298631336f);
        const vector float vec_0_390180644 = (vector float)FOUROF(-0.390180644f);
        const vector float vec_0_541196100 = (vector float)FOUROF(0.541196100f);
        const vector float vec_0_765366865 = (vector float)FOUROF(0.765366865f);
        const vector float vec_0_899976223 = (vector float)FOUROF(-0.899976223f);
        const vector float vec_1_175875602 = (vector float)FOUROF(1.175875602f);
        const vector float vec_1_501321110 = (vector float)FOUROF(1.501321110f);
        const vector float vec_1_847759065 = (vector float)FOUROF(-1.847759065f);
        const vector float vec_1_961570560 = (vector float)FOUROF(-1.961570560f);
        const vector float vec_2_053119869 = (vector float)FOUROF(2.053119869f);
        const vector float vec_2_562915447 = (vector float)FOUROF(-2.562915447f);
        const vector float vec_3_072711026 = (vector float)FOUROF(3.072711026f);

        int whichPass, whichHalf;

        for(whichPass = 1; whichPass<=2; whichPass++)
        {
            for(whichHalf = 1; whichHalf<=2; whichHalf++)
            {
                vector float tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7;
                vector float tmp10, tmp11, tmp12, tmp13;
                vector float z1, z2, z3, z4, z5;

                tmp0 = vec_add(row0, row7); // tmp0 = dataptr[0] + dataptr[7];
                tmp7 = vec_sub(row0, row7); // tmp7 = dataptr[0] - dataptr[7];
                tmp3 = vec_add(row3, row4); // tmp3 = dataptr[3] + dataptr[4];
                tmp4 = vec_sub(row3, row4); // tmp4 = dataptr[3] - dataptr[4];
                tmp1 = vec_add(row1, row6); // tmp1 = dataptr[1] + dataptr[6];
                tmp6 = vec_sub(row1, row6); // tmp6 = dataptr[1] - dataptr[6];
                tmp2 = vec_add(row2, row5); // tmp2 = dataptr[2] + dataptr[5];
                tmp5 = vec_sub(row2, row5); // tmp5 = dataptr[2] - dataptr[5];

                tmp10 = vec_add(tmp0, tmp3); // tmp10 = tmp0 + tmp3;
                tmp13 = vec_sub(tmp0, tmp3); // tmp13 = tmp0 - tmp3;
                tmp11 = vec_add(tmp1, tmp2); // tmp11 = tmp1 + tmp2;
                tmp12 = vec_sub(tmp1, tmp2); // tmp12 = tmp1 - tmp2;


                // dataptr[0] = (DCTELEM) ((tmp10 + tmp11) << PASS1_BITS);
                row0 = vec_add(tmp10, tmp11);

                // dataptr[4] = (DCTELEM) ((tmp10 - tmp11) << PASS1_BITS);
                row4 = vec_sub(tmp10, tmp11);


                // z1 = MULTIPLY(tmp12 + tmp13, FIX_0_541196100);
                z1 = vec_madd(vec_add(tmp12, tmp13), vec_0_541196100, (vector float)zero);

                // dataptr[2] = (DCTELEM) DESCALE(z1 + MULTIPLY(tmp13, FIX_0_765366865),
                //                   CONST_BITS-PASS1_BITS);
                row2 = vec_madd(tmp13, vec_0_765366865, z1);

                // dataptr[6] = (DCTELEM) DESCALE(z1 + MULTIPLY(tmp12, - FIX_1_847759065),
                //                   CONST_BITS-PASS1_BITS);
                row6 = vec_madd(tmp12, vec_1_847759065, z1);

                z1 = vec_add(tmp4, tmp7); // z1 = tmp4 + tmp7;
                z2 = vec_add(tmp5, tmp6); // z2 = tmp5 + tmp6;
                z3 = vec_add(tmp4, tmp6); // z3 = tmp4 + tmp6;
                z4 = vec_add(tmp5, tmp7); // z4 = tmp5 + tmp7;

                // z5 = MULTIPLY(z3 + z4, FIX_1_175875602); /* sqrt(2) * c3 */
                z5 = vec_madd(vec_add(z3, z4), vec_1_175875602, (vector float)zero);

                // z3 = MULTIPLY(z3, - FIX_1_961570560); /* sqrt(2) * (-c3-c5) */
                z3 = vec_madd(z3, vec_1_961570560, z5);

                // z4 = MULTIPLY(z4, - FIX_0_390180644); /* sqrt(2) * (c5-c3) */
                z4 = vec_madd(z4, vec_0_390180644, z5);

                // The following adds are rolled into the multiplies above
                // z3 = vec_add(z3, z5);  // z3 += z5;
                // z4 = vec_add(z4, z5);  // z4 += z5;

                // z2 = MULTIPLY(z2, - FIX_2_562915447); /* sqrt(2) * (-c1-c3) */
                // Wow!  It's actually more efficient to roll this multiply
                // into the adds below, even though the multiply gets done twice!
                // z2 = vec_madd(z2, vec_2_562915447, (vector float)zero);

                // z1 = MULTIPLY(z1, - FIX_0_899976223); /* sqrt(2) * (c7-c3) */
                // Same with this one...
                // z1 = vec_madd(z1, vec_0_899976223, (vector float)zero);

                // tmp4 = MULTIPLY(tmp4, FIX_0_298631336); /* sqrt(2) * (-c1+c3+c5-c7) */
                // dataptr[7] = (DCTELEM) DESCALE(tmp4 + z1 + z3, CONST_BITS-PASS1_BITS);
                row7 = vec_madd(tmp4, vec_0_298631336, vec_madd(z1, vec_0_899976223, z3));

                // tmp5 = MULTIPLY(tmp5, FIX_2_053119869); /* sqrt(2) * ( c1+c3-c5+c7) */
                // dataptr[5] = (DCTELEM) DESCALE(tmp5 + z2 + z4, CONST_BITS-PASS1_BITS);
                row5 = vec_madd(tmp5, vec_2_053119869, vec_madd(z2, vec_2_562915447, z4));

                // tmp6 = MULTIPLY(tmp6, FIX_3_072711026); /* sqrt(2) * ( c1+c3+c5-c7) */
                // dataptr[3] = (DCTELEM) DESCALE(tmp6 + z2 + z3, CONST_BITS-PASS1_BITS);
                row3 = vec_madd(tmp6, vec_3_072711026, vec_madd(z2, vec_2_562915447, z3));

                // tmp7 = MULTIPLY(tmp7, FIX_1_501321110); /* sqrt(2) * ( c1+c3-c5-c7) */
                // dataptr[1] = (DCTELEM) DESCALE(tmp7 + z1 + z4, CONST_BITS-PASS1_BITS);
                row1 = vec_madd(z1, vec_0_899976223, vec_madd(tmp7, vec_1_501321110, z4));

                // Swap the row values with the alts.  If this is the first half,
                // this sets up the low values to be acted on in the second half.
                // If this is the second half, it puts the high values back in
                // the row values where they are expected to be when we're done.
                SWAP(row0, alt0);
                SWAP(row1, alt1);
                SWAP(row2, alt2);
                SWAP(row3, alt3);
                SWAP(row4, alt4);
                SWAP(row5, alt5);
                SWAP(row6, alt6);
                SWAP(row7, alt7);
            }

            if (whichPass == 1)
            {
                // transpose the data for the second pass

                // First, block transpose the upper right with lower left.
                SWAP(row4, alt0);
                SWAP(row5, alt1);
                SWAP(row6, alt2);
                SWAP(row7, alt3);

                // Now, transpose each block of four
                TRANSPOSE4(row0, row1, row2, row3);
                TRANSPOSE4(row4, row5, row6, row7);
                TRANSPOSE4(alt0, alt1, alt2, alt3);
                TRANSPOSE4(alt4, alt5, alt6, alt7);
            }
        }
    }
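
    // (Structure note: each whichPass iteration runs the 1-D DCT over both
    //  the row and alt halves, and the transpose between the passes turns
    //  the first, column-wise pass into a row-wise second pass, giving the
    //  full 2-D 8x8 DCT without the data ever leaving the vector registers.)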

    // used after quantise step
    int oldBaseValue = 0;

    // perform the quantise step, using the floating point data
    // still in the row/alt registers
    {
        const int* biasAddr;
        const vector signed int* qmat;
        vector float bias, negBias;

        if (s->mb_intra)
        {
            vector signed int baseVector;

            // We must cache element 0 in the intra case
            // (it needs special handling).
            baseVector = vec_cts(vec_splat(row0, 0), 0);
            vec_ste(baseVector, 0, &oldBaseValue);

            qmat = (vector signed int*)s->q_intra_matrix[qscale];
            biasAddr = &(s->intra_quant_bias);
        }
        else
        {
            qmat = (vector signed int*)s->q_inter_matrix[qscale];
            biasAddr = &(s->inter_quant_bias);
        }

        // Load the bias vector (We add 0.5 to the bias so that we're
        // rounding when we convert to int, instead of flooring.)
        {
            vector signed int biasInt;
            const vector float negOneFloat = (vector float)FOUROF(-1.0f);
            LOAD4(biasInt, biasAddr);
            bias = vec_ctf(biasInt, QUANT_BIAS_SHIFT);
            negBias = vec_madd(bias, negOneFloat, zero);
        }

        {
            vector float q0, q1, q2, q3, q4, q5, q6, q7;

            q0 = vec_ctf(qmat[0], QMAT_SHIFT);
            q1 = vec_ctf(qmat[2], QMAT_SHIFT);
            q2 = vec_ctf(qmat[4], QMAT_SHIFT);
            q3 = vec_ctf(qmat[6], QMAT_SHIFT);
            q4 = vec_ctf(qmat[8], QMAT_SHIFT);
            q5 = vec_ctf(qmat[10], QMAT_SHIFT);
            q6 = vec_ctf(qmat[12], QMAT_SHIFT);
            q7 = vec_ctf(qmat[14], QMAT_SHIFT);

            row0 = vec_sel(vec_madd(row0, q0, negBias), vec_madd(row0, q0, bias),
                    vec_cmpgt(row0, zero));
            row1 = vec_sel(vec_madd(row1, q1, negBias), vec_madd(row1, q1, bias),
                    vec_cmpgt(row1, zero));
            row2 = vec_sel(vec_madd(row2, q2, negBias), vec_madd(row2, q2, bias),
                    vec_cmpgt(row2, zero));
            row3 = vec_sel(vec_madd(row3, q3, negBias), vec_madd(row3, q3, bias),
                    vec_cmpgt(row3, zero));
            row4 = vec_sel(vec_madd(row4, q4, negBias), vec_madd(row4, q4, bias),
                    vec_cmpgt(row4, zero));
            row5 = vec_sel(vec_madd(row5, q5, negBias), vec_madd(row5, q5, bias),
                    vec_cmpgt(row5, zero));
            row6 = vec_sel(vec_madd(row6, q6, negBias), vec_madd(row6, q6, bias),
                    vec_cmpgt(row6, zero));
            row7 = vec_sel(vec_madd(row7, q7, negBias), vec_madd(row7, q7, bias),
                    vec_cmpgt(row7, zero));

            q0 = vec_ctf(qmat[1], QMAT_SHIFT);
            q1 = vec_ctf(qmat[3], QMAT_SHIFT);
            q2 = vec_ctf(qmat[5], QMAT_SHIFT);
            q3 = vec_ctf(qmat[7], QMAT_SHIFT);
            q4 = vec_ctf(qmat[9], QMAT_SHIFT);
            q5 = vec_ctf(qmat[11], QMAT_SHIFT);
            q6 = vec_ctf(qmat[13], QMAT_SHIFT);
            q7 = vec_ctf(qmat[15], QMAT_SHIFT);

            alt0 = vec_sel(vec_madd(alt0, q0, negBias), vec_madd(alt0, q0, bias),
                    vec_cmpgt(alt0, zero));
            alt1 = vec_sel(vec_madd(alt1, q1, negBias), vec_madd(alt1, q1, bias),
                    vec_cmpgt(alt1, zero));
            alt2 = vec_sel(vec_madd(alt2, q2, negBias), vec_madd(alt2, q2, bias),
                    vec_cmpgt(alt2, zero));
            alt3 = vec_sel(vec_madd(alt3, q3, negBias), vec_madd(alt3, q3, bias),
                    vec_cmpgt(alt3, zero));
            alt4 = vec_sel(vec_madd(alt4, q4, negBias), vec_madd(alt4, q4, bias),
                    vec_cmpgt(alt4, zero));
            alt5 = vec_sel(vec_madd(alt5, q5, negBias), vec_madd(alt5, q5, bias),
                    vec_cmpgt(alt5, zero));
            alt6 = vec_sel(vec_madd(alt6, q6, negBias), vec_madd(alt6, q6, bias),
                    vec_cmpgt(alt6, zero));
            alt7 = vec_sel(vec_madd(alt7, q7, negBias), vec_madd(alt7, q7, bias),
                    vec_cmpgt(alt7, zero));
        }

    }
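
    // (Scalar picture of the selection above, for one coefficient x; an
    //  illustration only, not code from this file:
    //      if (x > 0) out = x * q + bias;
    //      else       out = x * q - bias;
    //  Since the vec_cts() conversion below truncates toward zero, adding a
    //  sign-matched bias first rounds the magnitude with the configured
    //  quantizer bias instead of plain flooring.)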

    // Store the data back into the original block
    {
        vector signed short data0, data1, data2, data3, data4, data5, data6, data7;

        data0 = vec_pack(vec_cts(row0, 0), vec_cts(alt0, 0));
        data1 = vec_pack(vec_cts(row1, 0), vec_cts(alt1, 0));
        data2 = vec_pack(vec_cts(row2, 0), vec_cts(alt2, 0));
        data3 = vec_pack(vec_cts(row3, 0), vec_cts(alt3, 0));
        data4 = vec_pack(vec_cts(row4, 0), vec_cts(alt4, 0));
        data5 = vec_pack(vec_cts(row5, 0), vec_cts(alt5, 0));
        data6 = vec_pack(vec_cts(row6, 0), vec_cts(alt6, 0));
        data7 = vec_pack(vec_cts(row7, 0), vec_cts(alt7, 0));

        {
            // Clamp for overflow
            vector signed int max_q_int, min_q_int;
            vector signed short max_q, min_q;

            LOAD4(max_q_int, &(s->max_qcoeff));
            LOAD4(min_q_int, &(s->min_qcoeff));

            max_q = vec_pack(max_q_int, max_q_int);
            min_q = vec_pack(min_q_int, min_q_int);

            data0 = vec_max(vec_min(data0, max_q), min_q);
            data1 = vec_max(vec_min(data1, max_q), min_q);
            data2 = vec_max(vec_min(data2, max_q), min_q);
            data3 = vec_max(vec_min(data3, max_q), min_q);
            data4 = vec_max(vec_min(data4, max_q), min_q);
            data5 = vec_max(vec_min(data5, max_q), min_q);
            data6 = vec_max(vec_min(data6, max_q), min_q);
            data7 = vec_max(vec_min(data7, max_q), min_q);
        }

        vector bool char zero_01, zero_23, zero_45, zero_67;
        vector signed char scanIndices_01, scanIndices_23, scanIndices_45, scanIndices_67;
        vector signed char negOne = vec_splat_s8(-1);
        vector signed char* scanPtr =
                (vector signed char*)(s->intra_scantable.inverse);

        // Determine the largest non-zero index.
        zero_01 = vec_pack(vec_cmpeq(data0, (vector short)zero),
                vec_cmpeq(data1, (vector short)zero));
        zero_23 = vec_pack(vec_cmpeq(data2, (vector short)zero),
                vec_cmpeq(data3, (vector short)zero));
        zero_45 = vec_pack(vec_cmpeq(data4, (vector short)zero),
                vec_cmpeq(data5, (vector short)zero));
        zero_67 = vec_pack(vec_cmpeq(data6, (vector short)zero),
                vec_cmpeq(data7, (vector short)zero));

        // 64 biggest values
        scanIndices_01 = vec_sel(scanPtr[0], negOne, zero_01);
        scanIndices_23 = vec_sel(scanPtr[1], negOne, zero_23);
        scanIndices_45 = vec_sel(scanPtr[2], negOne, zero_45);
        scanIndices_67 = vec_sel(scanPtr[3], negOne, zero_67);

        // 32 largest values
        scanIndices_01 = vec_max(scanIndices_01, scanIndices_23);
        scanIndices_45 = vec_max(scanIndices_45, scanIndices_67);

        // 16 largest values
        scanIndices_01 = vec_max(scanIndices_01, scanIndices_45);

        // 8 largest values
        scanIndices_01 = vec_max(vec_mergeh(scanIndices_01, negOne),
                vec_mergel(scanIndices_01, negOne));

        // 4 largest values
        scanIndices_01 = vec_max(vec_mergeh(scanIndices_01, negOne),
                vec_mergel(scanIndices_01, negOne));

        // 2 largest values
        scanIndices_01 = vec_max(vec_mergeh(scanIndices_01, negOne),
                vec_mergel(scanIndices_01, negOne));

        // largest value
        scanIndices_01 = vec_max(vec_mergeh(scanIndices_01, negOne),
                vec_mergel(scanIndices_01, negOne));

        scanIndices_01 = vec_splat(scanIndices_01, 0);

        signed char lastNonZeroChar;

        vec_ste(scanIndices_01, 0, &lastNonZeroChar);

        lastNonZero = lastNonZeroChar;
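
        // (What the reduction above did: each coefficient's scan position
        //  was looked up through the inverse scantable, positions of zero
        //  coefficients were masked to -1, and successive vec_max steps
        //  (64 to 32 to 16 to 8 to 4 to 2 to 1) left the highest surviving
        //  scan index, i.e. the last non-zero position, in every element.)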

        // While the data is still in vectors we check for the transpose IDCT permute
        // and handle it using the vector unit if we can.  This is the permute used
        // by the altivec idct, so it is common when using the altivec dct.

        if ((lastNonZero > 0) && (s->dsp.idct_permutation_type == FF_TRANSPOSE_IDCT_PERM))
        {
            TRANSPOSE8(data0, data1, data2, data3, data4, data5, data6, data7);
        }

        vec_st(data0, 0, data);
        vec_st(data1, 16, data);
        vec_st(data2, 32, data);
        vec_st(data3, 48, data);
        vec_st(data4, 64, data);
        vec_st(data5, 80, data);
        vec_st(data6, 96, data);
        vec_st(data7, 112, data);
    }

    // special handling of block[0]
    if (s->mb_intra)
    {
        if (!s->h263_aic)
        {
            if (n < 4)
                oldBaseValue /= s->y_dc_scale;
            else
                oldBaseValue /= s->c_dc_scale;
        }

        // Divide by 8, rounding the result
        data[0] = (oldBaseValue + 4) >> 3;
    }

    // We handled the transpose permutation above and we don't
    // need to permute the "no" permutation case.
    if ((lastNonZero > 0) &&
        (s->dsp.idct_permutation_type != FF_TRANSPOSE_IDCT_PERM) &&
        (s->dsp.idct_permutation_type != FF_NO_IDCT_PERM))
    {
        ff_block_permute(data, s->dsp.idct_permutation,
                s->intra_scantable.scantable, lastNonZero);
    }

    return lastNonZero;
}
#undef FOUROF

/*
  AltiVec version of dct_unquantize_h263
  this code assumes `block' is 16-byte aligned
*/
void dct_unquantize_h263_altivec(MpegEncContext *s,
                                 DCTELEM *block, int n, int qscale)
{
POWERPC_PERF_DECLARE(altivec_dct_unquantize_h263_num, 1);
    int i, level, qmul, qadd;
    int nCoeffs;

    assert(s->block_last_index[n]>=0);

POWERPC_PERF_START_COUNT(altivec_dct_unquantize_h263_num, 1);

    qadd = (qscale - 1) | 1;
    qmul = qscale << 1;

    if (s->mb_intra) {
        if (!s->h263_aic) {
            if (n < 4)
                block[0] = block[0] * s->y_dc_scale;
            else
                block[0] = block[0] * s->c_dc_scale;
        } else
            qadd = 0;
        i = 1;
        nCoeffs= 63; // does not always use the zigzag table
    } else {
        i = 0;
        nCoeffs= s->intra_scantable.raster_end[ s->block_last_index[n] ];
    }

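    // (For reference: H.263 inverse quantization computes, per non-zero
    //  coefficient,
    //      level' = level * qmul + sign(level) * qadd
    //  with qmul = 2*qscale and qadd = (qscale - 1) | 1 kept odd. Both the
    //  scalar reference path and the vector loop below implement this, the
    //  latter selecting between +qadd and -qadd with vec_sel on the sign.)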
#ifdef ALTIVEC_USE_REFERENCE_C_CODE
    for(;i<=nCoeffs;i++) {
        level = block[i];
        if (level) {
            if (level < 0) {
                level = level * qmul - qadd;
            } else {
                level = level * qmul + qadd;
            }
            block[i] = level;
        }
    }
#else /* ALTIVEC_USE_REFERENCE_C_CODE */
    {
      register const vector short vczero = (const vector short)vec_splat_s16(0);
      short __attribute__ ((aligned(16))) qmul8[] =
          {
            qmul, qmul, qmul, qmul,
            qmul, qmul, qmul, qmul
          };
      short __attribute__ ((aligned(16))) qadd8[] =
          {
            qadd, qadd, qadd, qadd,
            qadd, qadd, qadd, qadd
          };
      short __attribute__ ((aligned(16))) nqadd8[] =
          {
            -qadd, -qadd, -qadd, -qadd,
            -qadd, -qadd, -qadd, -qadd
          };
      register vector short blockv, qmulv, qaddv, nqaddv, temp1;
      register vector bool short blockv_null, blockv_neg;
      register short backup_0 = block[0];
      register int j = 0;

      qmulv = vec_ld(0, qmul8);
      qaddv = vec_ld(0, qadd8);
      nqaddv = vec_ld(0, nqadd8);

#if 0 // block *is* 16-byte aligned, it seems.
      // first make sure block[j] is 16-byte aligned
      for(j = 0; (j <= nCoeffs) && ((((unsigned long)block) + (j << 1)) & 0x0000000F) ; j++) {
        level = block[j];
        if (level) {
          if (level < 0) {
            level = level * qmul - qadd;
          } else {
            level = level * qmul + qadd;
          }
          block[j] = level;
        }
      }
#endif

      // vectorize all the 16-byte aligned blocks
      // of 8 elements
      for(; (j + 7) <= nCoeffs ; j+=8)
      {
        blockv = vec_ld(j << 1, block);
        blockv_neg = vec_cmplt(blockv, vczero);
        blockv_null = vec_cmpeq(blockv, vczero);
        // choose between +qadd or -qadd as the third operand
        temp1 = vec_sel(qaddv, nqaddv, blockv_neg);
        // multiply & add (block[j..j+7] * qmul [+-] qadd)
        temp1 = vec_mladd(blockv, qmulv, temp1);
        // put 0 back where block[j..j+7] used to have 0
        blockv = vec_sel(temp1, blockv, blockv_null);
        vec_st(blockv, j << 1, block);
      }

      // if nCoeffs isn't a multiple of 8, finish the job
      // using good old scalar units.
      // (we could do it using a truncated vector,
      // but I'm not sure it's worth the hassle)
      for(; j <= nCoeffs ; j++) {
        level = block[j];
        if (level) {
          if (level < 0) {
            level = level * qmul - qadd;
          } else {
            level = level * qmul + qadd;
          }
          block[j] = level;
        }
      }
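
      // (Why the restore below works: the vector loop always starts at
      //  j = 0, so in the intra case (i == 1) it runs the DC coefficient
      //  through the qmul/qadd arithmetic even though it was already
      //  scaled above; saving block[0] up front and restoring it here
      //  undoes that without special-casing the first vector iteration.)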
      if (i == 1)
      { // cheat. this avoids special-casing the first iteration
        block[0] = backup_0;
      }
    }
#endif /* ALTIVEC_USE_REFERENCE_C_CODE */

POWERPC_PERF_STOP_COUNT(altivec_dct_unquantize_h263_num, nCoeffs == 63);
}