ffmpeg / libavcodec / ppc / fft_altivec.c @ b550bfaa

/*
 * FFT/IFFT transforms
 * AltiVec-enabled
 * Copyright (c) 2003 Romain Dolbeau <romain@dolbeau.org>
 * Based on code Copyright (c) 2002 Fabrice Bellard.
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */
#include "dsputil.h"

#include "gcc_fixes.h"

#include "dsputil_altivec.h"

/*
  those three macros are from libavcodec/fft.c
  and are required for the reference C code
*/
/* butterfly op */
#define BF(pre, pim, qre, qim, pre1, pim1, qre1, qim1) \
{\
  FFTSample ax, ay, bx, by;\
  bx=pre1;\
  by=pim1;\
  ax=qre1;\
  ay=qim1;\
  pre = (bx + ax);\
  pim = (by + ay);\
  qre = (bx - ax);\
  qim = (by - ay);\
}
#define MUL16(a,b) ((a) * (b))
#define CMUL(pre, pim, are, aim, bre, bim) \
{\
   pre = (MUL16(are, bre) - MUL16(aim, bim));\
   pim = (MUL16(are, bim) + MUL16(bre, aim));\
}
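
/*
 * Illustrative scalar sketch: one radix-2 step written with the reference
 * macros above.  The function name, 'w' and 'tmp_re'/'tmp_im' are
 * hypothetical; it only shows the plain MUL + ADD/SUB sequence that the
 * AltiVec loops below merge into vec_madd/vec_nmsub.
 */
static void fft_butterfly_ref(FFTComplex *p, FFTComplex *q, FFTComplex w)
{
    FFTSample tmp_re, tmp_im;
    /* rotate q by the twiddle factor: tmp = w * q */
    CMUL(tmp_re, tmp_im, w.re, w.im, q->re, q->im);
    /* combine with p: p' = p + w*q, q' = p - w*q */
    BF(p->re, p->im, q->re, q->im,
       p->re, p->im, tmp_re, tmp_im);
}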

/**
 * Do a complex FFT with the parameters defined in ff_fft_init(). The
 * input data must be permuted beforehand using the s->revtab table. No
 * 1.0/sqrt(n) normalization is done.
 * AltiVec-enabled.
 * This code assumes that the 'z' pointer is 16-byte aligned.
 * It also assumes that each FFTComplex is an 8-byte aligned pair of floats.
 * The code is exactly the same as the SSE version, except
 * that successive MUL + ADD/SUB have been merged into
 * fused multiply-adds ('vec_madd' in AltiVec).
 */
void ff_fft_calc_altivec(FFTContext *s, FFTComplex *z)
{
POWERPC_PERF_DECLARE(altivec_fft_num, s->nbits >= 6);
    register const vector float vczero = (const vector float)vec_splat_u32(0.);

    int ln = s->nbits;
    int j, np, np2;
    int nblocks, nloops;
    register FFTComplex *p, *q;
    FFTComplex *cptr, *cptr1;
    int k;

POWERPC_PERF_START_COUNT(altivec_fft_num, s->nbits >= 6);

    np = 1 << ln;

    {
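        /*
         * First stage: passes 0 and 1 are merged and handled four complex
         * points (two vector loads) at a time.  c1 and c2 are sign vectors
         * built with vcii(), so the add and subtract halves of each
         * butterfly can be computed with vec_madd/vec_nmsub instead of
         * separate multiplies and adds; c2 differs for the inverse
         * transform because the third point is multiplied by +i, not -i.
         */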
        vector float *r, a, b, a1, c1, c2;

        r = (vector float *)&z[0];

        c1 = vcii(p,p,n,n);

        if (s->inverse)
            {
                c2 = vcii(p,p,n,p);
            }
        else
            {
                c2 = vcii(p,p,p,n);
            }

        j = (np >> 2);
        do {
            a = vec_ld(0, r);
            a1 = vec_ld(sizeof(vector float), r);

            b = vec_perm(a,a,vcprmle(1,0,3,2));
            a = vec_madd(a,c1,b);
            /* do the pass 0 butterfly */

            b = vec_perm(a1,a1,vcprmle(1,0,3,2));
            b = vec_madd(a1,c1,b);
            /* do the pass 0 butterfly */

            /* multiply third by -i */
            b = vec_perm(b,b,vcprmle(2,3,1,0));

            /* do the pass 1 butterfly */
            vec_st(vec_madd(b,c2,a), 0, r);
            vec_st(vec_nmsub(b,c2,a), sizeof(vector float), r);

            r += 2;
        } while (--j != 0);
    }
    /* pass 2 .. ln-1 */
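
    /*
     * Each iteration of the outer do/while below is one FFT pass: nloops
     * (the butterfly span, starting at 4 complex points) doubles every
     * pass while nblocks halves, until a single block covers the whole
     * transform.  cptr1 walks through s->exptab1, the SIMD-oriented
     * twiddle table set up in ff_fft_init(); the inner loop consumes two
     * vector loads (cptr += 4 FFTComplex) per pair of q points.
     */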
    nblocks = np >> 3;
    nloops = 1 << 2;
    np2 = np >> 1;

    cptr1 = s->exptab1;
    do {
        p = z;
        q = z + nloops;
        j = nblocks;
        do {
            cptr = cptr1;
            k = nloops >> 1;
            do {
                vector float a,b,c,t1;

                a = vec_ld(0, (float*)p);
                b = vec_ld(0, (float*)q);

                /* complex mul */
                c = vec_ld(0, (float*)cptr);
                /*  cre*re cim*re */
                t1 = vec_madd(c, vec_perm(b,b,vcprmle(2,2,0,0)),vczero);
                c = vec_ld(sizeof(vector float), (float*)cptr);
                /*  -cim*im cre*im */
                b = vec_madd(c, vec_perm(b,b,vcprmle(3,3,1,1)),t1);

                /* butterfly */
                vec_st(vec_add(a,b), 0, (float*)p);
                vec_st(vec_sub(a,b), 0, (float*)q);

                p += 2;
                q += 2;
                cptr += 4;
            } while (--k);

            p += nloops;
            q += nloops;
        } while (--j);
        cptr1 += nloops * 2;
        nblocks = nblocks >> 1;
        nloops = nloops << 1;
    } while (nblocks != 0);

POWERPC_PERF_STOP_COUNT(altivec_fft_num, s->nbits >= 6);
}
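
/*
 * Minimal usage sketch, assuming the generic FFT API declared alongside
 * this code in dsputil.h (ff_fft_init/ff_fft_permute/ff_fft_calc/ff_fft_end)
 * and av_malloc()'s 16-byte alignment; 'fft_example' and its locals are
 * hypothetical names.  ff_fft_calc() dispatches to ff_fft_calc_altivec()
 * when AltiVec support is available.
 */
static void fft_example(void)
{
    FFTContext ctx;
    FFTComplex *z;
    int i, nbits = 6, n = 1 << nbits;   /* 64-point transform */

    z = av_malloc(n * sizeof(*z));      /* av_malloc gives 16-byte alignment */
    for (i = 0; i < n; i++) {
        z[i].re = i;                    /* arbitrary test input */
        z[i].im = 0;
    }

    ff_fft_init(&ctx, nbits, 0);        /* 0 = forward transform */
    ff_fft_permute(&ctx, z);            /* reorder input with s->revtab */
    ff_fft_calc(&ctx, z);               /* in-place FFT, no 1/sqrt(n) scaling */
    ff_fft_end(&ctx);
    av_free(z);
}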