ffmpeg / libavcodec / ppc / idct_altivec.c @ a6b4448c

/*
 * Copyright (c) 2001 Michel Lespinasse
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/*
 * NOTE: This code is based on GPL code from the libmpeg2 project.  The
 * author, Michel Lespinasse, has given explicit permission to release
 * under LGPL as part of FFmpeg.
 */

/*
 * FFmpeg integration by Dieter Shirley
 *
 * This file is a direct copy of the AltiVec IDCT module from the libmpeg2
 * project.  I've deleted all of the libmpeg2-specific code, renamed the
 * functions and reordered the function parameters.  The only change to the
 * IDCT function itself was to factor out the partial transposition and to
 * perform a full transpose at the end of the function.
 */

#include <stdlib.h>                                      /* malloc(), free() */
#include <string.h>
#include "libavcodec/dsputil.h"

#include "gcc_fixes.h"
#include "types_altivec.h"
#include "dsputil_ppc.h"
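
/*
 * The vec_s16, vec_u16, vec_u8 and vec_u32 names used throughout this file
 * are the AltiVec type shorthands ("vector signed short", "vector unsigned
 * short", "vector unsigned char", "vector unsigned int") provided by
 * types_altivec.h above.
 */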

    
#define IDCT_HALF                                       \
    /* 1st stage */                                     \
    t1 = vec_mradds (a1, vx7, vx1);                     \
    t8 = vec_mradds (a1, vx1, vec_subs (zero, vx7));    \
    t7 = vec_mradds (a2, vx5, vx3);                     \
    t3 = vec_mradds (ma2, vx3, vx5);                    \
                                                        \
    /* 2nd stage */                                     \
    t5 = vec_adds (vx0, vx4);                           \
    t0 = vec_subs (vx0, vx4);                           \
    t2 = vec_mradds (a0, vx6, vx2);                     \
    t4 = vec_mradds (a0, vx2, vec_subs (zero, vx6));    \
    t6 = vec_adds (t8, t3);                             \
    t3 = vec_subs (t8, t3);                             \
    t8 = vec_subs (t1, t7);                             \
    t1 = vec_adds (t1, t7);                             \
                                                        \
    /* 3rd stage */                                     \
    t7 = vec_adds (t5, t2);                             \
    t2 = vec_subs (t5, t2);                             \
    t5 = vec_adds (t0, t4);                             \
    t0 = vec_subs (t0, t4);                             \
    t4 = vec_subs (t8, t3);                             \
    t3 = vec_adds (t8, t3);                             \
                                                        \
    /* 4th stage */                                     \
    vy0 = vec_adds (t7, t1);                            \
    vy7 = vec_subs (t7, t1);                            \
    vy1 = vec_mradds (c4, t3, t5);                      \
    vy6 = vec_mradds (mc4, t3, t5);                     \
    vy2 = vec_mradds (c4, t4, t0);                      \
    vy5 = vec_mradds (mc4, t4, t0);                     \
    vy3 = vec_adds (t2, t6);                            \
    vy4 = vec_subs (t2, t6);
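
/*
 * A scalar model of the vec_mradds() primitive used above (illustrative
 * sketch only; the intrinsic operates on eight 16-bit lanes at once):
 *
 *     static inline int16_t mradds_one_lane(int16_t a, int16_t b, int16_t c)
 *     {
 *         int32_t r = (((int32_t)a * b + 0x4000) >> 15) + c;
 *         if (r >  32767) r =  32767;
 *         if (r < -32768) r = -32768;
 *         return (int16_t)r;
 *     }
 *
 * i.e. a rounded Q15 multiply followed by a saturating add, which is why the
 * butterfly coefficients in constants[0] are stored as signed Q15 values.
 */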

    
#define IDCT                                                            \
    vec_s16 vx0, vx1, vx2, vx3, vx4, vx5, vx6, vx7;                     \
    vec_s16 vy0, vy1, vy2, vy3, vy4, vy5, vy6, vy7;                     \
    vec_s16 a0, a1, a2, ma2, c4, mc4, zero, bias;                       \
    vec_s16 t0, t1, t2, t3, t4, t5, t6, t7, t8;                         \
    vec_u16 shift;                                                      \
                                                                        \
    c4 = vec_splat (constants[0], 0);                                   \
    a0 = vec_splat (constants[0], 1);                                   \
    a1 = vec_splat (constants[0], 2);                                   \
    a2 = vec_splat (constants[0], 3);                                   \
    mc4 = vec_splat (constants[0], 4);                                  \
    ma2 = vec_splat (constants[0], 5);                                  \
    bias = (vec_s16)vec_splat ((vec_s32)constants[0], 3);               \
                                                                        \
    zero = vec_splat_s16 (0);                                           \
    shift = vec_splat_u16 (4);                                          \
                                                                        \
    vx0 = vec_mradds (vec_sl (block[0], shift), constants[1], zero);    \
    vx1 = vec_mradds (vec_sl (block[1], shift), constants[2], zero);    \
    vx2 = vec_mradds (vec_sl (block[2], shift), constants[3], zero);    \
    vx3 = vec_mradds (vec_sl (block[3], shift), constants[4], zero);    \
    vx4 = vec_mradds (vec_sl (block[4], shift), constants[1], zero);    \
    vx5 = vec_mradds (vec_sl (block[5], shift), constants[4], zero);    \
    vx6 = vec_mradds (vec_sl (block[6], shift), constants[3], zero);    \
    vx7 = vec_mradds (vec_sl (block[7], shift), constants[2], zero);    \
                                                                        \
    IDCT_HALF                                                           \
                                                                        \
    vx0 = vec_mergeh (vy0, vy4);                                        \
    vx1 = vec_mergel (vy0, vy4);                                        \
    vx2 = vec_mergeh (vy1, vy5);                                        \
    vx3 = vec_mergel (vy1, vy5);                                        \
    vx4 = vec_mergeh (vy2, vy6);                                        \
    vx5 = vec_mergel (vy2, vy6);                                        \
    vx6 = vec_mergeh (vy3, vy7);                                        \
    vx7 = vec_mergel (vy3, vy7);                                        \
                                                                        \
    vy0 = vec_mergeh (vx0, vx4);                                        \
    vy1 = vec_mergel (vx0, vx4);                                        \
    vy2 = vec_mergeh (vx1, vx5);                                        \
    vy3 = vec_mergel (vx1, vx5);                                        \
    vy4 = vec_mergeh (vx2, vx6);                                        \
    vy5 = vec_mergel (vx2, vx6);                                        \
    vy6 = vec_mergeh (vx3, vx7);                                        \
    vy7 = vec_mergel (vx3, vx7);                                        \
                                                                        \
    vx0 = vec_adds (vec_mergeh (vy0, vy4), bias);                       \
    vx1 = vec_mergel (vy0, vy4);                                        \
    vx2 = vec_mergeh (vy1, vy5);                                        \
    vx3 = vec_mergel (vy1, vy5);                                        \
    vx4 = vec_mergeh (vy2, vy6);                                        \
    vx5 = vec_mergel (vy2, vy6);                                        \
    vx6 = vec_mergeh (vy3, vy7);                                        \
    vx7 = vec_mergel (vy3, vy7);                                        \
                                                                        \
    IDCT_HALF                                                           \
                                                                        \
    shift = vec_splat_u16 (6);                                          \
    vx0 = vec_sra (vy0, shift);                                         \
    vx1 = vec_sra (vy1, shift);                                         \
    vx2 = vec_sra (vy2, shift);                                         \
    vx3 = vec_sra (vy3, shift);                                         \
    vx4 = vec_sra (vy4, shift);                                         \
    vx5 = vec_sra (vy5, shift);                                         \
    vx6 = vec_sra (vy6, shift);                                         \
    vx7 = vec_sra (vy7, shift);
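
/*
 * Structure of the IDCT macro above: the input rows are prescaled by the
 * Q15 vectors constants[1..4] (after a left shift of 4 to keep precision),
 * run through the 1-D butterfly (IDCT_HALF), transposed, and run through
 * the butterfly again.  The three vec_mergeh/vec_mergel passes between the
 * two IDCT_HALF invocations implement a full 8x8 transpose of 16-bit
 * elements, which is the transpose the integration note at the top of this
 * file refers to.  The bias row ({32, 31, 32, 31, ...}, roughly 2^6 / 2)
 * is added to vx0 only; since vx0 feeds every output of the second pass
 * through additions alone, this supplies the rounding for the final
 * vec_sra(..., 6), i.e. a rounded division by 64.
 */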

    
static const vec_s16 constants[5] = {
    {23170, 13573,  6518, 21895, -23170, -21895,    32,    31},
    {16384, 22725, 21407, 19266,  16384,  19266, 21407, 22725},
    {22725, 31521, 29692, 26722,  22725,  26722, 29692, 31521},
    {21407, 29692, 27969, 25172,  21407,  25172, 27969, 29692},
    {19266, 26722, 25172, 22654,  19266,  22654, 25172, 26722}
};
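
/*
 * On the tables: constants[0] packs the Q15 butterfly coefficients
 * (23170 = cos(pi/4) * 2^15; a0, a1 and a2 appear to be tangent-derived
 * rotation factors, with mc4 and ma2 as negations) followed by the
 * {32, 31} bias halfwords used for final rounding.  constants[1..4]
 * appear to be the per-row prescale vectors, folding the usual 2-D IDCT
 * cosine scaling into the single vec_mradds per input row above.
 */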

    
void idct_put_altivec(uint8_t* dest, int stride, vec_s16* block)
{
POWERPC_PERF_DECLARE(altivec_idct_put_num, 1);
    vec_u8 tmp;

#ifdef CONFIG_POWERPC_PERF
POWERPC_PERF_START_COUNT(altivec_idct_put_num, 1);
#endif
    IDCT

#define COPY(dest,src)                                          \
    tmp = vec_packsu (src, src);                                \
    vec_ste ((vec_u32)tmp, 0, (unsigned int *)dest);            \
    vec_ste ((vec_u32)tmp, 4, (unsigned int *)dest);

    COPY (dest, vx0)    dest += stride;
    COPY (dest, vx1)    dest += stride;
    COPY (dest, vx2)    dest += stride;
    COPY (dest, vx3)    dest += stride;
    COPY (dest, vx4)    dest += stride;
    COPY (dest, vx5)    dest += stride;
    COPY (dest, vx6)    dest += stride;
    COPY (dest, vx7)

POWERPC_PERF_STOP_COUNT(altivec_idct_put_num, 1);
}
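
/*
 * COPY above packs the eight 16-bit results to unsigned bytes with the
 * saturating vec_packsu (the 0..255 pixel clamp comes for free) and writes
 * the row as two 4-byte element stores.  Packing src against itself places
 * the row in both halves of tmp, so vec_ste selects the correct word for
 * any 8-byte-aligned dest; the put path therefore appears to assume 8-byte
 * alignment of dest.
 */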

    
void idct_add_altivec(uint8_t* dest, int stride, vec_s16* block)
{
POWERPC_PERF_DECLARE(altivec_idct_add_num, 1);
    vec_u8 tmp;
    vec_s16 tmp2, tmp3;
    vec_u8 perm0;
    vec_u8 perm1;
    vec_u8 p0, p1, p;

#ifdef CONFIG_POWERPC_PERF
POWERPC_PERF_START_COUNT(altivec_idct_add_num, 1);
#endif

    IDCT

    p0 = vec_lvsl (0, dest);
    p1 = vec_lvsl (stride, dest);
    p = vec_splat_u8 (-1);
    perm0 = vec_mergeh (p, p0);
    perm1 = vec_mergeh (p, p1);
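
    /*
     * How the zero-extension works: vec_lvsl yields the alignment permute
     * vector for dest, and merging it with the all-0xFF vector builds a
     * permute map whose odd bytes select the source pixels and whose 0xFF
     * bytes (low five bits = 31) select the last byte of the all-zero
     * second operand of vec_perm in ADD below.  The net effect is a load
     * of eight unaligned pixels zero-extended to 16-bit lanes, ready to be
     * added to the IDCT output.
     */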

    
#define ADD(dest,src,perm)                                              \
    /* *(uint64_t *)&tmp = *(uint64_t *)dest; */                        \
    tmp = vec_ld (0, dest);                                             \
    tmp2 = (vec_s16)vec_perm (tmp, (vec_u8)zero, perm);                 \
    tmp3 = vec_adds (tmp2, src);                                        \
    tmp = vec_packsu (tmp3, tmp3);                                      \
    vec_ste ((vec_u32)tmp, 0, (unsigned int *)dest);                    \
    vec_ste ((vec_u32)tmp, 4, (unsigned int *)dest);

    ADD (dest, vx0, perm0)      dest += stride;
    ADD (dest, vx1, perm1)      dest += stride;
    ADD (dest, vx2, perm0)      dest += stride;
    ADD (dest, vx3, perm1)      dest += stride;
    ADD (dest, vx4, perm0)      dest += stride;
    ADD (dest, vx5, perm1)      dest += stride;
    ADD (dest, vx6, perm0)      dest += stride;
    ADD (dest, vx7, perm1)

POWERPC_PERF_STOP_COUNT(altivec_idct_add_num, 1);
}
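
/*
 * Usage sketch (hypothetical wiring, for illustration only -- the real
 * hookup lives in the PPC dsputil initialization, not in this file):
 *
 *     if (mm_flags & FF_MM_ALTIVEC) {
 *         c->idct_put              = idct_put_altivec;
 *         c->idct_add              = idct_add_altivec;
 *         c->idct_permutation_type = FF_TRANSPOSE_IDCT_PERM;
 *     }
 *
 * The transposed permutation type reflects the full output transpose
 * performed at the end of the IDCT macro, as described in the integration
 * note at the top of this file.
 */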