ffmpeg/libavcodec/ppc/vp8dsp_altivec.c @ 2912e87a

/**
 * VP8 compatible video decoder
 *
 * Copyright (C) 2010 David Conrad
 *
 * This file is part of Libav.
 *
 * Libav is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * Libav is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with Libav; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include "libavutil/cpu.h"
#include "libavcodec/vp8dsp.h"
#include "dsputil_altivec.h"
#include "types_altivec.h"
#include "util_altivec.h"

#define REPT4(...) { __VA_ARGS__, __VA_ARGS__, __VA_ARGS__, __VA_ARGS__ }

// h subpel filter uses msum to multiply+add 4 pixel taps at once
static const vec_s8 h_subpel_filters_inner[7] =
{
    REPT4( -6, 123,  12,  -1),
    REPT4(-11, 108,  36,  -8),
    REPT4( -9,  93,  50,  -6),
    REPT4(-16,  77,  77, -16),
    REPT4( -6,  50,  93,  -9),
    REPT4( -8,  36, 108, -11),
    REPT4( -1,  12, 123,  -6),
};
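
// [Added note] REPT4 repeats the four inner taps so that a single
// vec_msum(filter_inner, pix, acc) evaluates the 4-tap inner filter for
// four output pixels at once: vec_msum multiplies sixteen s8 taps by
// sixteen u8 pixels and sums each group of four products into one s32
// lane.  For the mx == 4 filter, for example, one lane computes
// -16*p0 + 77*p1 + 77*p2 - 16*p3.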

// for 6tap filters, these are the outer two taps
// The zeros mask off pixels 4-7 when filtering 0-3
// and vice-versa
static const vec_s8 h_subpel_filters_outer[3] =
{
    REPT4(0, 0, 2, 1),
    REPT4(0, 0, 3, 3),
    REPT4(0, 0, 1, 2),
};
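
// [Added note] Each 4-tap group here is {0, 0, outer_left, outer_right}.
// FILTER_H gathers one vector of outer pixels that serves both halves:
// filter_outerh picks the pair belonging to outputs 0-3, and the rotated
// filter_outerl (see LOAD_H_SUBPEL_FILTER) picks the pair for outputs 4-7,
// the zero taps cancelling whichever half a given msum is not computing.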

#define LOAD_H_SUBPEL_FILTER(i) \
    vec_s8 filter_inner  = h_subpel_filters_inner[i]; \
    vec_s8 filter_outerh = h_subpel_filters_outer[(i)>>1]; \
    vec_s8 filter_outerl = vec_sld(filter_outerh, filter_outerh, 2)

#define FILTER_H(dstv, off) \
    a = vec_ld((off)-is6tap-1,    src); \
    b = vec_ld((off)-is6tap-1+15, src); \
\
    pixh  = vec_perm(a, b, permh##off); \
    pixl  = vec_perm(a, b, perml##off); \
    filth = vec_msum(filter_inner, pixh, c64); \
    filtl = vec_msum(filter_inner, pixl, c64); \
\
    if (is6tap) { \
        outer = vec_perm(a, b, perm_6tap##off); \
        filth = vec_msum(filter_outerh, outer, filth); \
        filtl = vec_msum(filter_outerl, outer, filtl); \
    } \
    if (w == 4) \
        filtl = filth; /* discard pixels 4-7 */ \
    dstv = vec_packs(filth, filtl); \
    dstv = vec_sra(dstv, c7)
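
// [Added note] c64 is the VP8 rounding constant 1<<6, pre-loaded into the
// msum accumulators; every subpel filter's taps sum to 128, so each output
// pixel is (sum_of_products + 64) >> 7.  vec_packs first saturates the two
// s32 halves down to s16 (the largest possible sum, 128*255 + 64, still
// fits), then vec_sra applies the shift by 7.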

static av_always_inline
void put_vp8_epel_h_altivec_core(uint8_t *dst, int dst_stride,
                                 uint8_t *src, int src_stride,
                                 int h, int mx, int w, int is6tap)
{
    LOAD_H_SUBPEL_FILTER(mx-1);
    vec_u8 align_vec0, align_vec8, permh0, permh8, filt;
    vec_u8 perm_6tap0, perm_6tap8, perml0, perml8;
    vec_u8 a, b, pixh, pixl, outer;
    vec_s16 f16h, f16l;
    vec_s32 filth, filtl;

    vec_u8 perm_inner6 = { 1,2,3,4, 2,3,4,5, 3,4,5,6, 4,5,6,7 };
    vec_u8 perm_inner4 = { 0,1,2,3, 1,2,3,4, 2,3,4,5, 3,4,5,6 };
    vec_u8 perm_inner  = is6tap ? perm_inner6 : perm_inner4;
    vec_u8 perm_outer = { 4,9, 0,5, 5,10, 1,6, 6,11, 2,7, 7,12, 3,8 };
    vec_s32 c64 = vec_sl(vec_splat_s32(1), vec_splat_u32(6));
    vec_u16 c7  = vec_splat_u16(7);

    align_vec0 = vec_lvsl( -is6tap-1, src);
    align_vec8 = vec_lvsl(8-is6tap-1, src);

    permh0     = vec_perm(align_vec0, align_vec0, perm_inner);
    permh8     = vec_perm(align_vec8, align_vec8, perm_inner);
    perm_inner = vec_add(perm_inner, vec_splat_u8(4));
    perml0     = vec_perm(align_vec0, align_vec0, perm_inner);
    perml8     = vec_perm(align_vec8, align_vec8, perm_inner);
    perm_6tap0 = vec_perm(align_vec0, align_vec0, perm_outer);
    perm_6tap8 = vec_perm(align_vec8, align_vec8, perm_outer);
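
    // [Added note] vec_ld ignores the low four address bits, so an
    // unaligned row is fetched as two aligned loads (a and b in FILTER_H)
    // and reassembled with vec_perm.  vec_lvsl supplies the alignment
    // permute, and composing it with perm_inner / perm_outer above folds
    // the alignment fixup and the sliding 4-pixel windows into a single
    // permute per msum operand.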

    while (h --> 0) {
        FILTER_H(f16h, 0);

        if (w == 16) {
            FILTER_H(f16l, 8);
            filt = vec_packsu(f16h, f16l);
            vec_st(filt, 0, dst);
        } else {
            filt = vec_packsu(f16h, f16h);
            vec_ste((vec_u32)filt, 0, (uint32_t*)dst);
            if (w == 8)
                vec_ste((vec_u32)filt, 4, (uint32_t*)dst);
        }
        src += src_stride;
        dst += dst_stride;
    }
}

// v subpel filter does a simple vertical multiply + add
static const vec_u8 v_subpel_filters[7] =
{
    { 0,   6, 123,  12,   1,   0 },
    { 2,  11, 108,  36,   8,   1 },
    { 0,   9,  93,  50,   6,   0 },
    { 3,  16,  77,  77,  16,   3 },
    { 0,   6,  50,  93,   9,   0 },
    { 1,   8,  36, 108,  11,   2 },
    { 0,   1,  12, 123,   6,   0 },
};
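
// [Added note] This table stores tap magnitudes only; the VP8 vertical
// filters have non-positive taps at positions 1 and 4, whose signs
// FILTER_V below restores by subtracting the s1 and s4 products and
// adding the rest.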

#define LOAD_V_SUBPEL_FILTER(i) \
    vec_u8 subpel_filter = v_subpel_filters[i]; \
    vec_u8 f0 = vec_splat(subpel_filter, 0); \
    vec_u8 f1 = vec_splat(subpel_filter, 1); \
    vec_u8 f2 = vec_splat(subpel_filter, 2); \
    vec_u8 f3 = vec_splat(subpel_filter, 3); \
    vec_u8 f4 = vec_splat(subpel_filter, 4); \
    vec_u8 f5 = vec_splat(subpel_filter, 5)

#define FILTER_V(dstv, vec_mul) \
    s1f = (vec_s16)vec_mul(s1, f1); \
    s2f = (vec_s16)vec_mul(s2, f2); \
    s3f = (vec_s16)vec_mul(s3, f3); \
    s4f = (vec_s16)vec_mul(s4, f4); \
    s2f = vec_subs(s2f, s1f); \
    s3f = vec_subs(s3f, s4f); \
    if (is6tap) { \
        s0f = (vec_s16)vec_mul(s0, f0); \
        s5f = (vec_s16)vec_mul(s5, f5); \
        s2f = vec_adds(s2f, s0f); \
        s3f = vec_adds(s3f, s5f); \
    } \
    dstv = vec_adds(s2f, s3f); \
    dstv = vec_adds(dstv, c64); \
    dstv = vec_sra(dstv, c7)

static av_always_inline
void put_vp8_epel_v_altivec_core(uint8_t *dst, int dst_stride,
                                 uint8_t *src, int src_stride,
                                 int h, int my, int w, int is6tap)
{
    LOAD_V_SUBPEL_FILTER(my-1);
    vec_u8 s0, s1, s2, s3, s4, s5, filt, align_vech, perm_vec, align_vecl;
    vec_s16 s0f, s1f, s2f, s3f, s4f, s5f, f16h, f16l;
    vec_s16 c64 = vec_sl(vec_splat_s16(1), vec_splat_u16(6));
    vec_u16 c7  = vec_splat_u16(7);

    // we want pixels 0-7 to be in the even positions and 8-15 in the odd,
    // so combine this permute with the alignment permute vector
    align_vech = vec_lvsl(0, src);
    align_vecl = vec_sld(align_vech, align_vech, 8);
    if (w == 16)
        perm_vec = vec_mergeh(align_vech, align_vecl);
    else
        perm_vec = vec_mergeh(align_vech, align_vech);

    
183
    if (is6tap)
184
        s0 = load_with_perm_vec(-2*src_stride, src, perm_vec);
185
    s1 = load_with_perm_vec(-1*src_stride, src, perm_vec);
186
    s2 = load_with_perm_vec( 0*src_stride, src, perm_vec);
187
    s3 = load_with_perm_vec( 1*src_stride, src, perm_vec);
188
    if (is6tap)
189
        s4 = load_with_perm_vec( 2*src_stride, src, perm_vec);
190

    
191
    src += (2+is6tap)*src_stride;
192

    
193
    while (h --> 0) {
194
        if (is6tap)
195
            s5 = load_with_perm_vec(0, src, perm_vec);
196
        else
197
            s4 = load_with_perm_vec(0, src, perm_vec);
198

    
199
        FILTER_V(f16h, vec_mule);
200

    
201
        if (w == 16) {
202
            FILTER_V(f16l, vec_mulo);
203
            filt = vec_packsu(f16h, f16l);
204
            vec_st(filt, 0, dst);
205
        } else {
206
            filt = vec_packsu(f16h, f16h);
207
            if (w == 4)
208
                filt = (vec_u8)vec_splat((vec_u32)filt, 0);
209
            else
210
                vec_ste((vec_u32)filt, 4, (uint32_t*)dst);
211
            vec_ste((vec_u32)filt, 0, (uint32_t*)dst);
212
        }
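
        // [Added note] vec_ste stores the one word whose lane matches the
        // destination address alignment; splatting word 0 across the vector
        // first lets the w == 4 store hit any word-aligned dst, while for
        // w == 8 the row is duplicated in both halves by the packsu above,
        // so the two word stores select valid lanes.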

        if (is6tap)
            s0 = s1;
        s1 = s2;
        s2 = s3;
        s3 = s4;
        if (is6tap)
            s4 = s5;

        dst += dst_stride;
        src += src_stride;
    }
}

#define EPEL_FUNCS(WIDTH, TAPS) \
static av_noinline \
void put_vp8_epel ## WIDTH ## _h ## TAPS ## _altivec(uint8_t *dst, int dst_stride, uint8_t *src, int src_stride, int h, int mx, int my) \
{ \
    put_vp8_epel_h_altivec_core(dst, dst_stride, src, src_stride, h, mx, WIDTH, TAPS == 6); \
} \
\
static av_noinline \
void put_vp8_epel ## WIDTH ## _v ## TAPS ## _altivec(uint8_t *dst, int dst_stride, uint8_t *src, int src_stride, int h, int mx, int my) \
{ \
    put_vp8_epel_v_altivec_core(dst, dst_stride, src, src_stride, h, my, WIDTH, TAPS == 6); \
}

    
240
#define EPEL_HV(WIDTH, HTAPS, VTAPS) \
241
static void put_vp8_epel ## WIDTH ## _h ## HTAPS ## v ## VTAPS ## _altivec(uint8_t *dst, int stride, uint8_t *src, int s, int h, int mx, int my) \
242
{ \
243
    DECLARE_ALIGNED(16, uint8_t, tmp)[(2*WIDTH+5)*16]; \
244
    if (VTAPS == 6) { \
245
        put_vp8_epel ## WIDTH ## _h ## HTAPS ## _altivec(tmp, 16,     src-2*stride, stride, h+5, mx, my); \
246
        put_vp8_epel ## WIDTH ## _v ## VTAPS ## _altivec(dst, stride, tmp+2*16,     16,     h,   mx, my); \
247
    } else { \
248
        put_vp8_epel ## WIDTH ## _h ## HTAPS ## _altivec(tmp, 16,     src-stride, stride, h+4, mx, my); \
249
        put_vp8_epel ## WIDTH ## _v ## VTAPS ## _altivec(dst, stride, tmp+16,     16,     h,   mx, my); \
250
    } \
251
}
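
// [Added note] The combined h+v case filters horizontally into a
// 16-byte-stride temp buffer first.  A 6-tap vertical pass reads two rows
// above and three below each output row, hence h+5 rows starting at
// src-2*stride (h+4 rows from src-stride in the 4-tap case); the vertical
// pass then re-centres on the original first row via tmp+2*16 (or tmp+16).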

EPEL_FUNCS(16,6)
EPEL_FUNCS(8, 6)
EPEL_FUNCS(8, 4)
EPEL_FUNCS(4, 6)
EPEL_FUNCS(4, 4)

EPEL_HV(16, 6,6)
EPEL_HV(8,  6,6)
EPEL_HV(8,  4,6)
EPEL_HV(8,  6,4)
EPEL_HV(8,  4,4)
EPEL_HV(4,  6,6)
EPEL_HV(4,  4,6)
EPEL_HV(4,  6,4)
EPEL_HV(4,  4,4)

static void put_vp8_pixels16_altivec(uint8_t *dst, int stride, uint8_t *src, int s, int h, int mx, int my)
{
    put_pixels16_altivec(dst, src, stride, h);
}
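
// [Added note, inferred from the assignments below]
// put_vp8_epel_pixels_tab is indexed [size][vertical][horizontal]:
// size 0/1/2 selects 16/8/4-pixel width, and each filter index is
// 0 for no interpolation, 1 for the 4-tap and 2 for the 6-tap filter.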

av_cold void ff_vp8dsp_init_altivec(VP8DSPContext *c)
{
    if (!(av_get_cpu_flags() & AV_CPU_FLAG_ALTIVEC))
        return;

    c->put_vp8_epel_pixels_tab[0][0][0] = put_vp8_pixels16_altivec;
    c->put_vp8_epel_pixels_tab[0][0][2] = put_vp8_epel16_h6_altivec;
    c->put_vp8_epel_pixels_tab[0][2][0] = put_vp8_epel16_v6_altivec;
    c->put_vp8_epel_pixels_tab[0][2][2] = put_vp8_epel16_h6v6_altivec;

    c->put_vp8_epel_pixels_tab[1][0][2] = put_vp8_epel8_h6_altivec;
    c->put_vp8_epel_pixels_tab[1][2][0] = put_vp8_epel8_v6_altivec;
    c->put_vp8_epel_pixels_tab[1][0][1] = put_vp8_epel8_h4_altivec;
    c->put_vp8_epel_pixels_tab[1][1][0] = put_vp8_epel8_v4_altivec;

    c->put_vp8_epel_pixels_tab[1][2][2] = put_vp8_epel8_h6v6_altivec;
    c->put_vp8_epel_pixels_tab[1][1][1] = put_vp8_epel8_h4v4_altivec;
    c->put_vp8_epel_pixels_tab[1][1][2] = put_vp8_epel8_h6v4_altivec;
    c->put_vp8_epel_pixels_tab[1][2][1] = put_vp8_epel8_h4v6_altivec;

    c->put_vp8_epel_pixels_tab[2][0][2] = put_vp8_epel4_h6_altivec;
    c->put_vp8_epel_pixels_tab[2][2][0] = put_vp8_epel4_v6_altivec;
    c->put_vp8_epel_pixels_tab[2][0][1] = put_vp8_epel4_h4_altivec;
    c->put_vp8_epel_pixels_tab[2][1][0] = put_vp8_epel4_v4_altivec;

    c->put_vp8_epel_pixels_tab[2][2][2] = put_vp8_epel4_h6v6_altivec;
    c->put_vp8_epel_pixels_tab[2][1][1] = put_vp8_epel4_h4v4_altivec;
    c->put_vp8_epel_pixels_tab[2][1][2] = put_vp8_epel4_h6v4_altivec;
    c->put_vp8_epel_pixels_tab[2][2][1] = put_vp8_epel4_h4v6_altivec;
}