ffmpeg / libavcodec / ppc / h264_altivec.c @ 3ca96802
/*
 * Copyright (c) 2004 Romain Dolbeau <romain@dolbeau.org>
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include "dsputil.h"

#include "gcc_fixes.h"

#include "dsputil_altivec.h"
#include "types_altivec.h"

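/* h264_template_altivec.c is included twice: first with OP_U8_ALTIVEC mapped
 * to PUT_OP_U8_ALTIVEC to generate the put_* functions, then with
 * AVG_OP_U8_ALTIVEC to generate the avg_* functions.  The PREFIX_* macros
 * select the names under which each instantiation is emitted. */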
#define PUT_OP_U8_ALTIVEC(d, s, dst) d = s
#define AVG_OP_U8_ALTIVEC(d, s, dst) d = vec_avg(dst, s)

#define OP_U8_ALTIVEC                          PUT_OP_U8_ALTIVEC
#define PREFIX_h264_chroma_mc8_altivec         put_h264_chroma_mc8_altivec
#define PREFIX_h264_chroma_mc8_num             altivec_put_h264_chroma_mc8_num
#define PREFIX_h264_qpel16_h_lowpass_altivec   put_h264_qpel16_h_lowpass_altivec
#define PREFIX_h264_qpel16_h_lowpass_num       altivec_put_h264_qpel16_h_lowpass_num
#define PREFIX_h264_qpel16_v_lowpass_altivec   put_h264_qpel16_v_lowpass_altivec
#define PREFIX_h264_qpel16_v_lowpass_num       altivec_put_h264_qpel16_v_lowpass_num
#define PREFIX_h264_qpel16_hv_lowpass_altivec  put_h264_qpel16_hv_lowpass_altivec
#define PREFIX_h264_qpel16_hv_lowpass_num      altivec_put_h264_qpel16_hv_lowpass_num
#include "h264_template_altivec.c"
#undef OP_U8_ALTIVEC
#undef PREFIX_h264_chroma_mc8_altivec
#undef PREFIX_h264_chroma_mc8_num
#undef PREFIX_h264_qpel16_h_lowpass_altivec
#undef PREFIX_h264_qpel16_h_lowpass_num
#undef PREFIX_h264_qpel16_v_lowpass_altivec
#undef PREFIX_h264_qpel16_v_lowpass_num
#undef PREFIX_h264_qpel16_hv_lowpass_altivec
#undef PREFIX_h264_qpel16_hv_lowpass_num

#define OP_U8_ALTIVEC                          AVG_OP_U8_ALTIVEC
#define PREFIX_h264_chroma_mc8_altivec         avg_h264_chroma_mc8_altivec
#define PREFIX_h264_chroma_mc8_num             altivec_avg_h264_chroma_mc8_num
#define PREFIX_h264_qpel16_h_lowpass_altivec   avg_h264_qpel16_h_lowpass_altivec
#define PREFIX_h264_qpel16_h_lowpass_num       altivec_avg_h264_qpel16_h_lowpass_num
#define PREFIX_h264_qpel16_v_lowpass_altivec   avg_h264_qpel16_v_lowpass_altivec
#define PREFIX_h264_qpel16_v_lowpass_num       altivec_avg_h264_qpel16_v_lowpass_num
#define PREFIX_h264_qpel16_hv_lowpass_altivec  avg_h264_qpel16_hv_lowpass_altivec
#define PREFIX_h264_qpel16_hv_lowpass_num      altivec_avg_h264_qpel16_hv_lowpass_num
#include "h264_template_altivec.c"
#undef OP_U8_ALTIVEC
#undef PREFIX_h264_chroma_mc8_altivec
#undef PREFIX_h264_chroma_mc8_num
#undef PREFIX_h264_qpel16_h_lowpass_altivec
#undef PREFIX_h264_qpel16_h_lowpass_num
#undef PREFIX_h264_qpel16_v_lowpass_altivec
#undef PREFIX_h264_qpel16_v_lowpass_num
#undef PREFIX_h264_qpel16_hv_lowpass_altivec
#undef PREFIX_h264_qpel16_hv_lowpass_num

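/* H264_MC() expands to the 16 quarter-pel motion-compensation functions
 * (mc00 .. mc33) for one block SIZE: mc00 is a plain copy, the half-pel
 * positions run the h/v/hv lowpass filters generated above, and the other
 * quarter-pel positions average two of those intermediate results via the
 * *_pixels*_l2 helpers. */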
#define H264_MC(OPNAME, SIZE, CODETYPE) \
static void OPNAME ## h264_qpel ## SIZE ## _mc00_ ## CODETYPE (uint8_t *dst, uint8_t *src, int stride){\
    OPNAME ## pixels ## SIZE ## _ ## CODETYPE(dst, src, stride, SIZE);\
}\
\
static void OPNAME ## h264_qpel ## SIZE ## _mc10_ ## CODETYPE(uint8_t *dst, uint8_t *src, int stride){ \
    DECLARE_ALIGNED_16(uint8_t, half[SIZE*SIZE]);\
    put_h264_qpel ## SIZE ## _h_lowpass_ ## CODETYPE(half, src, SIZE, stride);\
    OPNAME ## pixels ## SIZE ## _l2_ ## CODETYPE(dst, src, half, stride, stride, SIZE);\
}\
\
static void OPNAME ## h264_qpel ## SIZE ## _mc20_ ## CODETYPE(uint8_t *dst, uint8_t *src, int stride){\
    OPNAME ## h264_qpel ## SIZE ## _h_lowpass_ ## CODETYPE(dst, src, stride, stride);\
}\
\
static void OPNAME ## h264_qpel ## SIZE ## _mc30_ ## CODETYPE(uint8_t *dst, uint8_t *src, int stride){\
    DECLARE_ALIGNED_16(uint8_t, half[SIZE*SIZE]);\
    put_h264_qpel ## SIZE ## _h_lowpass_ ## CODETYPE(half, src, SIZE, stride);\
    OPNAME ## pixels ## SIZE ## _l2_ ## CODETYPE(dst, src+1, half, stride, stride, SIZE);\
}\
\
static void OPNAME ## h264_qpel ## SIZE ## _mc01_ ## CODETYPE(uint8_t *dst, uint8_t *src, int stride){\
    DECLARE_ALIGNED_16(uint8_t, half[SIZE*SIZE]);\
    put_h264_qpel ## SIZE ## _v_lowpass_ ## CODETYPE(half, src, SIZE, stride);\
    OPNAME ## pixels ## SIZE ## _l2_ ## CODETYPE(dst, src, half, stride, stride, SIZE);\
}\
\
static void OPNAME ## h264_qpel ## SIZE ## _mc02_ ## CODETYPE(uint8_t *dst, uint8_t *src, int stride){\
    OPNAME ## h264_qpel ## SIZE ## _v_lowpass_ ## CODETYPE(dst, src, stride, stride);\
}\
\
static void OPNAME ## h264_qpel ## SIZE ## _mc03_ ## CODETYPE(uint8_t *dst, uint8_t *src, int stride){\
    DECLARE_ALIGNED_16(uint8_t, half[SIZE*SIZE]);\
    put_h264_qpel ## SIZE ## _v_lowpass_ ## CODETYPE(half, src, SIZE, stride);\
    OPNAME ## pixels ## SIZE ## _l2_ ## CODETYPE(dst, src+stride, half, stride, stride, SIZE);\
}\
\
static void OPNAME ## h264_qpel ## SIZE ## _mc11_ ## CODETYPE(uint8_t *dst, uint8_t *src, int stride){\
    DECLARE_ALIGNED_16(uint8_t, halfH[SIZE*SIZE]);\
    DECLARE_ALIGNED_16(uint8_t, halfV[SIZE*SIZE]);\
    put_h264_qpel ## SIZE ## _h_lowpass_ ## CODETYPE(halfH, src, SIZE, stride);\
    put_h264_qpel ## SIZE ## _v_lowpass_ ## CODETYPE(halfV, src, SIZE, stride);\
    OPNAME ## pixels ## SIZE ## _l2_ ## CODETYPE(dst, halfH, halfV, stride, SIZE, SIZE);\
}\
\
static void OPNAME ## h264_qpel ## SIZE ## _mc31_ ## CODETYPE(uint8_t *dst, uint8_t *src, int stride){\
    DECLARE_ALIGNED_16(uint8_t, halfH[SIZE*SIZE]);\
    DECLARE_ALIGNED_16(uint8_t, halfV[SIZE*SIZE]);\
    put_h264_qpel ## SIZE ## _h_lowpass_ ## CODETYPE(halfH, src, SIZE, stride);\
    put_h264_qpel ## SIZE ## _v_lowpass_ ## CODETYPE(halfV, src+1, SIZE, stride);\
    OPNAME ## pixels ## SIZE ## _l2_ ## CODETYPE(dst, halfH, halfV, stride, SIZE, SIZE);\
}\
\
static void OPNAME ## h264_qpel ## SIZE ## _mc13_ ## CODETYPE(uint8_t *dst, uint8_t *src, int stride){\
    DECLARE_ALIGNED_16(uint8_t, halfH[SIZE*SIZE]);\
    DECLARE_ALIGNED_16(uint8_t, halfV[SIZE*SIZE]);\
    put_h264_qpel ## SIZE ## _h_lowpass_ ## CODETYPE(halfH, src + stride, SIZE, stride);\
    put_h264_qpel ## SIZE ## _v_lowpass_ ## CODETYPE(halfV, src, SIZE, stride);\
    OPNAME ## pixels ## SIZE ## _l2_ ## CODETYPE(dst, halfH, halfV, stride, SIZE, SIZE);\
}\
\
static void OPNAME ## h264_qpel ## SIZE ## _mc33_ ## CODETYPE(uint8_t *dst, uint8_t *src, int stride){\
    DECLARE_ALIGNED_16(uint8_t, halfH[SIZE*SIZE]);\
    DECLARE_ALIGNED_16(uint8_t, halfV[SIZE*SIZE]);\
    put_h264_qpel ## SIZE ## _h_lowpass_ ## CODETYPE(halfH, src + stride, SIZE, stride);\
    put_h264_qpel ## SIZE ## _v_lowpass_ ## CODETYPE(halfV, src+1, SIZE, stride);\
    OPNAME ## pixels ## SIZE ## _l2_ ## CODETYPE(dst, halfH, halfV, stride, SIZE, SIZE);\
}\
\
static void OPNAME ## h264_qpel ## SIZE ## _mc22_ ## CODETYPE(uint8_t *dst, uint8_t *src, int stride){\
    DECLARE_ALIGNED_16(int16_t, tmp[SIZE*(SIZE+8)]);\
    OPNAME ## h264_qpel ## SIZE ## _hv_lowpass_ ## CODETYPE(dst, tmp, src, stride, SIZE, stride);\
}\
\
static void OPNAME ## h264_qpel ## SIZE ## _mc21_ ## CODETYPE(uint8_t *dst, uint8_t *src, int stride){\
    DECLARE_ALIGNED_16(uint8_t, halfH[SIZE*SIZE]);\
    DECLARE_ALIGNED_16(uint8_t, halfHV[SIZE*SIZE]);\
    DECLARE_ALIGNED_16(int16_t, tmp[SIZE*(SIZE+8)]);\
    put_h264_qpel ## SIZE ## _h_lowpass_ ## CODETYPE(halfH, src, SIZE, stride);\
    put_h264_qpel ## SIZE ## _hv_lowpass_ ## CODETYPE(halfHV, tmp, src, SIZE, SIZE, stride);\
    OPNAME ## pixels ## SIZE ## _l2_ ## CODETYPE(dst, halfH, halfHV, stride, SIZE, SIZE);\
}\
\
static void OPNAME ## h264_qpel ## SIZE ## _mc23_ ## CODETYPE(uint8_t *dst, uint8_t *src, int stride){\
    DECLARE_ALIGNED_16(uint8_t, halfH[SIZE*SIZE]);\
    DECLARE_ALIGNED_16(uint8_t, halfHV[SIZE*SIZE]);\
    DECLARE_ALIGNED_16(int16_t, tmp[SIZE*(SIZE+8)]);\
    put_h264_qpel ## SIZE ## _h_lowpass_ ## CODETYPE(halfH, src + stride, SIZE, stride);\
    put_h264_qpel ## SIZE ## _hv_lowpass_ ## CODETYPE(halfHV, tmp, src, SIZE, SIZE, stride);\
    OPNAME ## pixels ## SIZE ## _l2_ ## CODETYPE(dst, halfH, halfHV, stride, SIZE, SIZE);\
}\
\
static void OPNAME ## h264_qpel ## SIZE ## _mc12_ ## CODETYPE(uint8_t *dst, uint8_t *src, int stride){\
    DECLARE_ALIGNED_16(uint8_t, halfV[SIZE*SIZE]);\
    DECLARE_ALIGNED_16(uint8_t, halfHV[SIZE*SIZE]);\
    DECLARE_ALIGNED_16(int16_t, tmp[SIZE*(SIZE+8)]);\
    put_h264_qpel ## SIZE ## _v_lowpass_ ## CODETYPE(halfV, src, SIZE, stride);\
    put_h264_qpel ## SIZE ## _hv_lowpass_ ## CODETYPE(halfHV, tmp, src, SIZE, SIZE, stride);\
    OPNAME ## pixels ## SIZE ## _l2_ ## CODETYPE(dst, halfV, halfHV, stride, SIZE, SIZE);\
}\
\
static void OPNAME ## h264_qpel ## SIZE ## _mc32_ ## CODETYPE(uint8_t *dst, uint8_t *src, int stride){\
    DECLARE_ALIGNED_16(uint8_t, halfV[SIZE*SIZE]);\
    DECLARE_ALIGNED_16(uint8_t, halfHV[SIZE*SIZE]);\
    DECLARE_ALIGNED_16(int16_t, tmp[SIZE*(SIZE+8)]);\
    put_h264_qpel ## SIZE ## _v_lowpass_ ## CODETYPE(halfV, src+1, SIZE, stride);\
    put_h264_qpel ## SIZE ## _hv_lowpass_ ## CODETYPE(halfHV, tmp, src, SIZE, SIZE, stride);\
    OPNAME ## pixels ## SIZE ## _l2_ ## CODETYPE(dst, halfV, halfHV, stride, SIZE, SIZE);\
}\

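/* Chroma 8xH bilinear interpolation: for the fractional position (x,y) the
 * four weights are A = (8-x)*(8-y), B = x*(8-y), C = (8-x)*y and D = x*y,
 * and each output pixel is
 *     (A*src[0] + B*src[1] + C*src[stride] + D*src[stride+1] + bias) >> 6.
 * This "no_rnd" variant uses a bias of 28 (v28ss below) instead of the
 * usual rounding value of 32. */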
/* this code assumes that stride % 16 == 0 */
void put_no_rnd_h264_chroma_mc8_altivec(uint8_t * dst, uint8_t * src, int stride, int h, int x, int y) {
   DECLARE_ALIGNED_16(signed int, ABCD[4]) =
                        {((8 - x) * (8 - y)),
                          ((x) * (8 - y)),
                          ((8 - x) * (y)),
                          ((x) * (y))};
    register int i;
    vec_u8_t fperm;
    const vec_s32_t vABCD = vec_ld(0, ABCD);
    const vec_s16_t vA = vec_splat((vec_s16_t)vABCD, 1);
    const vec_s16_t vB = vec_splat((vec_s16_t)vABCD, 3);
    const vec_s16_t vC = vec_splat((vec_s16_t)vABCD, 5);
    const vec_s16_t vD = vec_splat((vec_s16_t)vABCD, 7);
    LOAD_ZERO;
    const vec_s16_t v28ss = vec_sub(vec_sl(vec_splat_s16(1),vec_splat_u16(5)),vec_splat_s16(4));
    const vec_u16_t v6us = vec_splat_u16(6);
    register int loadSecond = (((unsigned long)src) % 16) <= 7 ? 0 : 1;
    register int reallyBadAlign = (((unsigned long)src) % 16) == 15 ? 1 : 0;

    vec_u8_t vsrcAuc, vsrcBuc, vsrcperm0, vsrcperm1;
    vec_u8_t vsrc0uc, vsrc1uc;
    vec_s16_t vsrc0ssH, vsrc1ssH;
    vec_u8_t vsrcCuc, vsrc2uc, vsrc3uc;
    vec_s16_t vsrc2ssH, vsrc3ssH, psum;
    vec_u8_t vdst, ppsum, fsum;

    if (((unsigned long)dst) % 16 == 0) {
      fperm = (vec_u8_t)AVV(0x10, 0x11, 0x12, 0x13,
                            0x14, 0x15, 0x16, 0x17,
                            0x08, 0x09, 0x0A, 0x0B,
                            0x0C, 0x0D, 0x0E, 0x0F);
    } else {
      fperm = (vec_u8_t)AVV(0x00, 0x01, 0x02, 0x03,
                            0x04, 0x05, 0x06, 0x07,
                            0x18, 0x19, 0x1A, 0x1B,
                            0x1C, 0x1D, 0x1E, 0x1F);
    }

    vsrcAuc = vec_ld(0, src);

    if (loadSecond)
      vsrcBuc = vec_ld(16, src);
    vsrcperm0 = vec_lvsl(0, src);
    vsrcperm1 = vec_lvsl(1, src);

    vsrc0uc = vec_perm(vsrcAuc, vsrcBuc, vsrcperm0);
    if (reallyBadAlign)
      vsrc1uc = vsrcBuc;
    else
      vsrc1uc = vec_perm(vsrcAuc, vsrcBuc, vsrcperm1);

    vsrc0ssH = (vec_s16_t)vec_mergeh(zero_u8v, (vec_u8_t)vsrc0uc);
    vsrc1ssH = (vec_s16_t)vec_mergeh(zero_u8v, (vec_u8_t)vsrc1uc);

    if (!loadSecond) {// -> !reallyBadAlign
      for (i = 0 ; i < h ; i++) {

        vsrcCuc = vec_ld(stride + 0, src);

        vsrc2uc = vec_perm(vsrcCuc, vsrcCuc, vsrcperm0);
        vsrc3uc = vec_perm(vsrcCuc, vsrcCuc, vsrcperm1);

        vsrc2ssH = (vec_s16_t)vec_mergeh(zero_u8v, (vec_u8_t)vsrc2uc);
        vsrc3ssH = (vec_s16_t)vec_mergeh(zero_u8v, (vec_u8_t)vsrc3uc);

        psum = vec_mladd(vA, vsrc0ssH, vec_splat_s16(0));
        psum = vec_mladd(vB, vsrc1ssH, psum);
        psum = vec_mladd(vC, vsrc2ssH, psum);
        psum = vec_mladd(vD, vsrc3ssH, psum);
        psum = vec_add(v28ss, psum);
        psum = vec_sra(psum, v6us);

        vdst = vec_ld(0, dst);
        ppsum = (vec_u8_t)vec_packsu(psum, psum);
        fsum = vec_perm(vdst, ppsum, fperm);

        vec_st(fsum, 0, dst);

        vsrc0ssH = vsrc2ssH;
        vsrc1ssH = vsrc3ssH;

        dst += stride;
        src += stride;
      }
    } else {
        vec_u8_t vsrcDuc;
      for (i = 0 ; i < h ; i++) {
        vsrcCuc = vec_ld(stride + 0, src);
        vsrcDuc = vec_ld(stride + 16, src);

        vsrc2uc = vec_perm(vsrcCuc, vsrcDuc, vsrcperm0);
        if (reallyBadAlign)
          vsrc3uc = vsrcDuc;
        else
          vsrc3uc = vec_perm(vsrcCuc, vsrcDuc, vsrcperm1);

        vsrc2ssH = (vec_s16_t)vec_mergeh(zero_u8v, (vec_u8_t)vsrc2uc);
        vsrc3ssH = (vec_s16_t)vec_mergeh(zero_u8v, (vec_u8_t)vsrc3uc);

        psum = vec_mladd(vA, vsrc0ssH, vec_splat_s16(0));
        psum = vec_mladd(vB, vsrc1ssH, psum);
        psum = vec_mladd(vC, vsrc2ssH, psum);
        psum = vec_mladd(vD, vsrc3ssH, psum);
        psum = vec_add(v28ss, psum);
        psum = vec_sr(psum, v6us);

        vdst = vec_ld(0, dst);
        ppsum = (vec_u8_t)vec_pack(psum, psum);
        fsum = vec_perm(vdst, ppsum, fperm);

        vec_st(fsum, 0, dst);

        vsrc0ssH = vsrc2ssH;
        vsrc1ssH = vsrc3ssH;

        dst += stride;
        src += stride;
      }
    }
}

static inline void put_pixels16_l2_altivec( uint8_t * dst, const uint8_t * src1,
305
                                    const uint8_t * src2, int dst_stride,
306
                                    int src_stride1, int h)
307
{
308
    int i;
309 3ca96802 Guillaume Poirier
    vec_u8_t a, b, d, tmp1, tmp2, mask, mask_, edges, align;
310 a6a12a8a Romain Dolbeau
311 0d18f798 Luca Barbato
    mask_ = vec_lvsl(0, src2);
312
313
    for (i = 0; i < h; i++) {
314
315
        tmp1 = vec_ld(i * src_stride1, src1);
316
        mask = vec_lvsl(i * src_stride1, src1);
317
        tmp2 = vec_ld(i * src_stride1 + 15, src1);
318
319
        a = vec_perm(tmp1, tmp2, mask);
320
321
        tmp1 = vec_ld(i * 16, src2);
322
        tmp2 = vec_ld(i * 16 + 15, src2);
323
324
        b = vec_perm(tmp1, tmp2, mask_);
325
326
        tmp1 = vec_ld(0, dst);
327
        mask = vec_lvsl(0, dst);
328
        tmp2 = vec_ld(15, dst);
329
330
        d = vec_avg(a, b);
331
332
        edges = vec_perm(tmp2, tmp1, mask);
333
334
        align = vec_lvsr(0, dst);
335
336 7e821457 Luca Barbato
        tmp2 = vec_perm(d, edges, align);
337 27303c8a Luca Barbato
        tmp1 = vec_perm(edges, d, align);
338 0d18f798 Luca Barbato
339 cb243ea2 Luca Barbato
        vec_st(tmp2, 15, dst);
340 27303c8a Luca Barbato
        vec_st(tmp1, 0 , dst);
341 0d18f798 Luca Barbato
342
        dst += dst_stride;
343
    }
344
}
345
346
static inline void avg_pixels16_l2_altivec( uint8_t * dst, const uint8_t * src1,
347
                                    const uint8_t * src2, int dst_stride,
348
                                    int src_stride1, int h)
349
{
350
    int i;
351 3ca96802 Guillaume Poirier
    vec_u8_t a, b, d, tmp1, tmp2, mask, mask_, edges, align;
352 0d18f798 Luca Barbato
353
    mask_ = vec_lvsl(0, src2);
354
355
    for (i = 0; i < h; i++) {
356
357
        tmp1 = vec_ld(i * src_stride1, src1);
358
        mask = vec_lvsl(i * src_stride1, src1);
359
        tmp2 = vec_ld(i * src_stride1 + 15, src1);
360
361
        a = vec_perm(tmp1, tmp2, mask);
362
363
        tmp1 = vec_ld(i * 16, src2);
364
        tmp2 = vec_ld(i * 16 + 15, src2);
365
366
        b = vec_perm(tmp1, tmp2, mask_);
367
368
        tmp1 = vec_ld(0, dst);
369
        mask = vec_lvsl(0, dst);
370
        tmp2 = vec_ld(15, dst);
371
372
        d = vec_avg(vec_perm(tmp1, tmp2, mask), vec_avg(a, b));
373
374
        edges = vec_perm(tmp2, tmp1, mask);
375
376
        align = vec_lvsr(0, dst);
377
378 7e821457 Luca Barbato
        tmp2 = vec_perm(d, edges, align);
379 27303c8a Luca Barbato
        tmp1 = vec_perm(edges, d, align);
380 0d18f798 Luca Barbato
381 cb243ea2 Luca Barbato
        vec_st(tmp2, 15, dst);
382 27303c8a Luca Barbato
        vec_st(tmp1, 0 , dst);
383 0d18f798 Luca Barbato
384
        dst += dst_stride;
385
    }
386 a6a12a8a Romain Dolbeau
}
387
388 0d18f798 Luca Barbato
/* Implemented but could be faster
389 a6a12a8a Romain Dolbeau
#define put_pixels16_l2_altivec(d,s1,s2,ds,s1s,h) put_pixels16_l2(d,s1,s2,ds,s1s,16,h)
390
#define avg_pixels16_l2_altivec(d,s1,s2,ds,s1s,h) avg_pixels16_l2(d,s1,s2,ds,s1s,16,h)
391 0d18f798 Luca Barbato
 */
392 a6a12a8a Romain Dolbeau
393 0d18f798 Luca Barbato
  H264_MC(put_, 16, altivec)
394
  H264_MC(avg_, 16, altivec)
395 a6a12a8a Romain Dolbeau
396 3813dcc9 Guillaume Poirier
/****************************************************************************
 * IDCT transform:
 ****************************************************************************/

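/* 4x4 transform.  VEC_1D_DCT is one pass of the H.264 4x4 inverse transform
 * butterfly (the scalar equivalents are given next to each statement);
 * ff_h264_idct_add_altivec() runs it on rows, transposes the block with
 * VEC_TRANSPOSE_4, runs it on columns, scales the result by >>6 and adds it,
 * with unsigned saturation, to the prediction already in dst. */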
#define VEC_1D_DCT(vb0,vb1,vb2,vb3,va0,va1,va2,va3)              \
   /* 1st stage */                                               \
   vz0 = vec_add(vb0,vb2);       /* temp[0] = Y[0] + Y[2] */     \
   vz1 = vec_sub(vb0,vb2);       /* temp[1] = Y[0] - Y[2] */     \
   vz2 = vec_sra(vb1,vec_splat_u16(1));                          \
   vz2 = vec_sub(vz2,vb3);       /* temp[2] = Y[1].1/2 - Y[3] */ \
   vz3 = vec_sra(vb3,vec_splat_u16(1));                          \
   vz3 = vec_add(vb1,vz3);       /* temp[3] = Y[1] + Y[3].1/2 */ \
   /* 2nd stage: output */                                       \
   va0 = vec_add(vz0,vz3);       /* x[0] = temp[0] + temp[3] */  \
   va1 = vec_add(vz1,vz2);       /* x[1] = temp[1] + temp[2] */  \
   va2 = vec_sub(vz1,vz2);       /* x[2] = temp[1] - temp[2] */  \
   va3 = vec_sub(vz0,vz3)        /* x[3] = temp[0] - temp[3] */

#define VEC_TRANSPOSE_4(a0,a1,a2,a3,b0,b1,b2,b3) \
    b0 = vec_mergeh( a0, a0 ); \
    b1 = vec_mergeh( a1, a0 ); \
    b2 = vec_mergeh( a2, a0 ); \
    b3 = vec_mergeh( a3, a0 ); \
    a0 = vec_mergeh( b0, b2 ); \
    a1 = vec_mergel( b0, b2 ); \
    a2 = vec_mergeh( b1, b3 ); \
    a3 = vec_mergel( b1, b3 ); \
    b0 = vec_mergeh( a0, a2 ); \
    b1 = vec_mergel( a0, a2 ); \
    b2 = vec_mergeh( a1, a3 ); \
    b3 = vec_mergel( a1, a3 )

#define VEC_LOAD_U8_ADD_S16_STORE_U8(va)                      \
    vdst_orig = vec_ld(0, dst);                               \
    vdst = vec_perm(vdst_orig, zero_u8v, vdst_mask);          \
    vdst_ss = (vec_s16_t) vec_mergeh(zero_u8v, vdst);         \
    va = vec_add(va, vdst_ss);                                \
    va_u8 = vec_packsu(va, zero_s16v);                        \
    va_u32 = vec_splat((vec_u32_t)va_u8, 0);                  \
    vec_ste(va_u32, element, (uint32_t*)dst);

static void ff_h264_idct_add_altivec(uint8_t *dst, DCTELEM *block, int stride)
{
    vec_s16_t va0, va1, va2, va3;
    vec_s16_t vz0, vz1, vz2, vz3;
    vec_s16_t vtmp0, vtmp1, vtmp2, vtmp3;
    vec_u8_t va_u8;
    vec_u32_t va_u32;
    vec_s16_t vdst_ss;
    const vec_u16_t v6us = vec_splat_u16(6);
    vec_u8_t vdst, vdst_orig;
    vec_u8_t vdst_mask = vec_lvsl(0, dst);
    int element = ((unsigned long)dst & 0xf) >> 2;
    LOAD_ZERO;

    block[0] += 32;  /* add 32 as a DC-level for rounding */

    vtmp0 = vec_ld(0,block);
    vtmp1 = vec_sld(vtmp0, vtmp0, 8);
    vtmp2 = vec_ld(16,block);
    vtmp3 = vec_sld(vtmp2, vtmp2, 8);

    VEC_1D_DCT(vtmp0,vtmp1,vtmp2,vtmp3,va0,va1,va2,va3);
    VEC_TRANSPOSE_4(va0,va1,va2,va3,vtmp0,vtmp1,vtmp2,vtmp3);
    VEC_1D_DCT(vtmp0,vtmp1,vtmp2,vtmp3,va0,va1,va2,va3);

    va0 = vec_sra(va0,v6us);
    va1 = vec_sra(va1,v6us);
    va2 = vec_sra(va2,v6us);
    va3 = vec_sra(va3,v6us);

    VEC_LOAD_U8_ADD_S16_STORE_U8(va0);
    dst += stride;
    VEC_LOAD_U8_ADD_S16_STORE_U8(va1);
    dst += stride;
    VEC_LOAD_U8_ADD_S16_STORE_U8(va2);
    dst += stride;
    VEC_LOAD_U8_ADD_S16_STORE_U8(va3);
}

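/* 8x8 transform.  IDCT8_1D_ALTIVEC is one pass of the H.264 8x8 inverse
 * transform (the scalar steps are kept as comments next to each vector op);
 * ff_h264_idct8_add_altivec() applies it to rows, transposes with TRANSPOSE8,
 * applies it to columns, and lets ALTIVEC_STORE_SUM_CLIP do the >>6 scaling,
 * the add to the prediction and the clipped store. */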
#define IDCT8_1D_ALTIVEC(s0, s1, s2, s3, s4, s5, s6, s7,  d0, d1, d2, d3, d4, d5, d6, d7) {\
    /*        a0  = SRC(0) + SRC(4); */ \
    vec_s16_t a0v = vec_add(s0, s4);    \
    /*        a2  = SRC(0) - SRC(4); */ \
    vec_s16_t a2v = vec_sub(s0, s4);    \
    /*        a4  =           (SRC(2)>>1) - SRC(6); */ \
    vec_s16_t a4v = vec_sub(vec_sra(s2, onev), s6);    \
    /*        a6  =           (SRC(6)>>1) + SRC(2); */ \
    vec_s16_t a6v = vec_add(vec_sra(s6, onev), s2);    \
    /*        b0  =         a0 + a6; */ \
    vec_s16_t b0v = vec_add(a0v, a6v);  \
    /*        b2  =         a2 + a4; */ \
    vec_s16_t b2v = vec_add(a2v, a4v);  \
    /*        b4  =         a2 - a4; */ \
    vec_s16_t b4v = vec_sub(a2v, a4v);  \
    /*        b6  =         a0 - a6; */ \
    vec_s16_t b6v = vec_sub(a0v, a6v);  \
    /* a1 =  SRC(5) - SRC(3) - SRC(7) - (SRC(7)>>1); */ \
    /*        a1 =             (SRC(5)-SRC(3)) -  (SRC(7)  +  (SRC(7)>>1)); */ \
    vec_s16_t a1v = vec_sub( vec_sub(s5, s3), vec_add(s7, vec_sra(s7, onev)) ); \
    /* a3 =  SRC(7) + SRC(1) - SRC(3) - (SRC(3)>>1); */ \
    /*        a3 =             (SRC(7)+SRC(1)) -  (SRC(3)  +  (SRC(3)>>1)); */ \
    vec_s16_t a3v = vec_sub( vec_add(s7, s1), vec_add(s3, vec_sra(s3, onev)) );\
    /* a5 =  SRC(7) - SRC(1) + SRC(5) + (SRC(5)>>1); */ \
    /*        a5 =             (SRC(7)-SRC(1)) +   SRC(5) +   (SRC(5)>>1); */ \
    vec_s16_t a5v = vec_add( vec_sub(s7, s1), vec_add(s5, vec_sra(s5, onev)) );\
    /*        a7 =                SRC(5)+SRC(3) +  SRC(1) +   (SRC(1)>>1); */ \
    vec_s16_t a7v = vec_add( vec_add(s5, s3), vec_add(s1, vec_sra(s1, onev)) );\
    /*        b1 =                  (a7>>2)  +  a1; */ \
    vec_s16_t b1v = vec_add( vec_sra(a7v, twov), a1v); \
    /*        b3 =          a3 +        (a5>>2); */ \
    vec_s16_t b3v = vec_add(a3v, vec_sra(a5v, twov)); \
    /*        b5 =                  (a3>>2)  -   a5; */ \
    vec_s16_t b5v = vec_sub( vec_sra(a3v, twov), a5v); \
    /*        b7 =           a7 -        (a1>>2); */ \
    vec_s16_t b7v = vec_sub( a7v, vec_sra(a1v, twov)); \
    /* DST(0,    b0 + b7); */ \
    d0 = vec_add(b0v, b7v); \
    /* DST(1,    b2 + b5); */ \
    d1 = vec_add(b2v, b5v); \
    /* DST(2,    b4 + b3); */ \
    d2 = vec_add(b4v, b3v); \
    /* DST(3,    b6 + b1); */ \
    d3 = vec_add(b6v, b1v); \
    /* DST(4,    b6 - b1); */ \
    d4 = vec_sub(b6v, b1v); \
    /* DST(5,    b4 - b3); */ \
    d5 = vec_sub(b4v, b3v); \
    /* DST(6,    b2 - b5); */ \
    d6 = vec_sub(b2v, b5v); \
    /* DST(7,    b0 - b7); */ \
    d7 = vec_sub(b0v, b7v); \
}

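/* ALTIVEC_STORE_SUM_CLIP adds one row of (coefficients >> 6) to 8 pixels of
 * dest.  AltiVec has no unaligned store, so the 8 result bytes are rotated
 * into position with perm_stv and merged into the two 16-byte vectors
 * covering dest via vec_sel() and the 'sel' mask, leaving the neighbouring
 * bytes untouched. */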
#define ALTIVEC_STORE_SUM_CLIP(dest, idctv, perm_ldv, perm_stv, sel) { \
    /* unaligned load */                                       \
    vec_u8_t hv = vec_ld( 0, dest );                           \
    vec_u8_t lv = vec_ld( 7, dest );                           \
    vec_u8_t dstv   = vec_perm( hv, lv, (vec_u8_t)perm_ldv );  \
    vec_s16_t idct_sh6 = vec_sra(idctv, sixv);                 \
    vec_u16_t dst16 = (vec_u16_t)vec_mergeh(zero_u8v, dstv);   \
    vec_s16_t idstsum = vec_adds(idct_sh6, (vec_s16_t)dst16);  \
    vec_u8_t idstsum8 = vec_packsu(zero_s16v, idstsum);        \
    vec_u8_t edgehv;                                           \
    /* unaligned store */                                      \
    vec_u8_t bodyv  = vec_perm( idstsum8, idstsum8, perm_stv );\
    vec_u8_t edgelv = vec_perm( sel, zero_u8v, perm_stv );     \
    lv    = vec_sel( lv, bodyv, edgelv );                      \
    vec_st( lv, 7, dest );                                     \
    hv    = vec_ld( 0, dest );                                 \
    edgehv = vec_perm( zero_u8v, sel, perm_stv );              \
    hv    = vec_sel( hv, bodyv, edgehv );                      \
    vec_st( hv, 0, dest );                                     \
 }

void ff_h264_idct8_add_altivec( uint8_t *dst, DCTELEM *dct, int stride ) {
    vec_s16_t s0, s1, s2, s3, s4, s5, s6, s7;
    vec_s16_t d0, d1, d2, d3, d4, d5, d6, d7;
    vec_s16_t idct0, idct1, idct2, idct3, idct4, idct5, idct6, idct7;

    vec_u8_t perm_ldv = vec_lvsl(0, dst);
    vec_u8_t perm_stv = vec_lvsr(8, dst);

    const vec_u16_t onev = vec_splat_u16(1);
    const vec_u16_t twov = vec_splat_u16(2);
    const vec_u16_t sixv = vec_splat_u16(6);

    const vec_u8_t sel = (vec_u8_t) AVV(0,0,0,0,0,0,0,0,-1,-1,-1,-1,-1,-1,-1,-1);
    LOAD_ZERO;

    dct[0] += 32; // rounding for the >>6 at the end

    s0 = vec_ld(0x00, (int16_t*)dct);
    s1 = vec_ld(0x10, (int16_t*)dct);
    s2 = vec_ld(0x20, (int16_t*)dct);
    s3 = vec_ld(0x30, (int16_t*)dct);
    s4 = vec_ld(0x40, (int16_t*)dct);
    s5 = vec_ld(0x50, (int16_t*)dct);
    s6 = vec_ld(0x60, (int16_t*)dct);
    s7 = vec_ld(0x70, (int16_t*)dct);

    IDCT8_1D_ALTIVEC(s0, s1, s2, s3, s4, s5, s6, s7,
                     d0, d1, d2, d3, d4, d5, d6, d7);

    TRANSPOSE8( d0,  d1,  d2,  d3,  d4,  d5,  d6, d7 );

    IDCT8_1D_ALTIVEC(d0,  d1,  d2,  d3,  d4,  d5,  d6, d7,
                     idct0, idct1, idct2, idct3, idct4, idct5, idct6, idct7);

    ALTIVEC_STORE_SUM_CLIP(&dst[0*stride], idct0, perm_ldv, perm_stv, sel);
    ALTIVEC_STORE_SUM_CLIP(&dst[1*stride], idct1, perm_ldv, perm_stv, sel);
    ALTIVEC_STORE_SUM_CLIP(&dst[2*stride], idct2, perm_ldv, perm_stv, sel);
    ALTIVEC_STORE_SUM_CLIP(&dst[3*stride], idct3, perm_ldv, perm_stv, sel);
    ALTIVEC_STORE_SUM_CLIP(&dst[4*stride], idct4, perm_ldv, perm_stv, sel);
    ALTIVEC_STORE_SUM_CLIP(&dst[5*stride], idct5, perm_ldv, perm_stv, sel);
    ALTIVEC_STORE_SUM_CLIP(&dst[6*stride], idct6, perm_ldv, perm_stv, sel);
    ALTIVEC_STORE_SUM_CLIP(&dst[7*stride], idct7, perm_ldv, perm_stv, sel);
}

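/* Deblocking filter.  h264_v_loop_filter_luma_altivec() below works directly
 * on rows loaded from memory; h264_h_loop_filter_luma_altivec() first
 * transposes a 16x6 strip of columns into registers (readAndTranspose16x6),
 * applies the same filter, then transposes the four modified rows back
 * (transpose4x16) and writes them out with write16x4(). */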
#define transpose4x16(r0, r1, r2, r3) {      \
    register vec_u8_t r4;                    \
    register vec_u8_t r5;                    \
    register vec_u8_t r6;                    \
    register vec_u8_t r7;                    \
                                             \
    r4 = vec_mergeh(r0, r2);  /*0, 2 set 0*/ \
    r5 = vec_mergel(r0, r2);  /*0, 2 set 1*/ \
    r6 = vec_mergeh(r1, r3);  /*1, 3 set 0*/ \
    r7 = vec_mergel(r1, r3);  /*1, 3 set 1*/ \
                                             \
    r0 = vec_mergeh(r4, r6);  /*all set 0*/  \
    r1 = vec_mergel(r4, r6);  /*all set 1*/  \
    r2 = vec_mergeh(r5, r7);  /*all set 2*/  \
    r3 = vec_mergel(r5, r7);  /*all set 3*/  \
}

static inline void write16x4(uint8_t *dst, int dst_stride,
                             register vec_u8_t r0, register vec_u8_t r1,
                             register vec_u8_t r2, register vec_u8_t r3) {
    DECLARE_ALIGNED_16(unsigned char, result[64]);
    uint32_t *src_int = (uint32_t *)result, *dst_int = (uint32_t *)dst;
    int int_dst_stride = dst_stride/4;

    vec_st(r0, 0, result);
    vec_st(r1, 16, result);
    vec_st(r2, 32, result);
    vec_st(r3, 48, result);
    /* FIXME: there has to be a better way!!!! */
    *dst_int = *src_int;
    *(dst_int+   int_dst_stride) = *(src_int + 1);
    *(dst_int+ 2*int_dst_stride) = *(src_int + 2);
    *(dst_int+ 3*int_dst_stride) = *(src_int + 3);
    *(dst_int+ 4*int_dst_stride) = *(src_int + 4);
    *(dst_int+ 5*int_dst_stride) = *(src_int + 5);
    *(dst_int+ 6*int_dst_stride) = *(src_int + 6);
    *(dst_int+ 7*int_dst_stride) = *(src_int + 7);
    *(dst_int+ 8*int_dst_stride) = *(src_int + 8);
    *(dst_int+ 9*int_dst_stride) = *(src_int + 9);
    *(dst_int+10*int_dst_stride) = *(src_int + 10);
    *(dst_int+11*int_dst_stride) = *(src_int + 11);
    *(dst_int+12*int_dst_stride) = *(src_int + 12);
    *(dst_int+13*int_dst_stride) = *(src_int + 13);
    *(dst_int+14*int_dst_stride) = *(src_int + 14);
    *(dst_int+15*int_dst_stride) = *(src_int + 15);
}

/** \brief performs a 6x16 transpose of data in src, and stores it to dst
    \todo FIXME: see if we can't spare some vec_lvsl() by factorizing them
    out of unaligned_load() */
#define readAndTranspose16x6(src, src_stride, r8, r9, r10, r11, r12, r13) {\
    register vec_u8_t r0  = unaligned_load(0,             src);            \
    register vec_u8_t r1  = unaligned_load(   src_stride, src);            \
    register vec_u8_t r2  = unaligned_load(2* src_stride, src);            \
    register vec_u8_t r3  = unaligned_load(3* src_stride, src);            \
    register vec_u8_t r4  = unaligned_load(4* src_stride, src);            \
    register vec_u8_t r5  = unaligned_load(5* src_stride, src);            \
    register vec_u8_t r6  = unaligned_load(6* src_stride, src);            \
    register vec_u8_t r7  = unaligned_load(7* src_stride, src);            \
    register vec_u8_t r14 = unaligned_load(14*src_stride, src);            \
    register vec_u8_t r15 = unaligned_load(15*src_stride, src);            \
                                                                           \
    r8  = unaligned_load( 8*src_stride, src);                              \
    r9  = unaligned_load( 9*src_stride, src);                              \
    r10 = unaligned_load(10*src_stride, src);                              \
    r11 = unaligned_load(11*src_stride, src);                              \
    r12 = unaligned_load(12*src_stride, src);                              \
    r13 = unaligned_load(13*src_stride, src);                              \
                                                                           \
    /*Merge first pairs*/                                                  \
    r0 = vec_mergeh(r0, r8);    /*0, 8*/                                   \
    r1 = vec_mergeh(r1, r9);    /*1, 9*/                                   \
    r2 = vec_mergeh(r2, r10);   /*2,10*/                                   \
    r3 = vec_mergeh(r3, r11);   /*3,11*/                                   \
    r4 = vec_mergeh(r4, r12);   /*4,12*/                                   \
    r5 = vec_mergeh(r5, r13);   /*5,13*/                                   \
    r6 = vec_mergeh(r6, r14);   /*6,14*/                                   \
    r7 = vec_mergeh(r7, r15);   /*7,15*/                                   \
                                                                           \
    /*Merge second pairs*/                                                 \
    r8  = vec_mergeh(r0, r4);   /*0,4, 8,12 set 0*/                        \
    r9  = vec_mergel(r0, r4);   /*0,4, 8,12 set 1*/                        \
    r10 = vec_mergeh(r1, r5);   /*1,5, 9,13 set 0*/                        \
    r11 = vec_mergel(r1, r5);   /*1,5, 9,13 set 1*/                        \
    r12 = vec_mergeh(r2, r6);   /*2,6,10,14 set 0*/                        \
    r13 = vec_mergel(r2, r6);   /*2,6,10,14 set 1*/                        \
    r14 = vec_mergeh(r3, r7);   /*3,7,11,15 set 0*/                        \
    r15 = vec_mergel(r3, r7);   /*3,7,11,15 set 1*/                        \
                                                                           \
    /*Third merge*/                                                        \
    r0 = vec_mergeh(r8, r12);   /*0,2,4,6,8,10,12,14 set 0*/               \
    r1 = vec_mergel(r8, r12);   /*0,2,4,6,8,10,12,14 set 1*/               \
    r2 = vec_mergeh(r9, r13);   /*0,2,4,6,8,10,12,14 set 2*/               \
    r4 = vec_mergeh(r10, r14);  /*1,3,5,7,9,11,13,15 set 0*/               \
    r5 = vec_mergel(r10, r14);  /*1,3,5,7,9,11,13,15 set 1*/               \
    r6 = vec_mergeh(r11, r15);  /*1,3,5,7,9,11,13,15 set 2*/               \
    /* Don't need to compute 3 and 7*/                                     \
                                                                           \
    /*Final merge*/                                                        \
    r8  = vec_mergeh(r0, r4);   /*all set 0*/                              \
    r9  = vec_mergel(r0, r4);   /*all set 1*/                              \
    r10 = vec_mergeh(r1, r5);   /*all set 2*/                              \
    r11 = vec_mergel(r1, r5);   /*all set 3*/                              \
    r12 = vec_mergeh(r2, r6);   /*all set 4*/                              \
    r13 = vec_mergel(r2, r6);   /*all set 5*/                              \
    /* Don't need to compute 14 and 15*/                                   \
                                                                           \
}

// out: o = |x-y| < a
static inline vec_u8_t diff_lt_altivec ( register vec_u8_t x,
                                         register vec_u8_t y,
                                         register vec_u8_t a) {

    register vec_u8_t diff = vec_subs(x, y);
    register vec_u8_t diffneg = vec_subs(y, x);
    register vec_u8_t o = vec_or(diff, diffneg); /* |x-y| */
    o = (vec_u8_t)vec_cmplt(o, a);
    return o;
}

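/* Per-pixel filter condition: an edge pixel is filtered only where
 * |p0 - q0| < alpha, |p1 - p0| < beta and |q1 - q0| < beta all hold;
 * h264_deblock_mask() builds that condition as a byte mask. */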
static inline vec_u8_t h264_deblock_mask ( register vec_u8_t p0,
                                           register vec_u8_t p1,
                                           register vec_u8_t q0,
                                           register vec_u8_t q1,
                                           register vec_u8_t alpha,
                                           register vec_u8_t beta) {

    register vec_u8_t mask;
    register vec_u8_t tempmask;

    mask = diff_lt_altivec(p0, q0, alpha);
    tempmask = diff_lt_altivec(p1, p0, beta);
    mask = vec_and(mask, tempmask);
    tempmask = diff_lt_altivec(q1, q0, beta);
    mask = vec_and(mask, tempmask);

    return mask;
}

// out: newp1 = clip((p2 + ((p0 + q0 + 1) >> 1)) >> 1, p1-tc0, p1+tc0)
static inline vec_u8_t h264_deblock_q1(register vec_u8_t p0,
                                       register vec_u8_t p1,
                                       register vec_u8_t p2,
                                       register vec_u8_t q0,
                                       register vec_u8_t tc0) {

    register vec_u8_t average = vec_avg(p0, q0);
    register vec_u8_t temp;
    register vec_u8_t uncliped;
    register vec_u8_t ones;
    register vec_u8_t max;
    register vec_u8_t min;
    register vec_u8_t newp1;

    temp = vec_xor(average, p2);
    average = vec_avg(average, p2);     /*avg(p2, avg(p0, q0)) */
    ones = vec_splat_u8(1);
    temp = vec_and(temp, ones);         /*(p2^avg(p0, q0)) & 1 */
    uncliped = vec_subs(average, temp); /*(p2+((p0+q0+1)>>1))>>1 */
    max = vec_adds(p1, tc0);
    min = vec_subs(p1, tc0);
    newp1 = vec_max(min, uncliped);
    newp1 = vec_min(max, newp1);
    return newp1;
}

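/* p0/q0 update.  The macro below computes the clipped luma delta of the
 * H.264 deblocking filter, roughly
 *     delta = clip((((q0 - p0) << 2) + (p1 - q1) + 4) >> 3, -tc0, tc0),
 * then p0 += delta and q0 -= delta, working entirely in the unsigned-byte
 * domain with averages (see the per-line comments). */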
#define h264_deblock_p0_q0(p0, p1, q0, q1, tc0masked) {                                           \
                                                                                                  \
    const vec_u8_t A0v = vec_sl(vec_splat_u8(10), vec_splat_u8(4));                               \
                                                                                                  \
    register vec_u8_t pq0bit = vec_xor(p0,q0);                                                    \
    register vec_u8_t q1minus;                                                                    \
    register vec_u8_t p0minus;                                                                    \
    register vec_u8_t stage1;                                                                     \
    register vec_u8_t stage2;                                                                     \
    register vec_u8_t vec160;                                                                     \
    register vec_u8_t delta;                                                                      \
    register vec_u8_t deltaneg;                                                                   \
                                                                                                  \
    q1minus = vec_nor(q1, q1);                 /* 255 - q1 */                                     \
    stage1 = vec_avg(p1, q1minus);             /* (p1 - q1 + 256)>>1 */                           \
    stage2 = vec_sr(stage1, vec_splat_u8(1));  /* (p1 - q1 + 256)>>2 = 64 + (p1 - q1) >> 2 */     \
    p0minus = vec_nor(p0, p0);                 /* 255 - p0 */                                     \
    stage1 = vec_avg(q0, p0minus);             /* (q0 - p0 + 256)>>1 */                           \
    pq0bit = vec_and(pq0bit, vec_splat_u8(1));                                                    \
    stage2 = vec_avg(stage2, pq0bit);          /* 32 + ((q0 - p0)&1 + (p1 - q1) >> 2 + 1) >> 1 */ \
    stage2 = vec_adds(stage2, stage1);         /* 160 + ((p0 - q0) + (p1 - q1) >> 2 + 1) >> 1 */  \
    vec160 = vec_ld(0, &A0v);                                                                     \
    deltaneg = vec_subs(vec160, stage2);       /* -d */                                           \
    delta = vec_subs(stage2, vec160);          /* d */                                            \
    deltaneg = vec_min(tc0masked, deltaneg);                                                      \
    delta = vec_min(tc0masked, delta);                                                            \
    p0 = vec_subs(p0, deltaneg);                                                                  \
    q0 = vec_subs(q0, delta);                                                                     \
    p0 = vec_adds(p0, delta);                                                                     \
    q0 = vec_adds(q0, deltaneg);                                                                  \
}

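/* One application of the luma edge filter on 16 pixels in parallel:
 * build the filter mask, zero it where tc0[i] < 0, conditionally filter
 * p1/q1 where |p2 - p0| < beta resp. |q2 - q0| < beta (raising tc by one
 * for each side that is filtered), then filter p0/q0 with the final tc. */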
#define h264_loop_filter_luma_altivec(p2, p1, p0, q0, q1, q2, alpha, beta, tc0) {            \
    DECLARE_ALIGNED_16(unsigned char, temp[16]);                                             \
    register vec_u8_t alphavec;                                                              \
    register vec_u8_t betavec;                                                               \
    register vec_u8_t mask;                                                                  \
    register vec_u8_t p1mask;                                                                \
    register vec_u8_t q1mask;                                                                \
    register vector signed   char tc0vec;                                                    \
    register vec_u8_t finaltc0;                                                              \
    register vec_u8_t tc0masked;                                                             \
    register vec_u8_t newp1;                                                                 \
    register vec_u8_t newq1;                                                                 \
                                                                                             \
    temp[0] = alpha;                                                                         \
    temp[1] = beta;                                                                          \
    alphavec = vec_ld(0, temp);                                                              \
    betavec = vec_splat(alphavec, 0x1);                                                      \
    alphavec = vec_splat(alphavec, 0x0);                                                     \
    mask = h264_deblock_mask(p0, p1, q0, q1, alphavec, betavec); /*if in block */            \
                                                                                             \
    *((int *)temp) = *((int *)tc0);                                                          \
    tc0vec = vec_ld(0, (signed char*)temp);                                                  \
    tc0vec = vec_mergeh(tc0vec, tc0vec);                                                     \
    tc0vec = vec_mergeh(tc0vec, tc0vec);                                                     \
    mask = vec_and(mask, vec_cmpgt(tc0vec, vec_splat_s8(-1)));  /* if tc0[i] >= 0 */         \
    finaltc0 = vec_and((vec_u8_t)tc0vec, mask);     /* tc = tc0 */                           \
                                                                                             \
    p1mask = diff_lt_altivec(p2, p0, betavec);                                               \
    p1mask = vec_and(p1mask, mask);                             /* if( |p2 - p0| < beta) */  \
    tc0masked = vec_and(p1mask, (vec_u8_t)tc0vec);                                           \
    finaltc0 = vec_sub(finaltc0, p1mask);                       /* tc++ */                   \
    newp1 = h264_deblock_q1(p0, p1, p2, q0, tc0masked);                                      \
    /*end if*/                                                                               \
                                                                                             \
    q1mask = diff_lt_altivec(q2, q0, betavec);                                               \
    q1mask = vec_and(q1mask, mask);                             /* if ( |q2 - q0| < beta ) */\
    tc0masked = vec_and(q1mask, (vec_u8_t)tc0vec);                                           \
    finaltc0 = vec_sub(finaltc0, q1mask);                       /* tc++ */                   \
    newq1 = h264_deblock_q1(p0, q1, q2, q0, tc0masked);                                      \
    /*end if*/                                                                               \
                                                                                             \
    h264_deblock_p0_q0(p0, p1, q0, q1, finaltc0);                                            \
    p1 = newp1;                                                                              \
    q1 = newq1;                                                                              \
}

static void h264_v_loop_filter_luma_altivec(uint8_t *pix, int stride, int alpha, int beta, int8_t *tc0) {

    if((tc0[0] & tc0[1] & tc0[2] & tc0[3]) >= 0) {
        register vec_u8_t p2 = vec_ld(-3*stride, pix);
        register vec_u8_t p1 = vec_ld(-2*stride, pix);
        register vec_u8_t p0 = vec_ld(-1*stride, pix);
        register vec_u8_t q0 = vec_ld(0, pix);
        register vec_u8_t q1 = vec_ld(stride, pix);
        register vec_u8_t q2 = vec_ld(2*stride, pix);
        h264_loop_filter_luma_altivec(p2, p1, p0, q0, q1, q2, alpha, beta, tc0);
        vec_st(p1, -2*stride, pix);
        vec_st(p0, -1*stride, pix);
        vec_st(q0, 0, pix);
        vec_st(q1, stride, pix);
    }
}

static void h264_h_loop_filter_luma_altivec(uint8_t *pix, int stride, int alpha, int beta, int8_t *tc0) {

    register vec_u8_t line0, line1, line2, line3, line4, line5;
    if((tc0[0] & tc0[1] & tc0[2] & tc0[3]) < 0)
        return;
    readAndTranspose16x6(pix-3, stride, line0, line1, line2, line3, line4, line5);
    h264_loop_filter_luma_altivec(line0, line1, line2, line3, line4, line5, alpha, beta, tc0);
    transpose4x16(line1, line2, line3, line4);
    write16x4(pix-2, stride, line1, line2, line3, line4);
}

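/* Runtime init: install the AltiVec implementations into the DSPContext
 * when AltiVec support was compiled in and is present on the host CPU. */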
void dsputil_h264_init_ppc(DSPContext* c, AVCodecContext *avctx) {

#ifdef HAVE_ALTIVEC
  if (has_altivec()) {
    c->put_h264_chroma_pixels_tab[0] = put_h264_chroma_mc8_altivec;
    c->put_no_rnd_h264_chroma_pixels_tab[0] = put_no_rnd_h264_chroma_mc8_altivec;
    c->avg_h264_chroma_pixels_tab[0] = avg_h264_chroma_mc8_altivec;
    c->h264_idct_add = ff_h264_idct_add_altivec;
    c->h264_idct8_add = ff_h264_idct8_add_altivec;
    c->h264_v_loop_filter_luma= h264_v_loop_filter_luma_altivec;
    c->h264_h_loop_filter_luma= h264_h_loop_filter_luma_altivec;

#define dspfunc(PFX, IDX, NUM) \
    c->PFX ## _pixels_tab[IDX][ 0] = PFX ## NUM ## _mc00_altivec; \
    c->PFX ## _pixels_tab[IDX][ 1] = PFX ## NUM ## _mc10_altivec; \
    c->PFX ## _pixels_tab[IDX][ 2] = PFX ## NUM ## _mc20_altivec; \
    c->PFX ## _pixels_tab[IDX][ 3] = PFX ## NUM ## _mc30_altivec; \
    c->PFX ## _pixels_tab[IDX][ 4] = PFX ## NUM ## _mc01_altivec; \
    c->PFX ## _pixels_tab[IDX][ 5] = PFX ## NUM ## _mc11_altivec; \
    c->PFX ## _pixels_tab[IDX][ 6] = PFX ## NUM ## _mc21_altivec; \
    c->PFX ## _pixels_tab[IDX][ 7] = PFX ## NUM ## _mc31_altivec; \
    c->PFX ## _pixels_tab[IDX][ 8] = PFX ## NUM ## _mc02_altivec; \
    c->PFX ## _pixels_tab[IDX][ 9] = PFX ## NUM ## _mc12_altivec; \
    c->PFX ## _pixels_tab[IDX][10] = PFX ## NUM ## _mc22_altivec; \
    c->PFX ## _pixels_tab[IDX][11] = PFX ## NUM ## _mc32_altivec; \
    c->PFX ## _pixels_tab[IDX][12] = PFX ## NUM ## _mc03_altivec; \
    c->PFX ## _pixels_tab[IDX][13] = PFX ## NUM ## _mc13_altivec; \
    c->PFX ## _pixels_tab[IDX][14] = PFX ## NUM ## _mc23_altivec; \
    c->PFX ## _pixels_tab[IDX][15] = PFX ## NUM ## _mc33_altivec

    dspfunc(put_h264_qpel, 0, 16);
    dspfunc(avg_h264_qpel, 0, 16);
#undef dspfunc

  } else
#endif /* HAVE_ALTIVEC */
  {
    // Non-AltiVec PPC optimisations

    // ... pending ...
  }
}