/*
 * Copyright (c) 2004 Romain Dolbeau <romain@dolbeau.org>
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include "libavcodec/dsputil.h"
#include "libavcodec/h264data.h"

#include "dsputil_ppc.h"
#include "dsputil_altivec.h"
#include "util_altivec.h"
#include "types_altivec.h"

#define PUT_OP_U8_ALTIVEC(d, s, dst) d = s
#define AVG_OP_U8_ALTIVEC(d, s, dst) d = vec_avg(dst, s)

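/* The chroma and qpel kernels live in h264_template_altivec.c and are
 * instantiated twice below: once with OP_U8_ALTIVEC mapped to
 * PUT_OP_U8_ALTIVEC (plain store) and once mapped to AVG_OP_U8_ALTIVEC
 * (average with the destination), producing the put_* and avg_* variants
 * of every function. */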
#define OP_U8_ALTIVEC                          PUT_OP_U8_ALTIVEC
#define PREFIX_h264_chroma_mc8_altivec         put_h264_chroma_mc8_altivec
#define PREFIX_no_rnd_vc1_chroma_mc8_altivec   put_no_rnd_vc1_chroma_mc8_altivec
#define PREFIX_h264_chroma_mc8_num             altivec_put_h264_chroma_mc8_num
#define PREFIX_h264_qpel16_h_lowpass_altivec   put_h264_qpel16_h_lowpass_altivec
#define PREFIX_h264_qpel16_h_lowpass_num       altivec_put_h264_qpel16_h_lowpass_num
#define PREFIX_h264_qpel16_v_lowpass_altivec   put_h264_qpel16_v_lowpass_altivec
#define PREFIX_h264_qpel16_v_lowpass_num       altivec_put_h264_qpel16_v_lowpass_num
#define PREFIX_h264_qpel16_hv_lowpass_altivec  put_h264_qpel16_hv_lowpass_altivec
#define PREFIX_h264_qpel16_hv_lowpass_num      altivec_put_h264_qpel16_hv_lowpass_num
#include "h264_template_altivec.c"
#undef OP_U8_ALTIVEC
#undef PREFIX_h264_chroma_mc8_altivec
#undef PREFIX_no_rnd_vc1_chroma_mc8_altivec
#undef PREFIX_h264_chroma_mc8_num
#undef PREFIX_h264_qpel16_h_lowpass_altivec
#undef PREFIX_h264_qpel16_h_lowpass_num
#undef PREFIX_h264_qpel16_v_lowpass_altivec
#undef PREFIX_h264_qpel16_v_lowpass_num
#undef PREFIX_h264_qpel16_hv_lowpass_altivec
#undef PREFIX_h264_qpel16_hv_lowpass_num

#define OP_U8_ALTIVEC                          AVG_OP_U8_ALTIVEC
#define PREFIX_h264_chroma_mc8_altivec         avg_h264_chroma_mc8_altivec
#define PREFIX_no_rnd_vc1_chroma_mc8_altivec   avg_no_rnd_vc1_chroma_mc8_altivec
#define PREFIX_h264_chroma_mc8_num             altivec_avg_h264_chroma_mc8_num
#define PREFIX_h264_qpel16_h_lowpass_altivec   avg_h264_qpel16_h_lowpass_altivec
#define PREFIX_h264_qpel16_h_lowpass_num       altivec_avg_h264_qpel16_h_lowpass_num
#define PREFIX_h264_qpel16_v_lowpass_altivec   avg_h264_qpel16_v_lowpass_altivec
#define PREFIX_h264_qpel16_v_lowpass_num       altivec_avg_h264_qpel16_v_lowpass_num
#define PREFIX_h264_qpel16_hv_lowpass_altivec  avg_h264_qpel16_hv_lowpass_altivec
#define PREFIX_h264_qpel16_hv_lowpass_num      altivec_avg_h264_qpel16_hv_lowpass_num
#include "h264_template_altivec.c"
#undef OP_U8_ALTIVEC
#undef PREFIX_h264_chroma_mc8_altivec
#undef PREFIX_no_rnd_vc1_chroma_mc8_altivec
#undef PREFIX_h264_chroma_mc8_num
#undef PREFIX_h264_qpel16_h_lowpass_altivec
#undef PREFIX_h264_qpel16_h_lowpass_num
#undef PREFIX_h264_qpel16_v_lowpass_altivec
#undef PREFIX_h264_qpel16_v_lowpass_num
#undef PREFIX_h264_qpel16_hv_lowpass_altivec
#undef PREFIX_h264_qpel16_hv_lowpass_num

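/* H264_MC() expands to the 16 quarter-pel motion-compensation entry points
 * mc00..mc33, where the two digits are the x and y quarter-pel offsets.
 * Half-pel planes come from the 6-tap lowpass filters; the remaining
 * quarter-pel positions are formed by averaging two neighbouring planes
 * with the *_pixels*_l2 helpers. */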
#define H264_MC(OPNAME, SIZE, CODETYPE) \
static void OPNAME ## h264_qpel ## SIZE ## _mc00_ ## CODETYPE(uint8_t *dst, uint8_t *src, int stride){\
    OPNAME ## pixels ## SIZE ## _ ## CODETYPE(dst, src, stride, SIZE);\
}\
\
static void OPNAME ## h264_qpel ## SIZE ## _mc10_ ## CODETYPE(uint8_t *dst, uint8_t *src, int stride){\
    DECLARE_ALIGNED(16, uint8_t, half)[SIZE*SIZE];\
    put_h264_qpel ## SIZE ## _h_lowpass_ ## CODETYPE(half, src, SIZE, stride);\
    OPNAME ## pixels ## SIZE ## _l2_ ## CODETYPE(dst, src, half, stride, stride, SIZE);\
}\
\
static void OPNAME ## h264_qpel ## SIZE ## _mc20_ ## CODETYPE(uint8_t *dst, uint8_t *src, int stride){\
    OPNAME ## h264_qpel ## SIZE ## _h_lowpass_ ## CODETYPE(dst, src, stride, stride);\
}\
\
static void OPNAME ## h264_qpel ## SIZE ## _mc30_ ## CODETYPE(uint8_t *dst, uint8_t *src, int stride){\
    DECLARE_ALIGNED(16, uint8_t, half)[SIZE*SIZE];\
    put_h264_qpel ## SIZE ## _h_lowpass_ ## CODETYPE(half, src, SIZE, stride);\
    OPNAME ## pixels ## SIZE ## _l2_ ## CODETYPE(dst, src+1, half, stride, stride, SIZE);\
}\
\
static void OPNAME ## h264_qpel ## SIZE ## _mc01_ ## CODETYPE(uint8_t *dst, uint8_t *src, int stride){\
    DECLARE_ALIGNED(16, uint8_t, half)[SIZE*SIZE];\
    put_h264_qpel ## SIZE ## _v_lowpass_ ## CODETYPE(half, src, SIZE, stride);\
    OPNAME ## pixels ## SIZE ## _l2_ ## CODETYPE(dst, src, half, stride, stride, SIZE);\
}\
\
static void OPNAME ## h264_qpel ## SIZE ## _mc02_ ## CODETYPE(uint8_t *dst, uint8_t *src, int stride){\
    OPNAME ## h264_qpel ## SIZE ## _v_lowpass_ ## CODETYPE(dst, src, stride, stride);\
}\
\
static void OPNAME ## h264_qpel ## SIZE ## _mc03_ ## CODETYPE(uint8_t *dst, uint8_t *src, int stride){\
    DECLARE_ALIGNED(16, uint8_t, half)[SIZE*SIZE];\
    put_h264_qpel ## SIZE ## _v_lowpass_ ## CODETYPE(half, src, SIZE, stride);\
    OPNAME ## pixels ## SIZE ## _l2_ ## CODETYPE(dst, src+stride, half, stride, stride, SIZE);\
}\
\
static void OPNAME ## h264_qpel ## SIZE ## _mc11_ ## CODETYPE(uint8_t *dst, uint8_t *src, int stride){\
    DECLARE_ALIGNED(16, uint8_t, halfH)[SIZE*SIZE];\
    DECLARE_ALIGNED(16, uint8_t, halfV)[SIZE*SIZE];\
    put_h264_qpel ## SIZE ## _h_lowpass_ ## CODETYPE(halfH, src, SIZE, stride);\
    put_h264_qpel ## SIZE ## _v_lowpass_ ## CODETYPE(halfV, src, SIZE, stride);\
    OPNAME ## pixels ## SIZE ## _l2_ ## CODETYPE(dst, halfH, halfV, stride, SIZE, SIZE);\
}\
\
static void OPNAME ## h264_qpel ## SIZE ## _mc31_ ## CODETYPE(uint8_t *dst, uint8_t *src, int stride){\
    DECLARE_ALIGNED(16, uint8_t, halfH)[SIZE*SIZE];\
    DECLARE_ALIGNED(16, uint8_t, halfV)[SIZE*SIZE];\
    put_h264_qpel ## SIZE ## _h_lowpass_ ## CODETYPE(halfH, src, SIZE, stride);\
    put_h264_qpel ## SIZE ## _v_lowpass_ ## CODETYPE(halfV, src+1, SIZE, stride);\
    OPNAME ## pixels ## SIZE ## _l2_ ## CODETYPE(dst, halfH, halfV, stride, SIZE, SIZE);\
}\
\
static void OPNAME ## h264_qpel ## SIZE ## _mc13_ ## CODETYPE(uint8_t *dst, uint8_t *src, int stride){\
    DECLARE_ALIGNED(16, uint8_t, halfH)[SIZE*SIZE];\
    DECLARE_ALIGNED(16, uint8_t, halfV)[SIZE*SIZE];\
    put_h264_qpel ## SIZE ## _h_lowpass_ ## CODETYPE(halfH, src + stride, SIZE, stride);\
    put_h264_qpel ## SIZE ## _v_lowpass_ ## CODETYPE(halfV, src, SIZE, stride);\
    OPNAME ## pixels ## SIZE ## _l2_ ## CODETYPE(dst, halfH, halfV, stride, SIZE, SIZE);\
}\
\
static void OPNAME ## h264_qpel ## SIZE ## _mc33_ ## CODETYPE(uint8_t *dst, uint8_t *src, int stride){\
    DECLARE_ALIGNED(16, uint8_t, halfH)[SIZE*SIZE];\
    DECLARE_ALIGNED(16, uint8_t, halfV)[SIZE*SIZE];\
    put_h264_qpel ## SIZE ## _h_lowpass_ ## CODETYPE(halfH, src + stride, SIZE, stride);\
    put_h264_qpel ## SIZE ## _v_lowpass_ ## CODETYPE(halfV, src+1, SIZE, stride);\
    OPNAME ## pixels ## SIZE ## _l2_ ## CODETYPE(dst, halfH, halfV, stride, SIZE, SIZE);\
}\
\
static void OPNAME ## h264_qpel ## SIZE ## _mc22_ ## CODETYPE(uint8_t *dst, uint8_t *src, int stride){\
    DECLARE_ALIGNED(16, int16_t, tmp)[SIZE*(SIZE+8)];\
    OPNAME ## h264_qpel ## SIZE ## _hv_lowpass_ ## CODETYPE(dst, tmp, src, stride, SIZE, stride);\
}\
\
static void OPNAME ## h264_qpel ## SIZE ## _mc21_ ## CODETYPE(uint8_t *dst, uint8_t *src, int stride){\
    DECLARE_ALIGNED(16, uint8_t, halfH)[SIZE*SIZE];\
    DECLARE_ALIGNED(16, uint8_t, halfHV)[SIZE*SIZE];\
    DECLARE_ALIGNED(16, int16_t, tmp)[SIZE*(SIZE+8)];\
    put_h264_qpel ## SIZE ## _h_lowpass_ ## CODETYPE(halfH, src, SIZE, stride);\
    put_h264_qpel ## SIZE ## _hv_lowpass_ ## CODETYPE(halfHV, tmp, src, SIZE, SIZE, stride);\
    OPNAME ## pixels ## SIZE ## _l2_ ## CODETYPE(dst, halfH, halfHV, stride, SIZE, SIZE);\
}\
\
static void OPNAME ## h264_qpel ## SIZE ## _mc23_ ## CODETYPE(uint8_t *dst, uint8_t *src, int stride){\
    DECLARE_ALIGNED(16, uint8_t, halfH)[SIZE*SIZE];\
    DECLARE_ALIGNED(16, uint8_t, halfHV)[SIZE*SIZE];\
    DECLARE_ALIGNED(16, int16_t, tmp)[SIZE*(SIZE+8)];\
    put_h264_qpel ## SIZE ## _h_lowpass_ ## CODETYPE(halfH, src + stride, SIZE, stride);\
    put_h264_qpel ## SIZE ## _hv_lowpass_ ## CODETYPE(halfHV, tmp, src, SIZE, SIZE, stride);\
    OPNAME ## pixels ## SIZE ## _l2_ ## CODETYPE(dst, halfH, halfHV, stride, SIZE, SIZE);\
}\
\
static void OPNAME ## h264_qpel ## SIZE ## _mc12_ ## CODETYPE(uint8_t *dst, uint8_t *src, int stride){\
    DECLARE_ALIGNED(16, uint8_t, halfV)[SIZE*SIZE];\
    DECLARE_ALIGNED(16, uint8_t, halfHV)[SIZE*SIZE];\
    DECLARE_ALIGNED(16, int16_t, tmp)[SIZE*(SIZE+8)];\
    put_h264_qpel ## SIZE ## _v_lowpass_ ## CODETYPE(halfV, src, SIZE, stride);\
    put_h264_qpel ## SIZE ## _hv_lowpass_ ## CODETYPE(halfHV, tmp, src, SIZE, SIZE, stride);\
    OPNAME ## pixels ## SIZE ## _l2_ ## CODETYPE(dst, halfV, halfHV, stride, SIZE, SIZE);\
}\
\
static void OPNAME ## h264_qpel ## SIZE ## _mc32_ ## CODETYPE(uint8_t *dst, uint8_t *src, int stride){\
    DECLARE_ALIGNED(16, uint8_t, halfV)[SIZE*SIZE];\
    DECLARE_ALIGNED(16, uint8_t, halfHV)[SIZE*SIZE];\
    DECLARE_ALIGNED(16, int16_t, tmp)[SIZE*(SIZE+8)];\
    put_h264_qpel ## SIZE ## _v_lowpass_ ## CODETYPE(halfV, src+1, SIZE, stride);\
    put_h264_qpel ## SIZE ## _hv_lowpass_ ## CODETYPE(halfHV, tmp, src, SIZE, SIZE, stride);\
    OPNAME ## pixels ## SIZE ## _l2_ ## CODETYPE(dst, halfV, halfHV, stride, SIZE, SIZE);\
}\

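/* put/avg_pixels16_l2: average a 16-pixel-wide row from src1 with one from
 * src2 into dst. Loads use vec_lvsl()/vec_perm() to cope with any alignment;
 * the store is done read-modify-write through vec_lvsr() so the bytes around
 * an unaligned dst are preserved. src2 is a contiguous buffer with a fixed
 * stride of 16. */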
static inline void put_pixels16_l2_altivec( uint8_t * dst, const uint8_t * src1,
                                    const uint8_t * src2, int dst_stride,
                                    int src_stride1, int h)
{
    int i;
    vec_u8 a, b, d, tmp1, tmp2, mask, mask_, edges, align;

    mask_ = vec_lvsl(0, src2);

    for (i = 0; i < h; i++) {

        tmp1 = vec_ld(i * src_stride1, src1);
        mask = vec_lvsl(i * src_stride1, src1);
        tmp2 = vec_ld(i * src_stride1 + 15, src1);

        a = vec_perm(tmp1, tmp2, mask);

        tmp1 = vec_ld(i * 16, src2);
        tmp2 = vec_ld(i * 16 + 15, src2);

        b = vec_perm(tmp1, tmp2, mask_);

        tmp1 = vec_ld(0, dst);
        mask = vec_lvsl(0, dst);
        tmp2 = vec_ld(15, dst);

        d = vec_avg(a, b);

        edges = vec_perm(tmp2, tmp1, mask);

        align = vec_lvsr(0, dst);

        tmp2 = vec_perm(d, edges, align);
        tmp1 = vec_perm(edges, d, align);

        vec_st(tmp2, 15, dst);
        vec_st(tmp1, 0, dst);

        dst += dst_stride;
    }
}

static inline void avg_pixels16_l2_altivec( uint8_t * dst, const uint8_t * src1,
                                    const uint8_t * src2, int dst_stride,
                                    int src_stride1, int h)
{
    int i;
    vec_u8 a, b, d, tmp1, tmp2, mask, mask_, edges, align;

    mask_ = vec_lvsl(0, src2);

    for (i = 0; i < h; i++) {

        tmp1 = vec_ld(i * src_stride1, src1);
        mask = vec_lvsl(i * src_stride1, src1);
        tmp2 = vec_ld(i * src_stride1 + 15, src1);

        a = vec_perm(tmp1, tmp2, mask);

        tmp1 = vec_ld(i * 16, src2);
        tmp2 = vec_ld(i * 16 + 15, src2);

        b = vec_perm(tmp1, tmp2, mask_);

        tmp1 = vec_ld(0, dst);
        mask = vec_lvsl(0, dst);
        tmp2 = vec_ld(15, dst);

        d = vec_avg(vec_perm(tmp1, tmp2, mask), vec_avg(a, b));

        edges = vec_perm(tmp2, tmp1, mask);

        align = vec_lvsr(0, dst);

        tmp2 = vec_perm(d, edges, align);
        tmp1 = vec_perm(edges, d, align);

        vec_st(tmp2, 15, dst);
        vec_st(tmp1, 0, dst);

        dst += dst_stride;
    }
}

/* Implemented but could be faster
#define put_pixels16_l2_altivec(d,s1,s2,ds,s1s,h) put_pixels16_l2(d,s1,s2,ds,s1s,16,h)
#define avg_pixels16_l2_altivec(d,s1,s2,ds,s1s,h) avg_pixels16_l2(d,s1,s2,ds,s1s,16,h)
 */

H264_MC(put_, 16, altivec)
H264_MC(avg_, 16, altivec)


/****************************************************************************
 * IDCT transform:
 ****************************************************************************/

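/* 4x4 residual transform. H.264 uses an exact integer approximation of the
 * DCT: VEC_1D_DCT is one butterfly pass over two rows per vector,
 * VEC_TRANSPOSE_4 switches between row and column passes, and the final
 * result is rounded (+32, >>6) before being added to the prediction. */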
#define VEC_1D_DCT(vb0,vb1,vb2,vb3,va0,va1,va2,va3)               \
    /* 1st stage */                                               \
    vz0 = vec_add(vb0,vb2);       /* temp[0] = Y[0] + Y[2] */     \
    vz1 = vec_sub(vb0,vb2);       /* temp[1] = Y[0] - Y[2] */     \
    vz2 = vec_sra(vb1,vec_splat_u16(1));                          \
    vz2 = vec_sub(vz2,vb3);       /* temp[2] = Y[1].1/2 - Y[3] */ \
    vz3 = vec_sra(vb3,vec_splat_u16(1));                          \
    vz3 = vec_add(vb1,vz3);       /* temp[3] = Y[1] + Y[3].1/2 */ \
    /* 2nd stage: output */                                       \
    va0 = vec_add(vz0,vz3);       /* x[0] = temp[0] + temp[3] */  \
    va1 = vec_add(vz1,vz2);       /* x[1] = temp[1] + temp[2] */  \
    va2 = vec_sub(vz1,vz2);       /* x[2] = temp[1] - temp[2] */  \
    va3 = vec_sub(vz0,vz3)        /* x[3] = temp[0] - temp[3] */

#define VEC_TRANSPOSE_4(a0,a1,a2,a3,b0,b1,b2,b3) \
    b0 = vec_mergeh( a0, a0 ); \
    b1 = vec_mergeh( a1, a0 ); \
    b2 = vec_mergeh( a2, a0 ); \
    b3 = vec_mergeh( a3, a0 ); \
    a0 = vec_mergeh( b0, b2 ); \
    a1 = vec_mergel( b0, b2 ); \
    a2 = vec_mergeh( b1, b3 ); \
    a3 = vec_mergel( b1, b3 ); \
    b0 = vec_mergeh( a0, a2 ); \
    b1 = vec_mergel( a0, a2 ); \
    b2 = vec_mergeh( a1, a3 ); \
    b3 = vec_mergel( a1, a3 )

#define VEC_LOAD_U8_ADD_S16_STORE_U8(va)                      \
    vdst_orig = vec_ld(0, dst);                               \
    vdst = vec_perm(vdst_orig, zero_u8v, vdst_mask);          \
    vdst_ss = (vec_s16) vec_mergeh(zero_u8v, vdst);           \
    va = vec_add(va, vdst_ss);                                \
    va_u8 = vec_packsu(va, zero_s16v);                        \
    va_u32 = vec_splat((vec_u32)va_u8, 0);                    \
    vec_ste(va_u32, element, (uint32_t*)dst);

static void ff_h264_idct_add_altivec(uint8_t *dst, DCTELEM *block, int stride)
{
    vec_s16 va0, va1, va2, va3;
    vec_s16 vz0, vz1, vz2, vz3;
    vec_s16 vtmp0, vtmp1, vtmp2, vtmp3;
    vec_u8 va_u8;
    vec_u32 va_u32;
    vec_s16 vdst_ss;
    const vec_u16 v6us = vec_splat_u16(6);
    vec_u8 vdst, vdst_orig;
    vec_u8 vdst_mask = vec_lvsl(0, dst);
    int element = ((unsigned long)dst & 0xf) >> 2;
    LOAD_ZERO;

    block[0] += 32;  /* add 32 as a DC bias for the final >>6 rounding */

    vtmp0 = vec_ld(0,block);
    vtmp1 = vec_sld(vtmp0, vtmp0, 8);
    vtmp2 = vec_ld(16,block);
    vtmp3 = vec_sld(vtmp2, vtmp2, 8);

    VEC_1D_DCT(vtmp0,vtmp1,vtmp2,vtmp3,va0,va1,va2,va3);
    VEC_TRANSPOSE_4(va0,va1,va2,va3,vtmp0,vtmp1,vtmp2,vtmp3);
    VEC_1D_DCT(vtmp0,vtmp1,vtmp2,vtmp3,va0,va1,va2,va3);

    va0 = vec_sra(va0,v6us);
    va1 = vec_sra(va1,v6us);
    va2 = vec_sra(va2,v6us);
    va3 = vec_sra(va3,v6us);

    VEC_LOAD_U8_ADD_S16_STORE_U8(va0);
    dst += stride;
    VEC_LOAD_U8_ADD_S16_STORE_U8(va1);
    dst += stride;
    VEC_LOAD_U8_ADD_S16_STORE_U8(va2);
    dst += stride;
    VEC_LOAD_U8_ADD_S16_STORE_U8(va3);
}

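/* 8x8 transform: a direct vector translation of the scalar 1D idct8; each
 * comment gives the scalar expression computed by the following statement.
 * The 2D transform is one pass over rows, a full 8x8 transpose, then one
 * pass over columns. */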
#define IDCT8_1D_ALTIVEC(s0, s1, s2, s3, s4, s5, s6, s7,  d0, d1, d2, d3, d4, d5, d6, d7) {\
    /*        a0  = SRC(0) + SRC(4); */ \
    vec_s16 a0v = vec_add(s0, s4);    \
    /*        a2  = SRC(0) - SRC(4); */ \
    vec_s16 a2v = vec_sub(s0, s4);    \
    /*        a4  =           (SRC(2)>>1) - SRC(6); */ \
    vec_s16 a4v = vec_sub(vec_sra(s2, onev), s6);    \
    /*        a6  =           (SRC(6)>>1) + SRC(2); */ \
    vec_s16 a6v = vec_add(vec_sra(s6, onev), s2);    \
    /*        b0  =         a0 + a6; */ \
    vec_s16 b0v = vec_add(a0v, a6v);  \
    /*        b2  =         a2 + a4; */ \
    vec_s16 b2v = vec_add(a2v, a4v);  \
    /*        b4  =         a2 - a4; */ \
    vec_s16 b4v = vec_sub(a2v, a4v);  \
    /*        b6  =         a0 - a6; */ \
    vec_s16 b6v = vec_sub(a0v, a6v);  \
    /*        a1 =  SRC(5) - SRC(3) - SRC(7) - (SRC(7)>>1); */ \
    /*        a1 =             (SRC(5)-SRC(3)) -  (SRC(7)  +  (SRC(7)>>1)); */ \
    vec_s16 a1v = vec_sub( vec_sub(s5, s3), vec_add(s7, vec_sra(s7, onev)) ); \
    /*        a3 =  SRC(7) + SRC(1) - SRC(3) - (SRC(3)>>1); */ \
    /*        a3 =             (SRC(7)+SRC(1)) -  (SRC(3)  +  (SRC(3)>>1)); */ \
    vec_s16 a3v = vec_sub( vec_add(s7, s1), vec_add(s3, vec_sra(s3, onev)) );\
    /*        a5 =  SRC(7) - SRC(1) + SRC(5) + (SRC(5)>>1); */ \
    /*        a5 =             (SRC(7)-SRC(1)) +   SRC(5) +   (SRC(5)>>1); */ \
    vec_s16 a5v = vec_add( vec_sub(s7, s1), vec_add(s5, vec_sra(s5, onev)) );\
    /*        a7 =                SRC(5)+SRC(3) +  SRC(1) +   (SRC(1)>>1); */ \
    vec_s16 a7v = vec_add( vec_add(s5, s3), vec_add(s1, vec_sra(s1, onev)) );\
    /*        b1 =                  (a7>>2)  +  a1; */ \
    vec_s16 b1v = vec_add( vec_sra(a7v, twov), a1v); \
    /*        b3 =          a3 +        (a5>>2); */ \
    vec_s16 b3v = vec_add(a3v, vec_sra(a5v, twov)); \
    /*        b5 =                  (a3>>2)  -   a5; */ \
    vec_s16 b5v = vec_sub( vec_sra(a3v, twov), a5v); \
    /*        b7 =           a7 -        (a1>>2); */ \
    vec_s16 b7v = vec_sub( a7v, vec_sra(a1v, twov)); \
    /* DST(0,    b0 + b7); */ \
    d0 = vec_add(b0v, b7v); \
    /* DST(1,    b2 + b5); */ \
    d1 = vec_add(b2v, b5v); \
    /* DST(2,    b4 + b3); */ \
    d2 = vec_add(b4v, b3v); \
    /* DST(3,    b6 + b1); */ \
    d3 = vec_add(b6v, b1v); \
    /* DST(4,    b6 - b1); */ \
    d4 = vec_sub(b6v, b1v); \
    /* DST(5,    b4 - b3); */ \
    d5 = vec_sub(b4v, b3v); \
    /* DST(6,    b2 - b5); */ \
    d6 = vec_sub(b2v, b5v); \
    /* DST(7,    b0 - b7); */ \
    d7 = vec_sub(b0v, b7v); \
}

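/* Add the >>6-shifted IDCT output to 8 destination pixels with signed
 * saturation, pack back to unsigned bytes, and store read-modify-write:
 * the sel mask and vec_sel() keep the bytes outside the 8-pixel row
 * untouched even when dest is unaligned. */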
#define ALTIVEC_STORE_SUM_CLIP(dest, idctv, perm_ldv, perm_stv, sel) { \
    /* unaligned load */                                       \
    vec_u8 hv = vec_ld( 0, dest );                             \
    vec_u8 lv = vec_ld( 7, dest );                             \
    vec_u8 dstv   = vec_perm( hv, lv, (vec_u8)perm_ldv );      \
    vec_s16 idct_sh6 = vec_sra(idctv, sixv);                   \
    vec_u16 dst16 = (vec_u16)vec_mergeh(zero_u8v, dstv);       \
    vec_s16 idstsum = vec_adds(idct_sh6, (vec_s16)dst16);      \
    vec_u8 idstsum8 = vec_packsu(zero_s16v, idstsum);          \
    vec_u8 edgehv;                                             \
    /* unaligned store */                                      \
    vec_u8 bodyv  = vec_perm( idstsum8, idstsum8, perm_stv );  \
    vec_u8 edgelv = vec_perm( sel, zero_u8v, perm_stv );       \
    lv    = vec_sel( lv, bodyv, edgelv );                      \
    vec_st( lv, 7, dest );                                     \
    hv    = vec_ld( 0, dest );                                 \
    edgehv = vec_perm( zero_u8v, sel, perm_stv );              \
    hv    = vec_sel( hv, bodyv, edgehv );                      \
    vec_st( hv, 0, dest );                                     \
 }

void ff_h264_idct8_add_altivec( uint8_t *dst, DCTELEM *dct, int stride ) {
    vec_s16 s0, s1, s2, s3, s4, s5, s6, s7;
    vec_s16 d0, d1, d2, d3, d4, d5, d6, d7;
    vec_s16 idct0, idct1, idct2, idct3, idct4, idct5, idct6, idct7;

    vec_u8 perm_ldv = vec_lvsl(0, dst);
    vec_u8 perm_stv = vec_lvsr(8, dst);

    const vec_u16 onev = vec_splat_u16(1);
    const vec_u16 twov = vec_splat_u16(2);
    const vec_u16 sixv = vec_splat_u16(6);

    const vec_u8 sel = (vec_u8) {0,0,0,0,0,0,0,0,-1,-1,-1,-1,-1,-1,-1,-1};
    LOAD_ZERO;

    dct[0] += 32; // rounding for the >>6 at the end

    s0 = vec_ld(0x00, (int16_t*)dct);
    s1 = vec_ld(0x10, (int16_t*)dct);
    s2 = vec_ld(0x20, (int16_t*)dct);
    s3 = vec_ld(0x30, (int16_t*)dct);
    s4 = vec_ld(0x40, (int16_t*)dct);
    s5 = vec_ld(0x50, (int16_t*)dct);
    s6 = vec_ld(0x60, (int16_t*)dct);
    s7 = vec_ld(0x70, (int16_t*)dct);

    IDCT8_1D_ALTIVEC(s0, s1, s2, s3, s4, s5, s6, s7,
                     d0, d1, d2, d3, d4, d5, d6, d7);

    TRANSPOSE8( d0,  d1,  d2,  d3,  d4,  d5,  d6, d7 );

    IDCT8_1D_ALTIVEC(d0,  d1,  d2,  d3,  d4,  d5,  d6, d7,
                     idct0, idct1, idct2, idct3, idct4, idct5, idct6, idct7);

    ALTIVEC_STORE_SUM_CLIP(&dst[0*stride], idct0, perm_ldv, perm_stv, sel);
    ALTIVEC_STORE_SUM_CLIP(&dst[1*stride], idct1, perm_ldv, perm_stv, sel);
    ALTIVEC_STORE_SUM_CLIP(&dst[2*stride], idct2, perm_ldv, perm_stv, sel);
    ALTIVEC_STORE_SUM_CLIP(&dst[3*stride], idct3, perm_ldv, perm_stv, sel);
    ALTIVEC_STORE_SUM_CLIP(&dst[4*stride], idct4, perm_ldv, perm_stv, sel);
    ALTIVEC_STORE_SUM_CLIP(&dst[5*stride], idct5, perm_ldv, perm_stv, sel);
    ALTIVEC_STORE_SUM_CLIP(&dst[6*stride], idct6, perm_ldv, perm_stv, sel);
    ALTIVEC_STORE_SUM_CLIP(&dst[7*stride], idct7, perm_ldv, perm_stv, sel);
}

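/* DC-only blocks: dc = (block[0] + 32) >> 6 is splatted across a vector.
 * Since dc may be negative and the pixels are unsigned, the add is split
 * into a saturated add of max(dc, 0) (dcplus) and a saturated subtract of
 * max(-dc, 0) (dcminus), which also provides the required clipping. */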
static av_always_inline void h264_idct_dc_add_internal(uint8_t *dst, DCTELEM *block, int stride, int size)
{
    vec_s16 dc16;
    vec_u8 dcplus, dcminus, v0, v1, v2, v3, aligner;
    LOAD_ZERO;
    DECLARE_ALIGNED(16, int, dc);
    int i;

    dc = (block[0] + 32) >> 6;
    dc16 = vec_splat((vec_s16) vec_lde(0, &dc), 1);

    if (size == 4)
        dc16 = vec_sld(dc16, zero_s16v, 8);
    dcplus = vec_packsu(dc16, zero_s16v);
    dcminus = vec_packsu(vec_sub(zero_s16v, dc16), zero_s16v);

    aligner = vec_lvsr(0, dst);
    dcplus = vec_perm(dcplus, dcplus, aligner);
    dcminus = vec_perm(dcminus, dcminus, aligner);

    for (i = 0; i < size; i += 4) {
        v0 = vec_ld(0, dst+0*stride);
        v1 = vec_ld(0, dst+1*stride);
        v2 = vec_ld(0, dst+2*stride);
        v3 = vec_ld(0, dst+3*stride);

        v0 = vec_adds(v0, dcplus);
        v1 = vec_adds(v1, dcplus);
        v2 = vec_adds(v2, dcplus);
        v3 = vec_adds(v3, dcplus);

        v0 = vec_subs(v0, dcminus);
        v1 = vec_subs(v1, dcminus);
        v2 = vec_subs(v2, dcminus);
        v3 = vec_subs(v3, dcminus);

        vec_st(v0, 0, dst+0*stride);
        vec_st(v1, 0, dst+1*stride);
        vec_st(v2, 0, dst+2*stride);
        vec_st(v3, 0, dst+3*stride);

        dst += 4*stride;
    }
}

static void h264_idct_dc_add_altivec(uint8_t *dst, DCTELEM *block, int stride)
{
    h264_idct_dc_add_internal(dst, block, stride, 4);
}

static void ff_h264_idct8_dc_add_altivec(uint8_t *dst, DCTELEM *block, int stride)
{
    h264_idct_dc_add_internal(dst, block, stride, 8);
}

static void ff_h264_idct_add16_altivec(uint8_t *dst, const int *block_offset, DCTELEM *block, int stride, const uint8_t nnzc[6*8]){
    int i;
    for(i=0; i<16; i++){
        int nnz = nnzc[ scan8[i] ];
        if(nnz){
            if(nnz==1 && block[i*16]) h264_idct_dc_add_altivec(dst + block_offset[i], block + i*16, stride);
            else                      ff_h264_idct_add_altivec(dst + block_offset[i], block + i*16, stride);
        }
    }
}

static void ff_h264_idct_add16intra_altivec(uint8_t *dst, const int *block_offset, DCTELEM *block, int stride, const uint8_t nnzc[6*8]){
    int i;
    for(i=0; i<16; i++){
        if(nnzc[ scan8[i] ]) ff_h264_idct_add_altivec(dst + block_offset[i], block + i*16, stride);
        else if(block[i*16]) h264_idct_dc_add_altivec(dst + block_offset[i], block + i*16, stride);
    }
}

static void ff_h264_idct8_add4_altivec(uint8_t *dst, const int *block_offset, DCTELEM *block, int stride, const uint8_t nnzc[6*8]){
    int i;
    for(i=0; i<16; i+=4){
        int nnz = nnzc[ scan8[i] ];
        if(nnz){
            if(nnz==1 && block[i*16]) ff_h264_idct8_dc_add_altivec(dst + block_offset[i], block + i*16, stride);
            else                      ff_h264_idct8_add_altivec   (dst + block_offset[i], block + i*16, stride);
        }
    }
}

static void ff_h264_idct_add8_altivec(uint8_t **dest, const int *block_offset, DCTELEM *block, int stride, const uint8_t nnzc[6*8]){
    int i;
    for(i=16; i<16+8; i++){
        if(nnzc[ scan8[i] ])
            ff_h264_idct_add_altivec(dest[(i&4)>>2] + block_offset[i], block + i*16, stride);
        else if(block[i*16])
            h264_idct_dc_add_altivec(dest[(i&4)>>2] + block_offset[i], block + i*16, stride);
    }
}

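/* Deblocking (loop) filter. h264_v_loop_filter_luma filters across a
 * horizontal edge and works on whole rows directly; h264_h_loop_filter_luma
 * (vertical edge) transposes a 16x6 block of pixels so the same row code can
 * be reused, then writes the four modified rows back transposed. All of the
 * spec's per-pixel conditions are evaluated as byte masks, so the filter is
 * branch-free. */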
#define transpose4x16(r0, r1, r2, r3) {      \
    register vec_u8 r4;                      \
    register vec_u8 r5;                      \
    register vec_u8 r6;                      \
    register vec_u8 r7;                      \
                                             \
    r4 = vec_mergeh(r0, r2);  /*0, 2 set 0*/ \
    r5 = vec_mergel(r0, r2);  /*0, 2 set 1*/ \
    r6 = vec_mergeh(r1, r3);  /*1, 3 set 0*/ \
    r7 = vec_mergel(r1, r3);  /*1, 3 set 1*/ \
                                             \
    r0 = vec_mergeh(r4, r6);  /*all set 0*/  \
    r1 = vec_mergel(r4, r6);  /*all set 1*/  \
    r2 = vec_mergeh(r5, r7);  /*all set 2*/  \
    r3 = vec_mergel(r5, r7);  /*all set 3*/  \
}

static inline void write16x4(uint8_t *dst, int dst_stride,
                             register vec_u8 r0, register vec_u8 r1,
                             register vec_u8 r2, register vec_u8 r3) {
    DECLARE_ALIGNED(16, unsigned char, result)[64];
    uint32_t *src_int = (uint32_t *)result, *dst_int = (uint32_t *)dst;
    int int_dst_stride = dst_stride/4;

    vec_st(r0, 0, result);
    vec_st(r1, 16, result);
    vec_st(r2, 32, result);
    vec_st(r3, 48, result);
    /* FIXME: there has to be a better way!!!! */
    *dst_int = *src_int;
    *(dst_int+   int_dst_stride) = *(src_int + 1);
    *(dst_int+ 2*int_dst_stride) = *(src_int + 2);
    *(dst_int+ 3*int_dst_stride) = *(src_int + 3);
    *(dst_int+ 4*int_dst_stride) = *(src_int + 4);
    *(dst_int+ 5*int_dst_stride) = *(src_int + 5);
    *(dst_int+ 6*int_dst_stride) = *(src_int + 6);
    *(dst_int+ 7*int_dst_stride) = *(src_int + 7);
    *(dst_int+ 8*int_dst_stride) = *(src_int + 8);
    *(dst_int+ 9*int_dst_stride) = *(src_int + 9);
    *(dst_int+10*int_dst_stride) = *(src_int + 10);
    *(dst_int+11*int_dst_stride) = *(src_int + 11);
    *(dst_int+12*int_dst_stride) = *(src_int + 12);
    *(dst_int+13*int_dst_stride) = *(src_int + 13);
    *(dst_int+14*int_dst_stride) = *(src_int + 14);
    *(dst_int+15*int_dst_stride) = *(src_int + 15);
}

/** \brief reads a 16x6 block of pixels from src and transposes it,
    leaving the six 16-byte rows in r8..r13
    \todo FIXME: see if we can spare some vec_lvsl() by factorizing them
    out of unaligned_load() */
#define readAndTranspose16x6(src, src_stride, r8, r9, r10, r11, r12, r13) {\
    register vec_u8 r0  = unaligned_load(0,             src);            \
    register vec_u8 r1  = unaligned_load(   src_stride, src);            \
    register vec_u8 r2  = unaligned_load(2* src_stride, src);            \
    register vec_u8 r3  = unaligned_load(3* src_stride, src);            \
    register vec_u8 r4  = unaligned_load(4* src_stride, src);            \
    register vec_u8 r5  = unaligned_load(5* src_stride, src);            \
    register vec_u8 r6  = unaligned_load(6* src_stride, src);            \
    register vec_u8 r7  = unaligned_load(7* src_stride, src);            \
    register vec_u8 r14 = unaligned_load(14*src_stride, src);            \
    register vec_u8 r15 = unaligned_load(15*src_stride, src);            \
                                                                           \
    r8  = unaligned_load( 8*src_stride, src);                              \
    r9  = unaligned_load( 9*src_stride, src);                              \
    r10 = unaligned_load(10*src_stride, src);                              \
    r11 = unaligned_load(11*src_stride, src);                              \
    r12 = unaligned_load(12*src_stride, src);                              \
    r13 = unaligned_load(13*src_stride, src);                              \
                                                                           \
    /*Merge first pairs*/                                                  \
    r0 = vec_mergeh(r0, r8);    /*0, 8*/                                   \
    r1 = vec_mergeh(r1, r9);    /*1, 9*/                                   \
    r2 = vec_mergeh(r2, r10);   /*2,10*/                                   \
    r3 = vec_mergeh(r3, r11);   /*3,11*/                                   \
    r4 = vec_mergeh(r4, r12);   /*4,12*/                                   \
    r5 = vec_mergeh(r5, r13);   /*5,13*/                                   \
    r6 = vec_mergeh(r6, r14);   /*6,14*/                                   \
    r7 = vec_mergeh(r7, r15);   /*7,15*/                                   \
                                                                           \
    /*Merge second pairs*/                                                 \
    r8  = vec_mergeh(r0, r4);   /*0,4, 8,12 set 0*/                        \
    r9  = vec_mergel(r0, r4);   /*0,4, 8,12 set 1*/                        \
    r10 = vec_mergeh(r1, r5);   /*1,5, 9,13 set 0*/                        \
    r11 = vec_mergel(r1, r5);   /*1,5, 9,13 set 1*/                        \
    r12 = vec_mergeh(r2, r6);   /*2,6,10,14 set 0*/                        \
    r13 = vec_mergel(r2, r6);   /*2,6,10,14 set 1*/                        \
    r14 = vec_mergeh(r3, r7);   /*3,7,11,15 set 0*/                        \
    r15 = vec_mergel(r3, r7);   /*3,7,11,15 set 1*/                        \
                                                                           \
    /*Third merge*/                                                        \
    r0 = vec_mergeh(r8,  r12);  /*0,2,4,6,8,10,12,14 set 0*/               \
    r1 = vec_mergel(r8,  r12);  /*0,2,4,6,8,10,12,14 set 1*/               \
    r2 = vec_mergeh(r9,  r13);  /*0,2,4,6,8,10,12,14 set 2*/               \
    r4 = vec_mergeh(r10, r14);  /*1,3,5,7,9,11,13,15 set 0*/               \
    r5 = vec_mergel(r10, r14);  /*1,3,5,7,9,11,13,15 set 1*/               \
    r6 = vec_mergeh(r11, r15);  /*1,3,5,7,9,11,13,15 set 2*/               \
    /* Don't need to compute 3 and 7*/                                     \
                                                                           \
    /*Final merge*/                                                        \
    r8  = vec_mergeh(r0, r4);   /*all set 0*/                              \
    r9  = vec_mergel(r0, r4);   /*all set 1*/                              \
    r10 = vec_mergeh(r1, r5);   /*all set 2*/                              \
    r11 = vec_mergel(r1, r5);   /*all set 3*/                              \
    r12 = vec_mergeh(r2, r6);   /*all set 4*/                              \
    r13 = vec_mergel(r2, r6);   /*all set 5*/                              \
    /* Don't need to compute 14 and 15*/                                   \
                                                                           \
}

// out: o = |x-y| < a
static inline vec_u8 diff_lt_altivec ( register vec_u8 x,
                                       register vec_u8 y,
                                       register vec_u8 a) {

    register vec_u8 diff = vec_subs(x, y);
    register vec_u8 diffneg = vec_subs(y, x);
    register vec_u8 o = vec_or(diff, diffneg); /* |x-y| */
    o = (vec_u8)vec_cmplt(o, a);
    return o;
}

static inline vec_u8 h264_deblock_mask ( register vec_u8 p0,
                                         register vec_u8 p1,
                                         register vec_u8 q0,
                                         register vec_u8 q1,
                                         register vec_u8 alpha,
                                         register vec_u8 beta) {

    register vec_u8 mask;
    register vec_u8 tempmask;

    mask = diff_lt_altivec(p0, q0, alpha);
    tempmask = diff_lt_altivec(p1, p0, beta);
    mask = vec_and(mask, tempmask);
    tempmask = diff_lt_altivec(q1, q0, beta);
    mask = vec_and(mask, tempmask);

    return mask;
}

// out: newp1 = clip((p2 + ((p0 + q0 + 1) >> 1)) >> 1, p1-tc0, p1+tc0)
static inline vec_u8 h264_deblock_q1(register vec_u8 p0,
                                     register vec_u8 p1,
                                     register vec_u8 p2,
                                     register vec_u8 q0,
                                     register vec_u8 tc0) {

    register vec_u8 average = vec_avg(p0, q0);
    register vec_u8 temp;
    register vec_u8 unclipped;
    register vec_u8 ones;
    register vec_u8 max;
    register vec_u8 min;
    register vec_u8 newp1;

    temp = vec_xor(average, p2);
    average = vec_avg(average, p2);        /*avg(p2, avg(p0, q0)) */
    ones = vec_splat_u8(1);
    temp = vec_and(temp, ones);            /*(p2^avg(p0, q0)) & 1 */
    unclipped = vec_subs(average, temp);   /*(p2+((p0+q0+1)>>1))>>1 */
    max = vec_adds(p1, tc0);
    min = vec_subs(p1, tc0);
    newp1 = vec_max(min, unclipped);
    newp1 = vec_min(max, newp1);
    return newp1;
}

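/* p0/q0 update. The spec computes delta = clip((((q0-p0)<<2) + (p1-q1) + 4) >> 3,
 * -tc0, tc0) and applies it to p0/q0. Here everything stays in unsigned
 * bytes: vec_nor() forms one's complements, vec_avg() accumulates the biased
 * differences (A0v = 160 is the collected bias), and the clip falls out of
 * vec_min() against the masked tc0 followed by saturating add/sub. */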
#define h264_deblock_p0_q0(p0, p1, q0, q1, tc0masked) {                                           \
                                                                                                  \
    const vec_u8 A0v = vec_sl(vec_splat_u8(10), vec_splat_u8(4));                                 \
                                                                                                  \
    register vec_u8 pq0bit = vec_xor(p0,q0);                                                      \
    register vec_u8 q1minus;                                                                      \
    register vec_u8 p0minus;                                                                      \
    register vec_u8 stage1;                                                                       \
    register vec_u8 stage2;                                                                       \
    register vec_u8 vec160;                                                                       \
    register vec_u8 delta;                                                                        \
    register vec_u8 deltaneg;                                                                     \
                                                                                                  \
    q1minus = vec_nor(q1, q1);                 /* 255 - q1 */                                     \
    stage1 = vec_avg(p1, q1minus);             /* (p1 - q1 + 256)>>1 */                           \
    stage2 = vec_sr(stage1, vec_splat_u8(1));  /* (p1 - q1 + 256)>>2 = 64 + (p1 - q1) >> 2 */     \
    p0minus = vec_nor(p0, p0);                 /* 255 - p0 */                                     \
    stage1 = vec_avg(q0, p0minus);             /* (q0 - p0 + 256)>>1 */                           \
    pq0bit = vec_and(pq0bit, vec_splat_u8(1));                                                    \
    stage2 = vec_avg(stage2, pq0bit);          /* 32 + ((q0 - p0)&1 + (p1 - q1) >> 2 + 1) >> 1 */ \
    stage2 = vec_adds(stage2, stage1);         /* 160 + ((p0 - q0) + (p1 - q1) >> 2 + 1) >> 1 */  \
    vec160 = vec_ld(0, &A0v);                                                                     \
    deltaneg = vec_subs(vec160, stage2);       /* -d */                                           \
    delta = vec_subs(stage2, vec160);          /* d */                                            \
    deltaneg = vec_min(tc0masked, deltaneg);                                                      \
    delta = vec_min(tc0masked, delta);                                                            \
    p0 = vec_subs(p0, deltaneg);                                                                  \
    q0 = vec_subs(q0, delta);                                                                     \
    p0 = vec_adds(p0, delta);                                                                     \
    q0 = vec_adds(q0, deltaneg);                                                                  \
}

#define h264_loop_filter_luma_altivec(p2, p1, p0, q0, q1, q2, alpha, beta, tc0) {            \
    DECLARE_ALIGNED(16, unsigned char, temp)[16];                                            \
    register vec_u8 alphavec;                                                                \
    register vec_u8 betavec;                                                                 \
    register vec_u8 mask;                                                                    \
    register vec_u8 p1mask;                                                                  \
    register vec_u8 q1mask;                                                                  \
    register vector signed char tc0vec;                                                      \
    register vec_u8 finaltc0;                                                                \
    register vec_u8 tc0masked;                                                               \
    register vec_u8 newp1;                                                                   \
    register vec_u8 newq1;                                                                   \
                                                                                             \
    temp[0] = alpha;                                                                         \
    temp[1] = beta;                                                                          \
    alphavec = vec_ld(0, temp);                                                              \
    betavec = vec_splat(alphavec, 0x1);                                                      \
    alphavec = vec_splat(alphavec, 0x0);                                                     \
    mask = h264_deblock_mask(p0, p1, q0, q1, alphavec, betavec); /*if in block */            \
                                                                                             \
    *((int *)temp) = *((int *)tc0);                                                          \
    tc0vec = vec_ld(0, (signed char*)temp);                                                  \
    tc0vec = vec_mergeh(tc0vec, tc0vec);                                                     \
    tc0vec = vec_mergeh(tc0vec, tc0vec);                                                     \
    mask = vec_and(mask, vec_cmpgt(tc0vec, vec_splat_s8(-1)));  /* if tc0[i] >= 0 */         \
    finaltc0 = vec_and((vec_u8)tc0vec, mask);                   /* tc = tc0 */               \
                                                                                             \
    p1mask = diff_lt_altivec(p2, p0, betavec);                                               \
    p1mask = vec_and(p1mask, mask);                             /* if ( |p2 - p0| < beta) */ \
    tc0masked = vec_and(p1mask, (vec_u8)tc0vec);                                             \
    finaltc0 = vec_sub(finaltc0, p1mask);                       /* tc++ */                   \
    newp1 = h264_deblock_q1(p0, p1, p2, q0, tc0masked);                                      \
    /*end if*/                                                                               \
                                                                                             \
    q1mask = diff_lt_altivec(q2, q0, betavec);                                               \
    q1mask = vec_and(q1mask, mask);                             /* if ( |q2 - q0| < beta ) */\
    tc0masked = vec_and(q1mask, (vec_u8)tc0vec);                                             \
    finaltc0 = vec_sub(finaltc0, q1mask);                       /* tc++ */                   \
    newq1 = h264_deblock_q1(p0, q1, q2, q0, tc0masked);                                      \
    /*end if*/                                                                               \
                                                                                             \
    h264_deblock_p0_q0(p0, p1, q0, q1, finaltc0);                                            \
    p1 = newp1;                                                                              \
    q1 = newq1;                                                                              \
}

static void h264_v_loop_filter_luma_altivec(uint8_t *pix, int stride, int alpha, int beta, int8_t *tc0) {

    if ((tc0[0] & tc0[1] & tc0[2] & tc0[3]) >= 0) {
        register vec_u8 p2 = vec_ld(-3*stride, pix);
        register vec_u8 p1 = vec_ld(-2*stride, pix);
        register vec_u8 p0 = vec_ld(-1*stride, pix);
        register vec_u8 q0 = vec_ld(0, pix);
        register vec_u8 q1 = vec_ld(stride, pix);
        register vec_u8 q2 = vec_ld(2*stride, pix);
        h264_loop_filter_luma_altivec(p2, p1, p0, q0, q1, q2, alpha, beta, tc0);
        vec_st(p1, -2*stride, pix);
        vec_st(p0, -1*stride, pix);
        vec_st(q0, 0, pix);
        vec_st(q1, stride, pix);
    }
}

static void h264_h_loop_filter_luma_altivec(uint8_t *pix, int stride, int alpha, int beta, int8_t *tc0) {

    register vec_u8 line0, line1, line2, line3, line4, line5;
    if ((tc0[0] & tc0[1] & tc0[2] & tc0[3]) < 0)
        return;
    readAndTranspose16x6(pix-3, stride, line0, line1, line2, line3, line4, line5);
    h264_loop_filter_luma_altivec(line0, line1, line2, line3, line4, line5, alpha, beta, tc0);
    transpose4x16(line1, line2, line3, line4);
    write16x4(pix-2, stride, line1, line2, line3, line4);
}

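/* (Bi-)weighted prediction: pixels are widened to 16 bits with
 * vec_mergeh/vec_mergel, multiplied by the weight(s), biased by the rounded
 * offset, shifted right by log2_denom and packed back with unsigned
 * saturation. For w == 8 only the half of the 16-byte vector that actually
 * covers the block is computed, chosen by alignment. */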
static av_always_inline
void weight_h264_WxH_altivec(uint8_t *block, int stride, int log2_denom, int weight, int offset, int w, int h)
{
    int y, aligned;
    vec_u8 vblock;
    vec_s16 vtemp, vweight, voffset, v0, v1;
    vec_u16 vlog2_denom;
    DECLARE_ALIGNED(16, int32_t, temp)[4];
    LOAD_ZERO;

    offset <<= log2_denom;
    if(log2_denom) offset += 1<<(log2_denom-1);
    temp[0] = log2_denom;
    temp[1] = weight;
    temp[2] = offset;

    vtemp = (vec_s16)vec_ld(0, temp);
    vlog2_denom = (vec_u16)vec_splat(vtemp, 1);
    vweight = vec_splat(vtemp, 3);
    voffset = vec_splat(vtemp, 5);
    aligned = !((unsigned long)block & 0xf);

    for (y=0; y<h; y++) {
        vblock = vec_ld(0, block);

        v0 = (vec_s16)vec_mergeh(zero_u8v, vblock);
        v1 = (vec_s16)vec_mergel(zero_u8v, vblock);

        if (w == 16 || aligned) {
            v0 = vec_mladd(v0, vweight, zero_s16v);
            v0 = vec_adds(v0, voffset);
            v0 = vec_sra(v0, vlog2_denom);
        }
        if (w == 16 || !aligned) {
            v1 = vec_mladd(v1, vweight, zero_s16v);
            v1 = vec_adds(v1, voffset);
            v1 = vec_sra(v1, vlog2_denom);
        }
        vblock = vec_packsu(v0, v1);
        vec_st(vblock, 0, block);

        block += stride;
    }
}

static av_always_inline
void biweight_h264_WxH_altivec(uint8_t *dst, uint8_t *src, int stride, int log2_denom,
                               int weightd, int weights, int offset, int w, int h)
{
    int y, dst_aligned, src_aligned;
    vec_u8 vsrc, vdst;
    vec_s16 vtemp, vweights, vweightd, voffset, v0, v1, v2, v3;
    vec_u16 vlog2_denom;
    DECLARE_ALIGNED(16, int32_t, temp)[4];
    LOAD_ZERO;

    offset = ((offset + 1) | 1) << log2_denom;
    temp[0] = log2_denom+1;
    temp[1] = weights;
    temp[2] = weightd;
    temp[3] = offset;

    vtemp = (vec_s16)vec_ld(0, temp);
    vlog2_denom = (vec_u16)vec_splat(vtemp, 1);
    vweights = vec_splat(vtemp, 3);
    vweightd = vec_splat(vtemp, 5);
    voffset = vec_splat(vtemp, 7);
    dst_aligned = !((unsigned long)dst & 0xf);
    src_aligned = !((unsigned long)src & 0xf);

    for (y=0; y<h; y++) {
        vdst = vec_ld(0, dst);
        vsrc = vec_ld(0, src);

        v0 = (vec_s16)vec_mergeh(zero_u8v, vdst);
        v1 = (vec_s16)vec_mergel(zero_u8v, vdst);
        v2 = (vec_s16)vec_mergeh(zero_u8v, vsrc);
        v3 = (vec_s16)vec_mergel(zero_u8v, vsrc);

        if (w == 8) {
            if (src_aligned)
                v3 = v2;
            else
                v2 = v3;
        }

        if (w == 16 || dst_aligned) {
            v0 = vec_mladd(v0, vweightd, zero_s16v);
            v2 = vec_mladd(v2, vweights, zero_s16v);

            v0 = vec_adds(v0, voffset);
            v0 = vec_adds(v0, v2);
            v0 = vec_sra(v0, vlog2_denom);
        }
        if (w == 16 || !dst_aligned) {
            v1 = vec_mladd(v1, vweightd, zero_s16v);
            v3 = vec_mladd(v3, vweights, zero_s16v);

            v1 = vec_adds(v1, voffset);
            v1 = vec_adds(v1, v3);
            v1 = vec_sra(v1, vlog2_denom);
        }
        vdst = vec_packsu(v0, v1);
        vec_st(vdst, 0, dst);

        dst += stride;
        src += stride;
    }
}

#define H264_WEIGHT(W,H) \
static void ff_weight_h264_pixels ## W ## x ## H ## _altivec(uint8_t *block, int stride, int log2_denom, int weight, int offset){ \
    weight_h264_WxH_altivec(block, stride, log2_denom, weight, offset, W, H); \
}\
static void ff_biweight_h264_pixels ## W ## x ## H ## _altivec(uint8_t *dst, uint8_t *src, int stride, int log2_denom, int weightd, int weights, int offset){ \
    biweight_h264_WxH_altivec(dst, src, stride, log2_denom, weightd, weights, offset, W, H); \
}

H264_WEIGHT(16,16)
H264_WEIGHT(16, 8)
H264_WEIGHT( 8,16)
H264_WEIGHT( 8, 8)
H264_WEIGHT( 8, 4)

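/* Entry point: hook the AltiVec implementations into the DSPContext.
 * has_altivec() does the runtime CPU check, so a single binary remains
 * safe on PowerPC machines without AltiVec. */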
void dsputil_h264_init_ppc(DSPContext* c, AVCodecContext *avctx) {

    if (has_altivec()) {
        c->put_h264_chroma_pixels_tab[0] = put_h264_chroma_mc8_altivec;
        c->avg_h264_chroma_pixels_tab[0] = avg_h264_chroma_mc8_altivec;
        c->put_no_rnd_vc1_chroma_pixels_tab[0] = put_no_rnd_vc1_chroma_mc8_altivec;
        c->avg_no_rnd_vc1_chroma_pixels_tab[0] = avg_no_rnd_vc1_chroma_mc8_altivec;
        c->h264_idct_add = ff_h264_idct_add_altivec;
        c->h264_idct_add8 = ff_h264_idct_add8_altivec;
        c->h264_idct_add16 = ff_h264_idct_add16_altivec;
        c->h264_idct_add16intra = ff_h264_idct_add16intra_altivec;
        c->h264_idct_dc_add = h264_idct_dc_add_altivec;
        c->h264_idct8_dc_add = ff_h264_idct8_dc_add_altivec;
        c->h264_idct8_add = ff_h264_idct8_add_altivec;
        c->h264_idct8_add4 = ff_h264_idct8_add4_altivec;
        c->h264_v_loop_filter_luma = h264_v_loop_filter_luma_altivec;
        c->h264_h_loop_filter_luma = h264_h_loop_filter_luma_altivec;

#define dspfunc(PFX, IDX, NUM) \
        c->PFX ## _pixels_tab[IDX][ 0] = PFX ## NUM ## _mc00_altivec; \
        c->PFX ## _pixels_tab[IDX][ 1] = PFX ## NUM ## _mc10_altivec; \
        c->PFX ## _pixels_tab[IDX][ 2] = PFX ## NUM ## _mc20_altivec; \
        c->PFX ## _pixels_tab[IDX][ 3] = PFX ## NUM ## _mc30_altivec; \
        c->PFX ## _pixels_tab[IDX][ 4] = PFX ## NUM ## _mc01_altivec; \
        c->PFX ## _pixels_tab[IDX][ 5] = PFX ## NUM ## _mc11_altivec; \
        c->PFX ## _pixels_tab[IDX][ 6] = PFX ## NUM ## _mc21_altivec; \
        c->PFX ## _pixels_tab[IDX][ 7] = PFX ## NUM ## _mc31_altivec; \
        c->PFX ## _pixels_tab[IDX][ 8] = PFX ## NUM ## _mc02_altivec; \
        c->PFX ## _pixels_tab[IDX][ 9] = PFX ## NUM ## _mc12_altivec; \
        c->PFX ## _pixels_tab[IDX][10] = PFX ## NUM ## _mc22_altivec; \
        c->PFX ## _pixels_tab[IDX][11] = PFX ## NUM ## _mc32_altivec; \
        c->PFX ## _pixels_tab[IDX][12] = PFX ## NUM ## _mc03_altivec; \
        c->PFX ## _pixels_tab[IDX][13] = PFX ## NUM ## _mc13_altivec; \
        c->PFX ## _pixels_tab[IDX][14] = PFX ## NUM ## _mc23_altivec; \
        c->PFX ## _pixels_tab[IDX][15] = PFX ## NUM ## _mc33_altivec

        dspfunc(put_h264_qpel, 0, 16);
        dspfunc(avg_h264_qpel, 0, 16);
#undef dspfunc

        c->weight_h264_pixels_tab[0] = ff_weight_h264_pixels16x16_altivec;
        c->weight_h264_pixels_tab[1] = ff_weight_h264_pixels16x8_altivec;
        c->weight_h264_pixels_tab[2] = ff_weight_h264_pixels8x16_altivec;
        c->weight_h264_pixels_tab[3] = ff_weight_h264_pixels8x8_altivec;
        c->weight_h264_pixels_tab[4] = ff_weight_h264_pixels8x4_altivec;
        c->biweight_h264_pixels_tab[0] = ff_biweight_h264_pixels16x16_altivec;
        c->biweight_h264_pixels_tab[1] = ff_biweight_h264_pixels16x8_altivec;
        c->biweight_h264_pixels_tab[2] = ff_biweight_h264_pixels8x16_altivec;
        c->biweight_h264_pixels_tab[3] = ff_biweight_h264_pixels8x8_altivec;
        c->biweight_h264_pixels_tab[4] = ff_biweight_h264_pixels8x4_altivec;
    }
}