Revision 16e0bf73 libpostproc/postprocess_altivec_template.c

View differences:

libpostproc/postprocess_altivec_template.c
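The change is cosmetic: the file is reindented from two-space to four-space indentation, with no functional modification. Removed lines below are prefixed with '-', added lines with '+'; unprefixed lines are unchanged context, and '......' marks runs of unchanged lines elided by the viewer.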
#include "avutil.h"

#define ALTIVEC_TRANSPOSE_8x8_SHORT(src_a,src_b,src_c,src_d,src_e,src_f,src_g,src_h) \
-  do {                                                                  \
-    __typeof__(src_a) tempA1, tempB1, tempC1, tempD1;                   \
-    __typeof__(src_a) tempE1, tempF1, tempG1, tempH1;                   \
-    __typeof__(src_a) tempA2, tempB2, tempC2, tempD2;                   \
-    __typeof__(src_a) tempE2, tempF2, tempG2, tempH2;                   \
-    tempA1 = vec_mergeh (src_a, src_e);                                 \
-    tempB1 = vec_mergel (src_a, src_e);                                 \
-    tempC1 = vec_mergeh (src_b, src_f);                                 \
-    tempD1 = vec_mergel (src_b, src_f);                                 \
-    tempE1 = vec_mergeh (src_c, src_g);                                 \
-    tempF1 = vec_mergel (src_c, src_g);                                 \
-    tempG1 = vec_mergeh (src_d, src_h);                                 \
-    tempH1 = vec_mergel (src_d, src_h);                                 \
-    tempA2 = vec_mergeh (tempA1, tempE1);                               \
-    tempB2 = vec_mergel (tempA1, tempE1);                               \
-    tempC2 = vec_mergeh (tempB1, tempF1);                               \
-    tempD2 = vec_mergel (tempB1, tempF1);                               \
-    tempE2 = vec_mergeh (tempC1, tempG1);                               \
-    tempF2 = vec_mergel (tempC1, tempG1);                               \
-    tempG2 = vec_mergeh (tempD1, tempH1);                               \
-    tempH2 = vec_mergel (tempD1, tempH1);                               \
-    src_a = vec_mergeh (tempA2, tempE2);                                \
-    src_b = vec_mergel (tempA2, tempE2);                                \
-    src_c = vec_mergeh (tempB2, tempF2);                                \
-    src_d = vec_mergel (tempB2, tempF2);                                \
-    src_e = vec_mergeh (tempC2, tempG2);                                \
-    src_f = vec_mergel (tempC2, tempG2);                                \
-    src_g = vec_mergeh (tempD2, tempH2);                                \
-    src_h = vec_mergel (tempD2, tempH2);                                \
-  } while (0)
+    do {                                                          \
+        __typeof__(src_a) tempA1, tempB1, tempC1, tempD1;         \
+        __typeof__(src_a) tempE1, tempF1, tempG1, tempH1;         \
+        __typeof__(src_a) tempA2, tempB2, tempC2, tempD2;         \
+        __typeof__(src_a) tempE2, tempF2, tempG2, tempH2;         \
+        tempA1 = vec_mergeh (src_a, src_e);                       \
+        tempB1 = vec_mergel (src_a, src_e);                       \
+        tempC1 = vec_mergeh (src_b, src_f);                       \
+        tempD1 = vec_mergel (src_b, src_f);                       \
+        tempE1 = vec_mergeh (src_c, src_g);                       \
+        tempF1 = vec_mergel (src_c, src_g);                       \
+        tempG1 = vec_mergeh (src_d, src_h);                       \
+        tempH1 = vec_mergel (src_d, src_h);                       \
+        tempA2 = vec_mergeh (tempA1, tempE1);                     \
+        tempB2 = vec_mergel (tempA1, tempE1);                     \
+        tempC2 = vec_mergeh (tempB1, tempF1);                     \
+        tempD2 = vec_mergel (tempB1, tempF1);                     \
+        tempE2 = vec_mergeh (tempC1, tempG1);                     \
+        tempF2 = vec_mergel (tempC1, tempG1);                     \
+        tempG2 = vec_mergeh (tempD1, tempH1);                     \
+        tempH2 = vec_mergel (tempD1, tempH1);                     \
+        src_a = vec_mergeh (tempA2, tempE2);                      \
+        src_b = vec_mergel (tempA2, tempE2);                      \
+        src_c = vec_mergeh (tempB2, tempF2);                      \
+        src_d = vec_mergel (tempB2, tempF2);                      \
+        src_e = vec_mergeh (tempC2, tempG2);                      \
+        src_f = vec_mergel (tempC2, tempG2);                      \
+        src_g = vec_mergeh (tempD2, tempH2);                      \
+        src_h = vec_mergel (tempD2, tempH2);                      \
+    } while (0)


static inline int vertClassify_altivec(uint8_t src[], int stride, PPContext *c) {
-  /*
+    /*
    this code makes no assumption on src or stride.
    One could remove the recomputation of the perm
    vector by assuming (stride % 16) == 0, unfortunately
    this is not always true.
-  */
-  DECLARE_ALIGNED(16, short, data[8]);
-  int numEq;
-  uint8_t *src2 = src;
-  vector signed short v_dcOffset;
-  vector signed short v2QP;
-  vector unsigned short v4QP;
-  vector unsigned short v_dcThreshold;
-  const int properStride = (stride % 16);
-  const int srcAlign = ((unsigned long)src2 % 16);
-  const int two_vectors = ((srcAlign > 8) || properStride) ? 1 : 0;
-  const vector signed int zero = vec_splat_s32(0);
-  const vector signed short mask = vec_splat_s16(1);
-  vector signed int v_numEq = vec_splat_s32(0);
-
-  data[0] = ((c->nonBQP*c->ppMode.baseDcDiff)>>8) + 1;
-  data[1] = data[0] * 2 + 1;
-  data[2] = c->QP * 2;
-  data[3] = c->QP * 4;
-  vector signed short v_data = vec_ld(0, data);
-  v_dcOffset = vec_splat(v_data, 0);
-  v_dcThreshold = (vector unsigned short)vec_splat(v_data, 1);
-  v2QP = vec_splat(v_data, 2);
-  v4QP = (vector unsigned short)vec_splat(v_data, 3);
-
-  src2 += stride * 4;
-
-  vector signed short v_srcAss0, v_srcAss1, v_srcAss2, v_srcAss3, v_srcAss4, v_srcAss5, v_srcAss6, v_srcAss7;
+    */
+    DECLARE_ALIGNED(16, short, data[8]);
+    int numEq;
+    uint8_t *src2 = src;
+    vector signed short v_dcOffset;
+    vector signed short v2QP;
+    vector unsigned short v4QP;
+    vector unsigned short v_dcThreshold;
+    const int properStride = (stride % 16);
+    const int srcAlign = ((unsigned long)src2 % 16);
+    const int two_vectors = ((srcAlign > 8) || properStride) ? 1 : 0;
+    const vector signed int zero = vec_splat_s32(0);
+    const vector signed short mask = vec_splat_s16(1);
+    vector signed int v_numEq = vec_splat_s32(0);
+
+    data[0] = ((c->nonBQP*c->ppMode.baseDcDiff)>>8) + 1;
+    data[1] = data[0] * 2 + 1;
+    data[2] = c->QP * 2;
+    data[3] = c->QP * 4;
+    vector signed short v_data = vec_ld(0, data);
+    v_dcOffset = vec_splat(v_data, 0);
+    v_dcThreshold = (vector unsigned short)vec_splat(v_data, 1);
+    v2QP = vec_splat(v_data, 2);
+    v4QP = (vector unsigned short)vec_splat(v_data, 3);
+
+    src2 += stride * 4;
+
+    vector signed short v_srcAss0, v_srcAss1, v_srcAss2, v_srcAss3, v_srcAss4, v_srcAss5, v_srcAss6, v_srcAss7;

#define LOAD_LINE(i)                                                    \
-  register int j##i = i * stride;                                       \
-  vector unsigned char perm##i = vec_lvsl(j##i, src2);                  \
-  const vector unsigned char v_srcA1##i = vec_ld(j##i, src2);           \
-  vector unsigned char v_srcA2##i;                                      \
-  if (two_vectors)                                                      \
-    v_srcA2##i = vec_ld(j##i + 16, src2);                               \
-  const vector unsigned char v_srcA##i =                                \
-    vec_perm(v_srcA1##i, v_srcA2##i, perm##i);                          \
-  v_srcAss##i =                                                         \
-    (vector signed short)vec_mergeh((vector signed char)zero,           \
-                                    (vector signed char)v_srcA##i)
+    register int j##i = i * stride;                                     \
+    vector unsigned char perm##i = vec_lvsl(j##i, src2);                \
+    const vector unsigned char v_srcA1##i = vec_ld(j##i, src2);         \
+    vector unsigned char v_srcA2##i;                                    \
+    if (two_vectors)                                                    \
+        v_srcA2##i = vec_ld(j##i + 16, src2);                           \
+    const vector unsigned char v_srcA##i =                              \
+        vec_perm(v_srcA1##i, v_srcA2##i, perm##i);                      \
+    v_srcAss##i =                                                       \
+        (vector signed short)vec_mergeh((vector signed char)zero,       \
+                                        (vector signed char)v_srcA##i)

#define LOAD_LINE_ALIGNED(i)                                            \
-  register int j##i = i * stride;                                       \
-  const vector unsigned char v_srcA##i = vec_ld(j##i, src2);            \
-  v_srcAss##i =                                                         \
-    (vector signed short)vec_mergeh((vector signed char)zero,           \
-                                    (vector signed char)v_srcA##i)
+    register int j##i = i * stride;                                     \
+    const vector unsigned char v_srcA##i = vec_ld(j##i, src2);          \
+    v_srcAss##i =                                                       \
+        (vector signed short)vec_mergeh((vector signed char)zero,       \
+                                        (vector signed char)v_srcA##i)

    /* Special-casing the aligned case is worthwhile, as all calls from
     * the (transposed) horizontable deblocks will be aligned, in addition
     * to the naturally aligned vertical deblocks. */
    if (properStride && srcAlign) {
-      LOAD_LINE_ALIGNED(0);
-      LOAD_LINE_ALIGNED(1);
-      LOAD_LINE_ALIGNED(2);
-      LOAD_LINE_ALIGNED(3);
-      LOAD_LINE_ALIGNED(4);
-      LOAD_LINE_ALIGNED(5);
-      LOAD_LINE_ALIGNED(6);
-      LOAD_LINE_ALIGNED(7);
+        LOAD_LINE_ALIGNED(0);
+        LOAD_LINE_ALIGNED(1);
+        LOAD_LINE_ALIGNED(2);
+        LOAD_LINE_ALIGNED(3);
+        LOAD_LINE_ALIGNED(4);
+        LOAD_LINE_ALIGNED(5);
+        LOAD_LINE_ALIGNED(6);
+        LOAD_LINE_ALIGNED(7);
    } else {
-      LOAD_LINE(0);
-      LOAD_LINE(1);
-      LOAD_LINE(2);
-      LOAD_LINE(3);
-      LOAD_LINE(4);
-      LOAD_LINE(5);
-      LOAD_LINE(6);
-      LOAD_LINE(7);
+        LOAD_LINE(0);
+        LOAD_LINE(1);
+        LOAD_LINE(2);
+        LOAD_LINE(3);
+        LOAD_LINE(4);
+        LOAD_LINE(5);
+        LOAD_LINE(6);
+        LOAD_LINE(7);
    }
#undef LOAD_LINE
#undef LOAD_LINE_ALIGNED

#define ITER(i, j)                                                      \
-  const vector signed short v_diff##i =                                 \
-    vec_sub(v_srcAss##i, v_srcAss##j);                                  \
-  const vector signed short v_sum##i =                                  \
-    vec_add(v_diff##i, v_dcOffset);                                     \
-  const vector signed short v_comp##i =                                 \
-    (vector signed short)vec_cmplt((vector unsigned short)v_sum##i,     \
-                                   v_dcThreshold);                      \
-  const vector signed short v_part##i = vec_and(mask, v_comp##i);       \
-  v_numEq = vec_sum4s(v_part##i, v_numEq);
-
-  ITER(0, 1);
-  ITER(1, 2);
-  ITER(2, 3);
-  ITER(3, 4);
-  ITER(4, 5);
-  ITER(5, 6);
-  ITER(6, 7);
+    const vector signed short v_diff##i =                               \
+        vec_sub(v_srcAss##i, v_srcAss##j);                              \
+    const vector signed short v_sum##i =                                \
+        vec_add(v_diff##i, v_dcOffset);                                 \
+    const vector signed short v_comp##i =                               \
+        (vector signed short)vec_cmplt((vector unsigned short)v_sum##i, \
+                                       v_dcThreshold);                  \
+    const vector signed short v_part##i = vec_and(mask, v_comp##i);     \
+    v_numEq = vec_sum4s(v_part##i, v_numEq);
+
+    ITER(0, 1);
+    ITER(1, 2);
+    ITER(2, 3);
+    ITER(3, 4);
+    ITER(4, 5);
+    ITER(5, 6);
+    ITER(6, 7);
#undef ITER

-  v_numEq = vec_sums(v_numEq, zero);
-
-  v_numEq = vec_splat(v_numEq, 3);
-  vec_ste(v_numEq, 0, &numEq);
-
-  if (numEq > c->ppMode.flatnessThreshold)
-    {
-      const vector unsigned char mmoP1 = (const vector unsigned char)
-        AVV(0x1f, 0x1f, 0x1f, 0x1f, 0x1f, 0x1f, 0x1f, 0x1f,
-            0x00, 0x01, 0x12, 0x13, 0x08, 0x09, 0x1A, 0x1B);
-      const vector unsigned char mmoP2 = (const vector unsigned char)
-        AVV(0x04, 0x05, 0x16, 0x17, 0x0C, 0x0D, 0x1E, 0x1F,
-            0x1f, 0x1f, 0x1f, 0x1f, 0x1f, 0x1f, 0x1f, 0x1f);
-      const vector unsigned char mmoP = (const vector unsigned char)
-        vec_lvsl(8, (unsigned char*)0);
-
-      vector signed short mmoL1 = vec_perm(v_srcAss0, v_srcAss2, mmoP1);
-      vector signed short mmoL2 = vec_perm(v_srcAss4, v_srcAss6, mmoP2);
-      vector signed short mmoL = vec_perm(mmoL1, mmoL2, mmoP);
-      vector signed short mmoR1 = vec_perm(v_srcAss5, v_srcAss7, mmoP1);
-      vector signed short mmoR2 = vec_perm(v_srcAss1, v_srcAss3, mmoP2);
-      vector signed short mmoR = vec_perm(mmoR1, mmoR2, mmoP);
-      vector signed short mmoDiff = vec_sub(mmoL, mmoR);
-      vector unsigned short mmoSum = (vector unsigned short)vec_add(mmoDiff, v2QP);
-
-      if (vec_any_gt(mmoSum, v4QP))
-        return 0;
-      else
-        return 1;
+    v_numEq = vec_sums(v_numEq, zero);
+
+    v_numEq = vec_splat(v_numEq, 3);
+    vec_ste(v_numEq, 0, &numEq);
+
+    if (numEq > c->ppMode.flatnessThreshold){
+        const vector unsigned char mmoP1 = (const vector unsigned char)
+            AVV(0x1f, 0x1f, 0x1f, 0x1f, 0x1f, 0x1f, 0x1f, 0x1f,
+                0x00, 0x01, 0x12, 0x13, 0x08, 0x09, 0x1A, 0x1B);
+        const vector unsigned char mmoP2 = (const vector unsigned char)
+            AVV(0x04, 0x05, 0x16, 0x17, 0x0C, 0x0D, 0x1E, 0x1F,
+                0x1f, 0x1f, 0x1f, 0x1f, 0x1f, 0x1f, 0x1f, 0x1f);
+        const vector unsigned char mmoP = (const vector unsigned char)
+            vec_lvsl(8, (unsigned char*)0);
+
+        vector signed short mmoL1 = vec_perm(v_srcAss0, v_srcAss2, mmoP1);
+        vector signed short mmoL2 = vec_perm(v_srcAss4, v_srcAss6, mmoP2);
+        vector signed short mmoL = vec_perm(mmoL1, mmoL2, mmoP);
+        vector signed short mmoR1 = vec_perm(v_srcAss5, v_srcAss7, mmoP1);
+        vector signed short mmoR2 = vec_perm(v_srcAss1, v_srcAss3, mmoP2);
+        vector signed short mmoR = vec_perm(mmoR1, mmoR2, mmoP);
+        vector signed short mmoDiff = vec_sub(mmoL, mmoR);
+        vector unsigned short mmoSum = (vector unsigned short)vec_add(mmoDiff, v2QP);
+
+        if (vec_any_gt(mmoSum, v4QP))
+            return 0;
+        else
+            return 1;
    }
-  else return 2;
+    else return 2;
}
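The LOAD_LINE machinery above (unchanged by this revision) is the classic AltiVec misaligned-load idiom that the function's opening comment refers to: vec_ld() can only fetch 16-byte-aligned quadwords, so an arbitrarily aligned row is spliced together from the two aligned quadwords that straddle it, using the permute mask produced by vec_lvsl(). A minimal sketch of the idiom, with a hypothetical helper name that is not part of this file:

static inline vector unsigned char load_unaligned(const uint8_t *p)
{
    /* the two aligned quadwords covering p..p+15 (vec_ld truncates
     * the effective address to a 16-byte boundary) */
    vector unsigned char msq  = vec_ld(0,  p);
    vector unsigned char lsq  = vec_ld(15, p);
    /* permute mask encoding p % 16 */
    vector unsigned char mask = vec_lvsl(0, p);
    /* splice out the 16 bytes starting at p */
    return vec_perm(msq, lsq, mask);
}

When src and stride are both multiples of 16 the mask is the identity and the second load is redundant, which is why the code carries a LOAD_LINE_ALIGNED variant alongside the general one instead of recomputing the perm vector for every row.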

static inline void doVertLowPass_altivec(uint8_t *src, int stride, PPContext *c) {
-  /*
+    /*
    this code makes no assumption on src or stride.
    One could remove the recomputation of the perm
    vector by assuming (stride % 16) == 0, unfortunately
    this is not always true. Quite a lot of load/stores
    can be removed by assuming proper alignment of
    src & stride :-(
-  */
-  uint8_t *src2 = src;
-  const vector signed int zero = vec_splat_s32(0);
-  const int properStride = (stride % 16);
-  const int srcAlign = ((unsigned long)src2 % 16);
-  DECLARE_ALIGNED(16, short, qp[8]);
-  qp[0] = c->QP;
-  vector signed short vqp = vec_ld(0, qp);
-  vqp = vec_splat(vqp, 0);
-
-  src2 += stride*3;
-
-  vector signed short vb0, vb1, vb2, vb3, vb4, vb5, vb6, vb7, vb8, vb9;
-  vector unsigned char vbA0, vbA1, vbA2, vbA3, vbA4, vbA5, vbA6, vbA7, vbA8, vbA9;
-  vector unsigned char vbB0, vbB1, vbB2, vbB3, vbB4, vbB5, vbB6, vbB7, vbB8, vbB9;
-  vector unsigned char vbT0, vbT1, vbT2, vbT3, vbT4, vbT5, vbT6, vbT7, vbT8, vbT9;
+    */
+    uint8_t *src2 = src;
+    const vector signed int zero = vec_splat_s32(0);
+    const int properStride = (stride % 16);
+    const int srcAlign = ((unsigned long)src2 % 16);
+    DECLARE_ALIGNED(16, short, qp[8]);
+    qp[0] = c->QP;
+    vector signed short vqp = vec_ld(0, qp);
+    vqp = vec_splat(vqp, 0);
+
+    src2 += stride*3;
+
+    vector signed short vb0, vb1, vb2, vb3, vb4, vb5, vb6, vb7, vb8, vb9;
+    vector unsigned char vbA0, vbA1, vbA2, vbA3, vbA4, vbA5, vbA6, vbA7, vbA8, vbA9;
+    vector unsigned char vbB0, vbB1, vbB2, vbB3, vbB4, vbB5, vbB6, vbB7, vbB8, vbB9;
+    vector unsigned char vbT0, vbT1, vbT2, vbT3, vbT4, vbT5, vbT6, vbT7, vbT8, vbT9;

#define LOAD_LINE(i)                                                    \
-  const vector unsigned char perml##i =                                 \
-    vec_lvsl(i * stride, src2);                                         \
-  vbA##i = vec_ld(i * stride, src2);                                    \
-  vbB##i = vec_ld(i * stride + 16, src2);                               \
-  vbT##i = vec_perm(vbA##i, vbB##i, perml##i);                          \
-  vb##i =                                                               \
-    (vector signed short)vec_mergeh((vector unsigned char)zero,         \
-                                    (vector unsigned char)vbT##i)
+    const vector unsigned char perml##i =                               \
+        vec_lvsl(i * stride, src2);                                     \
+    vbA##i = vec_ld(i * stride, src2);                                  \
+    vbB##i = vec_ld(i * stride + 16, src2);                             \
+    vbT##i = vec_perm(vbA##i, vbB##i, perml##i);                        \
+    vb##i =                                                             \
+        (vector signed short)vec_mergeh((vector unsigned char)zero,     \
+                                        (vector unsigned char)vbT##i)

#define LOAD_LINE_ALIGNED(i)                                            \
-  register int j##i = i * stride;                                       \
-  vbT##i = vec_ld(j##i, src2);                                          \
-  vb##i =                                                               \
-    (vector signed short)vec_mergeh((vector signed char)zero,           \
-                                    (vector signed char)vbT##i)
-
-    /* Special-casing the aligned case is worthwhile, as all calls from
-     * the (transposed) horizontable deblocks will be aligned, in addition
-     * to the naturally aligned vertical deblocks. */
-    if (properStride && srcAlign) {
-      LOAD_LINE_ALIGNED(0);
-      LOAD_LINE_ALIGNED(1);
-      LOAD_LINE_ALIGNED(2);
-      LOAD_LINE_ALIGNED(3);
-      LOAD_LINE_ALIGNED(4);
-      LOAD_LINE_ALIGNED(5);
-      LOAD_LINE_ALIGNED(6);
-      LOAD_LINE_ALIGNED(7);
-      LOAD_LINE_ALIGNED(8);
-      LOAD_LINE_ALIGNED(9);
-    } else {
-      LOAD_LINE(0);
-      LOAD_LINE(1);
-      LOAD_LINE(2);
-      LOAD_LINE(3);
-      LOAD_LINE(4);
-      LOAD_LINE(5);
-      LOAD_LINE(6);
-      LOAD_LINE(7);
-      LOAD_LINE(8);
-      LOAD_LINE(9);
-    }
+    register int j##i = i * stride;                                     \
+    vbT##i = vec_ld(j##i, src2);                                        \
+    vb##i =                                                             \
+        (vector signed short)vec_mergeh((vector signed char)zero,       \
+                                        (vector signed char)vbT##i)
+
+      /* Special-casing the aligned case is worthwhile, as all calls from
+       * the (transposed) horizontable deblocks will be aligned, in addition
+       * to the naturally aligned vertical deblocks. */
+      if (properStride && srcAlign) {
+          LOAD_LINE_ALIGNED(0);
+          LOAD_LINE_ALIGNED(1);
+          LOAD_LINE_ALIGNED(2);
+          LOAD_LINE_ALIGNED(3);
+          LOAD_LINE_ALIGNED(4);
+          LOAD_LINE_ALIGNED(5);
+          LOAD_LINE_ALIGNED(6);
+          LOAD_LINE_ALIGNED(7);
+          LOAD_LINE_ALIGNED(8);
+          LOAD_LINE_ALIGNED(9);
+      } else {
+          LOAD_LINE(0);
+          LOAD_LINE(1);
+          LOAD_LINE(2);
+          LOAD_LINE(3);
+          LOAD_LINE(4);
+          LOAD_LINE(5);
+          LOAD_LINE(6);
+          LOAD_LINE(7);
+          LOAD_LINE(8);
+          LOAD_LINE(9);
+      }
#undef LOAD_LINE
#undef LOAD_LINE_ALIGNED

-  const vector unsigned short v_2 = vec_splat_u16(2);
-  const vector unsigned short v_4 = vec_splat_u16(4);
+    const vector unsigned short v_2 = vec_splat_u16(2);
+    const vector unsigned short v_4 = vec_splat_u16(4);

-  const vector signed short v_diff01 = vec_sub(vb0, vb1);
-  const vector unsigned short v_cmp01 =
-    (const vector unsigned short) vec_cmplt(vec_abs(v_diff01), vqp);
-  const vector signed short v_first = vec_sel(vb1, vb0, v_cmp01);
-  const vector signed short v_diff89 = vec_sub(vb8, vb9);
-  const vector unsigned short v_cmp89 =
-    (const vector unsigned short) vec_cmplt(vec_abs(v_diff89), vqp);
-  const vector signed short v_last = vec_sel(vb8, vb9, v_cmp89);
+    const vector signed short v_diff01 = vec_sub(vb0, vb1);
+    const vector unsigned short v_cmp01 =
+        (const vector unsigned short) vec_cmplt(vec_abs(v_diff01), vqp);
+    const vector signed short v_first = vec_sel(vb1, vb0, v_cmp01);
+    const vector signed short v_diff89 = vec_sub(vb8, vb9);
+    const vector unsigned short v_cmp89 =
+        (const vector unsigned short) vec_cmplt(vec_abs(v_diff89), vqp);
+    const vector signed short v_last = vec_sel(vb8, vb9, v_cmp89);

-  const vector signed short temp01 = vec_mladd(v_first, (vector signed short)v_4, vb1);
-  const vector signed short temp02 = vec_add(vb2, vb3);
-  const vector signed short temp03 = vec_add(temp01, (vector signed short)v_4);
-  const vector signed short v_sumsB0 = vec_add(temp02, temp03);
+    const vector signed short temp01 = vec_mladd(v_first, (vector signed short)v_4, vb1);
+    const vector signed short temp02 = vec_add(vb2, vb3);
+    const vector signed short temp03 = vec_add(temp01, (vector signed short)v_4);
+    const vector signed short v_sumsB0 = vec_add(temp02, temp03);

-  const vector signed short temp11 = vec_sub(v_sumsB0, v_first);
-  const vector signed short v_sumsB1 = vec_add(temp11, vb4);
+    const vector signed short temp11 = vec_sub(v_sumsB0, v_first);
+    const vector signed short v_sumsB1 = vec_add(temp11, vb4);

-  const vector signed short temp21 = vec_sub(v_sumsB1, v_first);
-  const vector signed short v_sumsB2 = vec_add(temp21, vb5);
+    const vector signed short temp21 = vec_sub(v_sumsB1, v_first);
+    const vector signed short v_sumsB2 = vec_add(temp21, vb5);

-  const vector signed short temp31 = vec_sub(v_sumsB2, v_first);
-  const vector signed short v_sumsB3 = vec_add(temp31, vb6);
+    const vector signed short temp31 = vec_sub(v_sumsB2, v_first);
+    const vector signed short v_sumsB3 = vec_add(temp31, vb6);

-  const vector signed short temp41 = vec_sub(v_sumsB3, v_first);
-  const vector signed short v_sumsB4 = vec_add(temp41, vb7);
+    const vector signed short temp41 = vec_sub(v_sumsB3, v_first);
+    const vector signed short v_sumsB4 = vec_add(temp41, vb7);

-  const vector signed short temp51 = vec_sub(v_sumsB4, vb1);
-  const vector signed short v_sumsB5 = vec_add(temp51, vb8);
+    const vector signed short temp51 = vec_sub(v_sumsB4, vb1);
+    const vector signed short v_sumsB5 = vec_add(temp51, vb8);

-  const vector signed short temp61 = vec_sub(v_sumsB5, vb2);
-  const vector signed short v_sumsB6 = vec_add(temp61, v_last);
+    const vector signed short temp61 = vec_sub(v_sumsB5, vb2);
+    const vector signed short v_sumsB6 = vec_add(temp61, v_last);

-  const vector signed short temp71 = vec_sub(v_sumsB6, vb3);
-  const vector signed short v_sumsB7 = vec_add(temp71, v_last);
+    const vector signed short temp71 = vec_sub(v_sumsB6, vb3);
+    const vector signed short v_sumsB7 = vec_add(temp71, v_last);

-  const vector signed short temp81 = vec_sub(v_sumsB7, vb4);
-  const vector signed short v_sumsB8 = vec_add(temp81, v_last);
+    const vector signed short temp81 = vec_sub(v_sumsB7, vb4);
+    const vector signed short v_sumsB8 = vec_add(temp81, v_last);

-  const vector signed short temp91 = vec_sub(v_sumsB8, vb5);
-  const vector signed short v_sumsB9 = vec_add(temp91, v_last);
+    const vector signed short temp91 = vec_sub(v_sumsB8, vb5);
+    const vector signed short v_sumsB9 = vec_add(temp91, v_last);

#define COMPUTE_VR(i, j, k)                                             \
-  const vector signed short temps1##i =                                 \
-    vec_add(v_sumsB##i, v_sumsB##k);                                    \
-  const vector signed short temps2##i =                                 \
-    vec_mladd(vb##j, (vector signed short)v_2, temps1##i);              \
-  const vector signed short  vr##j = vec_sra(temps2##i, v_4)
-
-  COMPUTE_VR(0, 1, 2);
-  COMPUTE_VR(1, 2, 3);
-  COMPUTE_VR(2, 3, 4);
-  COMPUTE_VR(3, 4, 5);
-  COMPUTE_VR(4, 5, 6);
-  COMPUTE_VR(5, 6, 7);
-  COMPUTE_VR(6, 7, 8);
-  COMPUTE_VR(7, 8, 9);
-
-  const vector signed char neg1 = vec_splat_s8(-1);
-  const vector unsigned char permHH = (const vector unsigned char)AVV(0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
-                                                                      0x18, 0x19, 0x1A, 0x1B, 0x1C, 0x1D, 0x1E, 0x1F);
+    const vector signed short temps1##i =                               \
+        vec_add(v_sumsB##i, v_sumsB##k);                                \
+    const vector signed short temps2##i =                               \
+        vec_mladd(vb##j, (vector signed short)v_2, temps1##i);          \
+    const vector signed short  vr##j = vec_sra(temps2##i, v_4)
+
+    COMPUTE_VR(0, 1, 2);
+    COMPUTE_VR(1, 2, 3);
+    COMPUTE_VR(2, 3, 4);
+    COMPUTE_VR(3, 4, 5);
+    COMPUTE_VR(4, 5, 6);
+    COMPUTE_VR(5, 6, 7);
+    COMPUTE_VR(6, 7, 8);
+    COMPUTE_VR(7, 8, 9);
+
+    const vector signed char neg1 = vec_splat_s8(-1);
+    const vector unsigned char permHH = (const vector unsigned char)AVV(0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
+                                                                        0x18, 0x19, 0x1A, 0x1B, 0x1C, 0x1D, 0x1E, 0x1F);

#define PACK_AND_STORE(i)                                       \
-  const vector unsigned char perms##i =                         \
-    vec_lvsr(i * stride, src2);                                 \
-  const vector unsigned char vf##i =                            \
-    vec_packsu(vr##i, (vector signed short)zero);               \
-  const vector unsigned char vg##i =                            \
-    vec_perm(vf##i, vbT##i, permHH);                            \
-  const vector unsigned char mask##i =                          \
-    vec_perm((vector unsigned char)zero, (vector unsigned char)neg1, perms##i); \
-  const vector unsigned char vg2##i =                           \
-    vec_perm(vg##i, vg##i, perms##i);                           \
-  const vector unsigned char svA##i =                           \
-    vec_sel(vbA##i, vg2##i, mask##i);                           \
-  const vector unsigned char svB##i =                           \
-    vec_sel(vg2##i, vbB##i, mask##i);                           \
-  vec_st(svA##i, i * stride, src2);                             \
-  vec_st(svB##i, i * stride + 16, src2)
+    const vector unsigned char perms##i =                       \
+        vec_lvsr(i * stride, src2);                             \
+    const vector unsigned char vf##i =                          \
+        vec_packsu(vr##i, (vector signed short)zero);           \
+    const vector unsigned char vg##i =                          \
+        vec_perm(vf##i, vbT##i, permHH);                        \
+    const vector unsigned char mask##i =                        \
+        vec_perm((vector unsigned char)zero, (vector unsigned char)neg1, perms##i); \
+    const vector unsigned char vg2##i =                         \
+        vec_perm(vg##i, vg##i, perms##i);                       \
+    const vector unsigned char svA##i =                         \
+        vec_sel(vbA##i, vg2##i, mask##i);                       \
+    const vector unsigned char svB##i =                         \
+        vec_sel(vg2##i, vbB##i, mask##i);                       \
+    vec_st(svA##i, i * stride, src2);                           \
+    vec_st(svB##i, i * stride + 16, src2)

#define PACK_AND_STORE_ALIGNED(i)                               \
-  const vector unsigned char vf##i =                            \
-    vec_packsu(vr##i, (vector signed short)zero);               \
-  const vector unsigned char vg##i =                            \
-    vec_perm(vf##i, vbT##i, permHH);                            \
-  vec_st(vg##i, i * stride, src2)
-
-  /* Special-casing the aligned case is worthwhile, as all calls from
-   * the (transposed) horizontable deblocks will be aligned, in addition
-   * to the naturally aligned vertical deblocks. */
-  if (properStride && srcAlign) {
-    PACK_AND_STORE_ALIGNED(1);
-    PACK_AND_STORE_ALIGNED(2);
-    PACK_AND_STORE_ALIGNED(3);
-    PACK_AND_STORE_ALIGNED(4);
-    PACK_AND_STORE_ALIGNED(5);
-    PACK_AND_STORE_ALIGNED(6);
-    PACK_AND_STORE_ALIGNED(7);
-    PACK_AND_STORE_ALIGNED(8);
-  } else {
-    PACK_AND_STORE(1);
-    PACK_AND_STORE(2);
-    PACK_AND_STORE(3);
-    PACK_AND_STORE(4);
-    PACK_AND_STORE(5);
-    PACK_AND_STORE(6);
-    PACK_AND_STORE(7);
-    PACK_AND_STORE(8);
-  }
+    const vector unsigned char vf##i =                          \
+        vec_packsu(vr##i, (vector signed short)zero);           \
+    const vector unsigned char vg##i =                          \
+        vec_perm(vf##i, vbT##i, permHH);                        \
+    vec_st(vg##i, i * stride, src2)
+
+    /* Special-casing the aligned case is worthwhile, as all calls from
+     * the (transposed) horizontable deblocks will be aligned, in addition
+     * to the naturally aligned vertical deblocks. */
+    if (properStride && srcAlign) {
+        PACK_AND_STORE_ALIGNED(1);
+        PACK_AND_STORE_ALIGNED(2);
+        PACK_AND_STORE_ALIGNED(3);
+        PACK_AND_STORE_ALIGNED(4);
+        PACK_AND_STORE_ALIGNED(5);
+        PACK_AND_STORE_ALIGNED(6);
+        PACK_AND_STORE_ALIGNED(7);
+        PACK_AND_STORE_ALIGNED(8);
+    } else {
+        PACK_AND_STORE(1);
+        PACK_AND_STORE(2);
+        PACK_AND_STORE(3);
+        PACK_AND_STORE(4);
+        PACK_AND_STORE(5);
+        PACK_AND_STORE(6);
+        PACK_AND_STORE(7);
+        PACK_AND_STORE(8);
+    }
#undef PACK_AND_STORE
#undef PACK_AND_STORE_ALIGNED
}
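PACK_AND_STORE above is the store-side counterpart of the load idiom: AltiVec has no misaligned store, so the packed result is rotated into position with a vec_lvsr() mask, merged into the two aligned quadwords previously loaded from the destination (vbA##i and vbB##i), and both quadwords are written back. A sketch under the same caveat (hypothetical helper; left and right stand for the previously loaded destination quadwords):

static inline void store_unaligned(vector unsigned char data,
                                   vector unsigned char left,
                                   vector unsigned char right,
                                   uint8_t *p)
{
    const vector unsigned char zero  = vec_splat_u8(0);
    const vector unsigned char neg1  = (vector unsigned char)vec_splat_s8(-1);
    const vector unsigned char align = vec_lvsr(0, p);
    /* 0x00 bytes keep the destination, 0xFF bytes take the new data */
    const vector unsigned char mask  = vec_perm(zero, neg1, align);
    /* rotate the data so its byte 0 lands at offset p % 16 */
    const vector unsigned char rot   = vec_perm(data, data, align);
    vec_st(vec_sel(left, rot,  mask),  0, p);  /* patch first aligned quadword  */
    vec_st(vec_sel(rot, right, mask), 16, p);  /* patch second aligned quadword */
}

This read-modify-write preserves the destination bytes outside the 16 written ones, at the cost of two stores per row; the aligned path (PACK_AND_STORE_ALIGNED) collapses to a single vec_st().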
......


static inline void doVertDefFilter_altivec(uint8_t src[], int stride, PPContext *c) {
-  /*
+    /*
    this code makes no assumption on src or stride.
    One could remove the recomputation of the perm
    vector by assuming (stride % 16) == 0, unfortunately
    this is not always true. Quite a lot of load/stores
    can be removed by assuming proper alignment of
    src & stride :-(
-  */
-  uint8_t *src2 = src;
-  const vector signed int zero = vec_splat_s32(0);
-  DECLARE_ALIGNED(16, short, qp[8]);
-  qp[0] = 8*c->QP;
-  vector signed short vqp = vec_ld(0, qp);
-  vqp = vec_splat(vqp, 0);
+    */
+    uint8_t *src2 = src;
+    const vector signed int zero = vec_splat_s32(0);
+    DECLARE_ALIGNED(16, short, qp[8]);
+    qp[0] = 8*c->QP;
+    vector signed short vqp = vec_ld(0, qp);
+    vqp = vec_splat(vqp, 0);

#define LOAD_LINE(i)                                                    \
-  const vector unsigned char perm##i =                                  \
-    vec_lvsl(i * stride, src2);                                         \
-  const vector unsigned char vbA##i =                                   \
-    vec_ld(i * stride, src2);                                           \
-  const vector unsigned char vbB##i =                                   \
-    vec_ld(i * stride + 16, src2);                                      \
-  const vector unsigned char vbT##i =                                   \
-    vec_perm(vbA##i, vbB##i, perm##i);                                  \
-  const vector signed short vb##i =                                     \
-    (vector signed short)vec_mergeh((vector unsigned char)zero,         \
-                                    (vector unsigned char)vbT##i)
-
-  src2 += stride*3;
-
-  LOAD_LINE(1);
-  LOAD_LINE(2);
-  LOAD_LINE(3);
-  LOAD_LINE(4);
-  LOAD_LINE(5);
-  LOAD_LINE(6);
-  LOAD_LINE(7);
-  LOAD_LINE(8);
+    const vector unsigned char perm##i =                                \
+        vec_lvsl(i * stride, src2);                                     \
+    const vector unsigned char vbA##i =                                 \
+        vec_ld(i * stride, src2);                                       \
+    const vector unsigned char vbB##i =                                 \
+        vec_ld(i * stride + 16, src2);                                  \
+    const vector unsigned char vbT##i =                                 \
+        vec_perm(vbA##i, vbB##i, perm##i);                              \
+    const vector signed short vb##i =                                   \
+        (vector signed short)vec_mergeh((vector unsigned char)zero,     \
+                                        (vector unsigned char)vbT##i)
+
+    src2 += stride*3;
+
+     LOAD_LINE(1);
+     LOAD_LINE(2);
+     LOAD_LINE(3);
+     LOAD_LINE(4);
+     LOAD_LINE(5);
+     LOAD_LINE(6);
+     LOAD_LINE(7);
+     LOAD_LINE(8);
#undef LOAD_LINE

-  const vector signed short v_1 = vec_splat_s16(1);
-  const vector signed short v_2 = vec_splat_s16(2);
-  const vector signed short v_5 = vec_splat_s16(5);
-  const vector signed short v_32 = vec_sl(v_1,
-                                          (vector unsigned short)v_5);
-  /* middle energy */
-  const vector signed short l3minusl6 = vec_sub(vb3, vb6);
-  const vector signed short l5minusl4 = vec_sub(vb5, vb4);
-  const vector signed short twotimes_l3minusl6 = vec_mladd(v_2, l3minusl6, (vector signed short)zero);
-  const vector signed short mE = vec_mladd(v_5, l5minusl4, twotimes_l3minusl6);
-  const vector signed short absmE = vec_abs(mE);
-  /* left & right energy */
-  const vector signed short l1minusl4 = vec_sub(vb1, vb4);
-  const vector signed short l3minusl2 = vec_sub(vb3, vb2);
-  const vector signed short l5minusl8 = vec_sub(vb5, vb8);
-  const vector signed short l7minusl6 = vec_sub(vb7, vb6);
-  const vector signed short twotimes_l1minusl4 = vec_mladd(v_2, l1minusl4, (vector signed short)zero);
-  const vector signed short twotimes_l5minusl8 = vec_mladd(v_2, l5minusl8, (vector signed short)zero);
-  const vector signed short lE = vec_mladd(v_5, l3minusl2, twotimes_l1minusl4);
-  const vector signed short rE = vec_mladd(v_5, l7minusl6, twotimes_l5minusl8);
-  /* d */
-  const vector signed short ddiff = vec_sub(absmE,
-                                            vec_min(vec_abs(lE),
-                                                    vec_abs(rE)));
-  const vector signed short ddiffclamp = vec_max(ddiff, (vector signed short)zero);
-  const vector signed short dtimes64 = vec_mladd(v_5, ddiffclamp, v_32);
-  const vector signed short d = vec_sra(dtimes64, vec_splat_u16(6));
-  const vector signed short minusd = vec_sub((vector signed short)zero, d);
-  const vector signed short finald = vec_sel(minusd,
-                                             d,
-                                             vec_cmpgt(vec_sub((vector signed short)zero, mE),
-                                                       (vector signed short)zero));
-  /* q */
-  const vector signed short qtimes2 = vec_sub(vb4, vb5);
-  /* for a shift right to behave like /2, we need to add one
-     to all negative integer */
-  const vector signed short rounddown = vec_sel((vector signed short)zero,
-                                                v_1,
-                                                vec_cmplt(qtimes2, (vector signed short)zero));
-  const vector signed short q = vec_sra(vec_add(qtimes2, rounddown), vec_splat_u16(1));
-  /* clamp */
-  const vector signed short dclamp_P1 = vec_max((vector signed short)zero, finald);
-  const vector signed short dclamp_P = vec_min(dclamp_P1, q);
-  const vector signed short dclamp_N1 = vec_min((vector signed short)zero, finald);
-  const vector signed short dclamp_N = vec_max(dclamp_N1, q);
-
-  const vector signed short dclampedfinal = vec_sel(dclamp_N,
-                                                    dclamp_P,
-                                                    vec_cmpgt(q, (vector signed short)zero));
-  const vector signed short dornotd = vec_sel((vector signed short)zero,
-                                              dclampedfinal,
-                                              vec_cmplt(absmE, vqp));
-  /* add/subtract to l4 and l5 */
-  const vector signed short vb4minusd = vec_sub(vb4, dornotd);
-  const vector signed short vb5plusd = vec_add(vb5, dornotd);
-  /* finally, stores */
-  const vector unsigned char st4 = vec_packsu(vb4minusd, (vector signed short)zero);
-  const vector unsigned char st5 = vec_packsu(vb5plusd, (vector signed short)zero);
-
-  const vector signed char neg1 = vec_splat_s8(-1);
-  const vector unsigned char permHH = (const vector unsigned char)AVV(0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
-                                                                      0x18, 0x19, 0x1A, 0x1B, 0x1C, 0x1D, 0x1E, 0x1F);
+     const vector signed short v_1 = vec_splat_s16(1);
+     const vector signed short v_2 = vec_splat_s16(2);
+     const vector signed short v_5 = vec_splat_s16(5);
+     const vector signed short v_32 = vec_sl(v_1,
+                                             (vector unsigned short)v_5);
+     /* middle energy */
+     const vector signed short l3minusl6 = vec_sub(vb3, vb6);
+     const vector signed short l5minusl4 = vec_sub(vb5, vb4);
+     const vector signed short twotimes_l3minusl6 = vec_mladd(v_2, l3minusl6, (vector signed short)zero);
+     const vector signed short mE = vec_mladd(v_5, l5minusl4, twotimes_l3minusl6);
+     const vector signed short absmE = vec_abs(mE);
+     /* left & right energy */
+     const vector signed short l1minusl4 = vec_sub(vb1, vb4);
+     const vector signed short l3minusl2 = vec_sub(vb3, vb2);
+     const vector signed short l5minusl8 = vec_sub(vb5, vb8);
+     const vector signed short l7minusl6 = vec_sub(vb7, vb6);
+     const vector signed short twotimes_l1minusl4 = vec_mladd(v_2, l1minusl4, (vector signed short)zero);
+     const vector signed short twotimes_l5minusl8 = vec_mladd(v_2, l5minusl8, (vector signed short)zero);
+     const vector signed short lE = vec_mladd(v_5, l3minusl2, twotimes_l1minusl4);
+     const vector signed short rE = vec_mladd(v_5, l7minusl6, twotimes_l5minusl8);
+     /* d */
+     const vector signed short ddiff = vec_sub(absmE,
+                                               vec_min(vec_abs(lE),
+                                                       vec_abs(rE)));
+     const vector signed short ddiffclamp = vec_max(ddiff, (vector signed short)zero);
+     const vector signed short dtimes64 = vec_mladd(v_5, ddiffclamp, v_32);
+     const vector signed short d = vec_sra(dtimes64, vec_splat_u16(6));
+     const vector signed short minusd = vec_sub((vector signed short)zero, d);
+     const vector signed short finald = vec_sel(minusd,
+                                                d,
+                                                vec_cmpgt(vec_sub((vector signed short)zero, mE),
+                                                          (vector signed short)zero));
+     /* q */
+     const vector signed short qtimes2 = vec_sub(vb4, vb5);
+     /* for a shift right to behave like /2, we need to add one
+        to all negative integer */
+     const vector signed short rounddown = vec_sel((vector signed short)zero,
+                                                   v_1,
+                                                   vec_cmplt(qtimes2, (vector signed short)zero));
+     const vector signed short q = vec_sra(vec_add(qtimes2, rounddown), vec_splat_u16(1));
+     /* clamp */
+     const vector signed short dclamp_P1 = vec_max((vector signed short)zero, finald);
+     const vector signed short dclamp_P = vec_min(dclamp_P1, q);
+     const vector signed short dclamp_N1 = vec_min((vector signed short)zero, finald);
+     const vector signed short dclamp_N = vec_max(dclamp_N1, q);
+
+     const vector signed short dclampedfinal = vec_sel(dclamp_N,
+                                                       dclamp_P,
+                                                       vec_cmpgt(q, (vector signed short)zero));
+     const vector signed short dornotd = vec_sel((vector signed short)zero,
+                                                 dclampedfinal,
+                                                 vec_cmplt(absmE, vqp));
+     /* add/subtract to l4 and l5 */
+     const vector signed short vb4minusd = vec_sub(vb4, dornotd);
+     const vector signed short vb5plusd  = vec_add(vb5, dornotd);
+     /* finally, stores */
+     const vector unsigned char st4 = vec_packsu(vb4minusd, (vector signed short)zero);
+     const vector unsigned char st5 = vec_packsu(vb5plusd,  (vector signed short)zero);
+
+     const vector signed char neg1 = vec_splat_s8(-1);
+     const vector unsigned char permHH = (const vector unsigned char)AVV(0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
+                                                                         0x18, 0x19, 0x1A, 0x1B, 0x1C, 0x1D, 0x1E, 0x1F);

#define STORE(i)                                                \
-  const vector unsigned char perms##i =                         \
-    vec_lvsr(i * stride, src2);                                 \
-  const vector unsigned char vg##i =                            \
-    vec_perm(st##i, vbT##i, permHH);                            \
-  const vector unsigned char mask##i =                          \
-    vec_perm((vector unsigned char)zero, (vector unsigned char)neg1, perms##i); \
-  const vector unsigned char vg2##i =                           \
-    vec_perm(vg##i, vg##i, perms##i);                           \
-  const vector unsigned char svA##i =                           \
-    vec_sel(vbA##i, vg2##i, mask##i);                           \
-  const vector unsigned char svB##i =                           \
-    vec_sel(vg2##i, vbB##i, mask##i);                           \
-  vec_st(svA##i, i * stride, src2);                             \
-  vec_st(svB##i, i * stride + 16, src2)
-
-  STORE(4);
-  STORE(5);
+     const vector unsigned char perms##i =                      \
+         vec_lvsr(i * stride, src2);                            \
+     const vector unsigned char vg##i =                         \
+         vec_perm(st##i, vbT##i, permHH);                       \
+     const vector unsigned char mask##i =                       \
+         vec_perm((vector unsigned char)zero, (vector unsigned char)neg1, perms##i); \
+     const vector unsigned char vg2##i =                        \
+         vec_perm(vg##i, vg##i, perms##i);                      \
+     const vector unsigned char svA##i =                        \
+         vec_sel(vbA##i, vg2##i, mask##i);                      \
+     const vector unsigned char svB##i =                        \
+         vec_sel(vg2##i, vbB##i, mask##i);                      \
+     vec_st(svA##i, i * stride, src2);                          \
+     vec_st(svB##i, i * stride + 16, src2)
+
+     STORE(4);
+     STORE(5);
}
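The "for a shift right to behave like /2" comment above is worth a scalar illustration: an arithmetic right shift (what vec_sra() performs per element) rounds toward minus infinity, while the filter wants division that truncates toward zero, so one is added to negative values before shifting. A scalar model, not part of the file:

/* -3 >> 1 == -2 (rounds toward -inf), but -3 / 2 == -1 (truncates);
 * adding 1 to negative inputs first makes the shift match the division,
 * assuming an arithmetic right shift as on PowerPC. */
static inline int div2_trunc(int x)
{
    return (x + (x < 0)) >> 1;   /* equals x / 2 for any int x */
}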

static inline void dering_altivec(uint8_t src[], int stride, PPContext *c) {
-  /*
+    /*
    this code makes no assumption on src or stride.
    One could remove the recomputation of the perm
    vector by assuming (stride % 16) == 0, unfortunately
    this is not always true. Quite a lot of load/stores
    can be removed by assuming proper alignment of
    src & stride :-(
-  */
-  uint8_t *srcCopy = src;
-  DECLARE_ALIGNED(16, uint8_t, dt[16]);
-  const vector signed int zero = vec_splat_s32(0);
-  vector unsigned char v_dt;
-  dt[0] = deringThreshold;
-  v_dt = vec_splat(vec_ld(0, dt), 0);
+    */
+    uint8_t *srcCopy = src;
+    DECLARE_ALIGNED(16, uint8_t, dt[16]);
+    const vector signed int zero = vec_splat_s32(0);
+    vector unsigned char v_dt;
+    dt[0] = deringThreshold;
+    v_dt = vec_splat(vec_ld(0, dt), 0);

#define LOAD_LINE(i)                                                    \
-  const vector unsigned char perm##i =                                  \
-    vec_lvsl(i * stride, srcCopy);                                      \
-  vector unsigned char sA##i = vec_ld(i * stride, srcCopy);             \
-  vector unsigned char sB##i = vec_ld(i * stride + 16, srcCopy);        \
-  vector unsigned char src##i = vec_perm(sA##i, sB##i, perm##i)
-
-  LOAD_LINE(0);
-  LOAD_LINE(1);
-  LOAD_LINE(2);
-  LOAD_LINE(3);
-  LOAD_LINE(4);
-  LOAD_LINE(5);
-  LOAD_LINE(6);
-  LOAD_LINE(7);
-  LOAD_LINE(8);
-  LOAD_LINE(9);
+    const vector unsigned char perm##i =                               \
+        vec_lvsl(i * stride, srcCopy);                                 \
+    vector unsigned char sA##i = vec_ld(i * stride, srcCopy);          \
+    vector unsigned char sB##i = vec_ld(i * stride + 16, srcCopy);     \
+    vector unsigned char src##i = vec_perm(sA##i, sB##i, perm##i)
+
+    LOAD_LINE(0);
+    LOAD_LINE(1);
+    LOAD_LINE(2);
+    LOAD_LINE(3);
+    LOAD_LINE(4);
+    LOAD_LINE(5);
+    LOAD_LINE(6);
+    LOAD_LINE(7);
+    LOAD_LINE(8);
+    LOAD_LINE(9);
#undef LOAD_LINE

-  vector unsigned char v_avg;
-  {
+    vector unsigned char v_avg;
+    {
    const vector unsigned char trunc_perm = (vector unsigned char)
-      AVV(0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08,
-          0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18);
+        AVV(0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08,
+            0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18);
    const vector unsigned char trunc_src12 = vec_perm(src1, src2, trunc_perm);
    const vector unsigned char trunc_src34 = vec_perm(src3, src4, trunc_perm);
    const vector unsigned char trunc_src56 = vec_perm(src5, src6, trunc_perm);
    const vector unsigned char trunc_src78 = vec_perm(src7, src8, trunc_perm);

#define EXTRACT(op) do {                                                \
-      const vector unsigned char s##op##_1 = vec_##op(trunc_src12, trunc_src34); \
-      const vector unsigned char s##op##_2 = vec_##op(trunc_src56, trunc_src78); \
-      const vector unsigned char s##op##_6 = vec_##op(s##op##_1, s##op##_2); \
-      const vector unsigned char s##op##_8h = vec_mergeh(s##op##_6, s##op##_6); \
-      const vector unsigned char s##op##_8l = vec_mergel(s##op##_6, s##op##_6); \
-      const vector unsigned char s##op##_9 = vec_##op(s##op##_8h, s##op##_8l); \
-      const vector unsigned char s##op##_9h = vec_mergeh(s##op##_9, s##op##_9); \
-      const vector unsigned char s##op##_9l = vec_mergel(s##op##_9, s##op##_9); \
-      const vector unsigned char s##op##_10 = vec_##op(s##op##_9h, s##op##_9l); \
-      const vector unsigned char s##op##_10h = vec_mergeh(s##op##_10, s##op##_10); \
-      const vector unsigned char s##op##_10l = vec_mergel(s##op##_10, s##op##_10); \
-      const vector unsigned char s##op##_11 = vec_##op(s##op##_10h, s##op##_10l); \
-      const vector unsigned char s##op##_11h = vec_mergeh(s##op##_11, s##op##_11); \
-      const vector unsigned char s##op##_11l = vec_mergel(s##op##_11, s##op##_11); \
-      v_##op = vec_##op(s##op##_11h, s##op##_11l); } while (0)
+    const vector unsigned char s##op##_1   = vec_##op(trunc_src12, trunc_src34); \
+    const vector unsigned char s##op##_2   = vec_##op(trunc_src56, trunc_src78); \
+    const vector unsigned char s##op##_6   = vec_##op(s##op##_1, s##op##_2);     \
+    const vector unsigned char s##op##_8h  = vec_mergeh(s##op##_6, s##op##_6);   \
+    const vector unsigned char s##op##_8l  = vec_mergel(s##op##_6, s##op##_6);   \
+    const vector unsigned char s##op##_9   = vec_##op(s##op##_8h, s##op##_8l);   \
+    const vector unsigned char s##op##_9h  = vec_mergeh(s##op##_9, s##op##_9);   \
+    const vector unsigned char s##op##_9l  = vec_mergel(s##op##_9, s##op##_9);   \
+    const vector unsigned char s##op##_10  = vec_##op(s##op##_9h, s##op##_9l);   \
+    const vector unsigned char s##op##_10h = vec_mergeh(s##op##_10, s##op##_10); \
+    const vector unsigned char s##op##_10l = vec_mergel(s##op##_10, s##op##_10); \
+    const vector unsigned char s##op##_11  = vec_##op(s##op##_10h, s##op##_10l); \
+    const vector unsigned char s##op##_11h = vec_mergeh(s##op##_11, s##op##_11); \
+    const vector unsigned char s##op##_11l = vec_mergel(s##op##_11, s##op##_11); \
+    v_##op = vec_##op(s##op##_11h, s##op##_11l); } while (0)

    vector unsigned char v_min;
    vector unsigned char v_max;
......
#undef EXTRACT

    if (vec_all_lt(vec_sub(v_max, v_min), v_dt))
-      return;
+        return;

    v_avg = vec_avg(v_min, v_max);
-  }
+    }

-  DECLARE_ALIGNED(16, signed int, S[8]);
-  {
+    DECLARE_ALIGNED(16, signed int, S[8]);
+    {
    const vector unsigned short mask1 = (vector unsigned short)
-      AVV(0x0001, 0x0002, 0x0004, 0x0008,
-          0x0010, 0x0020, 0x0040, 0x0080);
+        AVV(0x0001, 0x0002, 0x0004, 0x0008,
+            0x0010, 0x0020, 0x0040, 0x0080);
    const vector unsigned short mask2 = (vector unsigned short)
-      AVV(0x0100, 0x0200, 0x0000, 0x0000,
-          0x0000, 0x0000, 0x0000, 0x0000);
+        AVV(0x0100, 0x0200, 0x0000, 0x0000,
+            0x0000, 0x0000, 0x0000, 0x0000);

    const vector unsigned int vuint32_16 = vec_sl(vec_splat_u32(1), vec_splat_u32(4));
    const vector unsigned int vuint32_1 = vec_splat_u32(1);
......
#define COMPARE(i)                                                      \
    vector signed int sum##i;                                           \
    do {                                                                \
-      const vector unsigned char cmp##i =                               \
-        (vector unsigned char)vec_cmpgt(src##i, v_avg);                 \
-      const vector unsigned short cmpHi##i =                            \
-        (vector unsigned short)vec_mergeh(cmp##i, cmp##i);              \
-      const vector unsigned short cmpLi##i =                            \
-        (vector unsigned short)vec_mergel(cmp##i, cmp##i);              \
-      const vector signed short cmpHf##i =                              \
-        (vector signed short)vec_and(cmpHi##i, mask1);                  \
-      const vector signed short cmpLf##i =                              \
-        (vector signed short)vec_and(cmpLi##i, mask2);                  \
-      const vector signed int sump##i = vec_sum4s(cmpHf##i, zero);      \
-      const vector signed int sumq##i = vec_sum4s(cmpLf##i, sump##i);   \
-      sum##i  = vec_sums(sumq##i, zero); } while (0)
+        const vector unsigned char cmp##i =                             \
+            (vector unsigned char)vec_cmpgt(src##i, v_avg);             \
+        const vector unsigned short cmpHi##i =                          \
+            (vector unsigned short)vec_mergeh(cmp##i, cmp##i);          \
+        const vector unsigned short cmpLi##i =                          \
+            (vector unsigned short)vec_mergel(cmp##i, cmp##i);          \
+        const vector signed short cmpHf##i =                            \
+            (vector signed short)vec_and(cmpHi##i, mask1);              \
+        const vector signed short cmpLf##i =                            \
+            (vector signed short)vec_and(cmpLi##i, mask2);              \
+        const vector signed int sump##i = vec_sum4s(cmpHf##i, zero);    \
+        const vector signed int sumq##i = vec_sum4s(cmpLf##i, sump##i); \
+        sum##i  = vec_sums(sumq##i, zero); } while (0)

    COMPARE(0);
    COMPARE(1);
......
    vector signed int sumA2;
    vector signed int sumB2;
    {
-      const vector signed int sump02 = vec_mergel(sum0, sum2);
-      const vector signed int sump13 = vec_mergel(sum1, sum3);
-      const vector signed int sumA = vec_mergel(sump02, sump13);
-
-      const vector signed int sump46 = vec_mergel(sum4, sum6);
-      const vector signed int sump57 = vec_mergel(sum5, sum7);
-      const vector signed int sumB = vec_mergel(sump46, sump57);
-
-      const vector signed int sump8A = vec_mergel(sum8, zero);
-      const vector signed int sump9B = vec_mergel(sum9, zero);
-      const vector signed int sumC = vec_mergel(sump8A, sump9B);
-
-      const vector signed int tA = vec_sl(vec_nor(zero, sumA), vuint32_16);
-      const vector signed int tB = vec_sl(vec_nor(zero, sumB), vuint32_16);
-      const vector signed int tC = vec_sl(vec_nor(zero, sumC), vuint32_16);
-      const vector signed int t2A = vec_or(sumA, tA);
-      const vector signed int t2B = vec_or(sumB, tB);
-      const vector signed int t2C = vec_or(sumC, tC);
-      const vector signed int t3A = vec_and(vec_sra(t2A, vuint32_1),
-                                            vec_sl(t2A, vuint32_1));
-      const vector signed int t3B = vec_and(vec_sra(t2B, vuint32_1),
-                                            vec_sl(t2B, vuint32_1));
-      const vector signed int t3C = vec_and(vec_sra(t2C, vuint32_1),
-                                            vec_sl(t2C, vuint32_1));
-      const vector signed int yA = vec_and(t2A, t3A);
-      const vector signed int yB = vec_and(t2B, t3B);
-      const vector signed int yC = vec_and(t2C, t3C);
-
-      const vector unsigned char strangeperm1 = vec_lvsl(4, (unsigned char*)0);
-      const vector unsigned char strangeperm2 = vec_lvsl(8, (unsigned char*)0);
-      const vector signed int sumAd4 = vec_perm(yA, yB, strangeperm1);
-      const vector signed int sumAd8 = vec_perm(yA, yB, strangeperm2);
-      const vector signed int sumBd4 = vec_perm(yB, yC, strangeperm1);
-      const vector signed int sumBd8 = vec_perm(yB, yC, strangeperm2);
-      const vector signed int sumAp = vec_and(yA,
-                                              vec_and(sumAd4,sumAd8));
-      const vector signed int sumBp = vec_and(yB,
-                                              vec_and(sumBd4,sumBd8));
-      sumA2 = vec_or(sumAp,
-                     vec_sra(sumAp,
-                             vuint32_16));
-      sumB2  = vec_or(sumBp,
-                      vec_sra(sumBp,
-                              vuint32_16));
+    const vector signed int sump02 = vec_mergel(sum0, sum2);
+    const vector signed int sump13 = vec_mergel(sum1, sum3);
+    const vector signed int sumA = vec_mergel(sump02, sump13);
+
+    const vector signed int sump46 = vec_mergel(sum4, sum6);
+    const vector signed int sump57 = vec_mergel(sum5, sum7);
+    const vector signed int sumB = vec_mergel(sump46, sump57);
+
+    const vector signed int sump8A = vec_mergel(sum8, zero);
+    const vector signed int sump9B = vec_mergel(sum9, zero);
+    const vector signed int sumC = vec_mergel(sump8A, sump9B);
+
+    const vector signed int tA = vec_sl(vec_nor(zero, sumA), vuint32_16);
+    const vector signed int tB = vec_sl(vec_nor(zero, sumB), vuint32_16);
+    const vector signed int tC = vec_sl(vec_nor(zero, sumC), vuint32_16);
+    const vector signed int t2A = vec_or(sumA, tA);
+    const vector signed int t2B = vec_or(sumB, tB);
+    const vector signed int t2C = vec_or(sumC, tC);
+    const vector signed int t3A = vec_and(vec_sra(t2A, vuint32_1),
+                                          vec_sl(t2A, vuint32_1));
+    const vector signed int t3B = vec_and(vec_sra(t2B, vuint32_1),
+                                          vec_sl(t2B, vuint32_1));
+    const vector signed int t3C = vec_and(vec_sra(t2C, vuint32_1),
+                                          vec_sl(t2C, vuint32_1));
+    const vector signed int yA = vec_and(t2A, t3A);
+    const vector signed int yB = vec_and(t2B, t3B);
+    const vector signed int yC = vec_and(t2C, t3C);
+
+    const vector unsigned char strangeperm1 = vec_lvsl(4, (unsigned char*)0);
+    const vector unsigned char strangeperm2 = vec_lvsl(8, (unsigned char*)0);
+    const vector signed int sumAd4 = vec_perm(yA, yB, strangeperm1);
651
    const vector signed int sumAd8 = vec_perm(yA, yB, strangeperm2);
652
    const vector signed int sumBd4 = vec_perm(yB, yC, strangeperm1);
653
    const vector signed int sumBd8 = vec_perm(yB, yC, strangeperm2);
654
    const vector signed int sumAp = vec_and(yA,
655
                                            vec_and(sumAd4,sumAd8));
656
    const vector signed int sumBp = vec_and(yB,
657
                                            vec_and(sumBd4,sumBd8));
658
    sumA2 = vec_or(sumAp,
659
                   vec_sra(sumAp,
660
                           vuint32_16));
661
    sumB2  = vec_or(sumBp,
662
                    vec_sra(sumBp,
663
                            vuint32_16));
665 664
    }
666 665
    vec_st(sumA2, 0, S);
667 666
    vec_st(sumB2, 16, S);
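
A hedged reading of the block just stored to S: t2A/t2B/t2C hold each row's "above average" mask in their low 16 bits and its complement in the high 16; the shift-and-AND step then keeps a bit only when both horizontal neighbors agree, and the strangeperm/vec_and step additionally requires the two rows below to agree, so S[] ends up flagging pixels whose 3x3 neighborhood lies entirely on one side of the average. The core bit trick, as a hypothetical scalar helper:

    /* Keep bit k only when bits k-1 and k+1 are also set, i.e. only runs
     * of at least three equal decisions survive.  (The vector code uses
     * an arithmetic right shift; the difference only affects the top
     * bit.) */
    static unsigned keep_runs_of_three(unsigned t2)
    {
        return t2 & (t2 >> 1) & (t2 << 1);
    }
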
668
  }
669

  
670
  /* I'm not sure the following is actually faster
671
     than straight, unvectorized C code :-( */
672

  
673
  DECLARE_ALIGNED(16, int, tQP2[4]);
674
  tQP2[0]= c->QP/2 + 1;
675
  vector signed int vQP2 = vec_ld(0, tQP2);
676
  vQP2 = vec_splat(vQP2, 0);
677
  const vector signed int vsint32_8 = vec_splat_s32(8);
678
  const vector unsigned int vuint32_4 = vec_splat_u32(4);
679

  
680
  const vector unsigned char permA1 = (vector unsigned char)
681
    AVV(0x00, 0x01, 0x02, 0x10, 0x11, 0x12, 0x1F, 0x1F,
682
        0x1F, 0x1F, 0x1F, 0x1F, 0x1F, 0x1F, 0x1F, 0x1F);
683
  const vector unsigned char permA2 = (vector unsigned char)
684
    AVV(0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x10, 0x11,
685
        0x12, 0x1F, 0x1F, 0x1F, 0x1F, 0x1F, 0x1F, 0x1F);
686
  const vector unsigned char permA1inc = (vector unsigned char)
687
    AVV(0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x00, 0x00,
688
        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00);
689
  const vector unsigned char permA2inc = (vector unsigned char)
690
    AVV(0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x01,
691
        0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00);
692
  const vector unsigned char magic = (vector unsigned char)
693
    AVV(0x01, 0x02, 0x01, 0x02, 0x04, 0x02, 0x01, 0x02,
694
        0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00);
695
  const vector unsigned char extractPerm = (vector unsigned char)
696
    AVV(0x10, 0x10, 0x10, 0x01, 0x10, 0x10, 0x10, 0x01,
697
        0x10, 0x10, 0x10, 0x01, 0x10, 0x10, 0x10, 0x01);
698
  const vector unsigned char extractPermInc = (vector unsigned char)
699
    AVV(0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x01,
700
        0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x01);
701
  const vector unsigned char identity = vec_lvsl(0,(unsigned char *)0);
702
  const vector unsigned char tenRight = (vector unsigned char)
703
    AVV(0x00, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
704
        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00);
705
  const vector unsigned char eightLeft = (vector unsigned char)
706
    AVV(0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
707
        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x08);
667
    }
668

  
669
    /* I'm not sure the following is actually faster
670
       than straight, unvectorized C code :-( */
671

  
672
    DECLARE_ALIGNED(16, int, tQP2[4]);
673
    tQP2[0]= c->QP/2 + 1;
674
    vector signed int vQP2 = vec_ld(0, tQP2);
675
    vQP2 = vec_splat(vQP2, 0);
676
    const vector signed int vsint32_8 = vec_splat_s32(8);
677
    const vector unsigned int vuint32_4 = vec_splat_u32(4);
678

  
679
    const vector unsigned char permA1 = (vector unsigned char)
680
        AVV(0x00, 0x01, 0x02, 0x10, 0x11, 0x12, 0x1F, 0x1F,
681
            0x1F, 0x1F, 0x1F, 0x1F, 0x1F, 0x1F, 0x1F, 0x1F);
682
    const vector unsigned char permA2 = (vector unsigned char)
683
        AVV(0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x10, 0x11,
684
            0x12, 0x1F, 0x1F, 0x1F, 0x1F, 0x1F, 0x1F, 0x1F);
685
    const vector unsigned char permA1inc = (vector unsigned char)
686
        AVV(0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x00, 0x00,
687
            0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00);
688
    const vector unsigned char permA2inc = (vector unsigned char)
689
        AVV(0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x01,
690
            0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00);
691
    const vector unsigned char magic = (vector unsigned char)
692
        AVV(0x01, 0x02, 0x01, 0x02, 0x04, 0x02, 0x01, 0x02,
693
            0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00);
694
    const vector unsigned char extractPerm = (vector unsigned char)
695
        AVV(0x10, 0x10, 0x10, 0x01, 0x10, 0x10, 0x10, 0x01,
696
            0x10, 0x10, 0x10, 0x01, 0x10, 0x10, 0x10, 0x01);
697
    const vector unsigned char extractPermInc = (vector unsigned char)
698
        AVV(0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x01,
699
            0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x01);
700
    const vector unsigned char identity = vec_lvsl(0,(unsigned char *)0);
701
    const vector unsigned char tenRight = (vector unsigned char)
702
        AVV(0x00, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
703
            0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00);
704
    const vector unsigned char eightLeft = (vector unsigned char)
705
        AVV(0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
706
            0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x08);
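
Worth noting about the constants above: permA1/permA2 gather three bytes from each of rows i, j and k into one vector, and "magic" supplies the weights for vec_msum. Laid out two-dimensionally (an observation, not code from the file), the weights are the classic 1-2-1 smoothing kernel; they sum to 16, which matches the +8 rounding bias (vsint32_8) and the shift by 4 (vuint32_4) applied in F2 below:

    static const int dering_kernel[3][3] = {
        { 1, 2, 1 },
        { 2, 4, 2 },   /* all nine weights sum to 16 */
        { 1, 2, 1 },
    };
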
708 707

  
709 708

  
710 709
#define F_INIT(i)                                       \
711
  vector unsigned char tenRightM##i = tenRight;         \
712
  vector unsigned char permA1M##i = permA1;             \
713
  vector unsigned char permA2M##i = permA2;             \
714
  vector unsigned char extractPermM##i = extractPerm
710
    vector unsigned char tenRightM##i = tenRight;       \
711
    vector unsigned char permA1M##i = permA1;           \
712
    vector unsigned char permA2M##i = permA2;           \
713
    vector unsigned char extractPermM##i = extractPerm
715 714

  
716 715
#define F2(i, j, k, l)                                                  \
717
  if (S[i] & (1 << (l+1))) {                                            \
718
    const vector unsigned char a_##j##_A##l =                           \
719
      vec_perm(src##i, src##j, permA1M##i);                             \
720
    const vector unsigned char a_##j##_B##l =                           \
721
      vec_perm(a_##j##_A##l, src##k, permA2M##i);                       \
722
    const vector signed int a_##j##_sump##l =                           \
723
      (vector signed int)vec_msum(a_##j##_B##l, magic,                  \
724
                                  (vector unsigned int)zero);           \
725
    vector signed int F_##j##_##l =                                     \
726
      vec_sr(vec_sums(a_##j##_sump##l, vsint32_8), vuint32_4);          \
727
    F_##j##_##l = vec_splat(F_##j##_##l, 3);                            \
728
    const vector signed int p_##j##_##l =                               \
729
      (vector signed int)vec_perm(src##j,                               \
730
                                  (vector unsigned char)zero,           \
731
                                  extractPermM##i);                     \
732
    const vector signed int sum_##j##_##l = vec_add( p_##j##_##l, vQP2);\
733
    const vector signed int diff_##j##_##l = vec_sub( p_##j##_##l, vQP2);\
734
    vector signed int newpm_##j##_##l;                                  \
735
    if (vec_all_lt(sum_##j##_##l, F_##j##_##l))                         \
736
      newpm_##j##_##l = sum_##j##_##l;                                  \
737
    else if (vec_all_gt(diff_##j##_##l, F_##j##_##l))                   \
738
      newpm_##j##_##l = diff_##j##_##l;                                 \
739
    else newpm_##j##_##l = F_##j##_##l;                                 \
740
    const vector unsigned char newpm2_##j##_##l =                       \
741
      vec_splat((vector unsigned char)newpm_##j##_##l, 15);             \
742
    const vector unsigned char mask##j##l = vec_add(identity,           \
743
                                                    tenRightM##i);      \
744
    src##j = vec_perm(src##j, newpm2_##j##_##l, mask##j##l);            \
745
  }                                                                     \
746
  permA1M##i = vec_add(permA1M##i, permA1inc);                          \
747
  permA2M##i = vec_add(permA2M##i, permA2inc);                          \
748
  tenRightM##i = vec_sro(tenRightM##i, eightLeft);                      \
749
  extractPermM##i = vec_add(extractPermM##i, extractPermInc)
716
    if (S[i] & (1 << (l+1))) {                                          \
717
        const vector unsigned char a_##j##_A##l =                       \
718
            vec_perm(src##i, src##j, permA1M##i);                       \
719
        const vector unsigned char a_##j##_B##l =                       \
720
            vec_perm(a_##j##_A##l, src##k, permA2M##i);                 \
721
        const vector signed int a_##j##_sump##l =                       \
722
            (vector signed int)vec_msum(a_##j##_B##l, magic,            \
723
                                        (vector unsigned int)zero);     \
724
        vector signed int F_##j##_##l =                                 \
725
            vec_sr(vec_sums(a_##j##_sump##l, vsint32_8), vuint32_4);    \
726
        F_##j##_##l = vec_splat(F_##j##_##l, 3);                        \
727
        const vector signed int p_##j##_##l =                           \
728
            (vector signed int)vec_perm(src##j,                         \
729
                                        (vector unsigned char)zero,     \
730
                                        extractPermM##i);               \
731
        const vector signed int sum_##j##_##l  = vec_add( p_##j##_##l, vQP2);\
732
        const vector signed int diff_##j##_##l = vec_sub( p_##j##_##l, vQP2);\
733
        vector signed int newpm_##j##_##l;                              \
734
        if (vec_all_lt(sum_##j##_##l, F_##j##_##l))                     \
735
            newpm_##j##_##l = sum_##j##_##l;                            \
736
        else if (vec_all_gt(diff_##j##_##l, F_##j##_##l))               \
737
            newpm_##j##_##l = diff_##j##_##l;                           \
738
        else newpm_##j##_##l = F_##j##_##l;                             \
739
        const vector unsigned char newpm2_##j##_##l =                   \
740
            vec_splat((vector unsigned char)newpm_##j##_##l, 15);       \
741
        const vector unsigned char mask##j##l = vec_add(identity,       \
742
                                                        tenRightM##i);  \
743
        src##j = vec_perm(src##j, newpm2_##j##_##l, mask##j##l);        \
744
    }                                                                   \
745
    permA1M##i = vec_add(permA1M##i, permA1inc);                        \
746
    permA2M##i = vec_add(permA2M##i, permA2inc);                        \
747
    tenRightM##i = vec_sro(tenRightM##i, eightLeft);                    \
748
    extractPermM##i = vec_add(extractPermM##i, extractPermInc)
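
Putting F2 together, a hedged scalar sketch of one step (helper and names hypothetical): the 3x3 weighted average F is formed and rounded, and the centre pixel may move toward F by at most QP/2 + 1. ITER below drives this for l = 0..7, guarded by the per-pixel decision bits stored in S[] earlier:

    #include <stdint.h>

    /* One F2 step on the centre pixel p[0]; stride addresses the rows
     * above and below it. */
    static int f2_pixel_c(const uint8_t *p, int stride, int qp2)
    {
        int F = (1*p[-stride-1] + 2*p[-stride] + 1*p[-stride+1] +
                 2*p[-1]        + 4*p[ 0]      + 2*p[+1]        +
                 1*p[ stride-1] + 2*p[ stride] + 1*p[ stride+1] + 8) >> 4;
        if      (p[0] + qp2 < F) return p[0] + qp2; /* clamp upward move   */
        else if (p[0] - qp2 > F) return p[0] - qp2; /* clamp downward move */
        else                     return F;
    }
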
750 749

  
751 750
#define ITER(i, j, k)                           \
752
  F_INIT(i);                                    \
753
  F2(i, j, k, 0);                               \
754
  F2(i, j, k, 1);                               \
755
  F2(i, j, k, 2);                               \
756
  F2(i, j, k, 3);                               \
757
  F2(i, j, k, 4);                               \
758
  F2(i, j, k, 5);                               \
759
  F2(i, j, k, 6);                               \
760
  F2(i, j, k, 7)
761

  
762
  ITER(0, 1, 2);
763
  ITER(1, 2, 3);
764
  ITER(2, 3, 4);
765
  ITER(3, 4, 5);
766
  ITER(4, 5, 6);
767
  ITER(5, 6, 7);
768
  ITER(6, 7, 8);
769
  ITER(7, 8, 9);
770

  
771
  const vector signed char neg1 = vec_splat_s8(-1);
751
    F_INIT(i);                                  \
752
    F2(i, j, k, 0);                             \
753
    F2(i, j, k, 1);                             \
754
    F2(i, j, k, 2);                             \
755
    F2(i, j, k, 3);                             \
756
    F2(i, j, k, 4);                             \
757
    F2(i, j, k, 5);                             \
758
    F2(i, j, k, 6);                             \
759
    F2(i, j, k, 7)
760

  
761
    ITER(0, 1, 2);
762
    ITER(1, 2, 3);
763
    ITER(2, 3, 4);
764
    ITER(3, 4, 5);
765
    ITER(4, 5, 6);
766
    ITER(5, 6, 7);
767
    ITER(6, 7, 8);
768
    ITER(7, 8, 9);
769

  
770
    const vector signed char neg1 = vec_splat_s8(-1);
772 771

  
773 772
#define STORE_LINE(i)                                   \
774
  const vector unsigned char permST##i =                \
775
    vec_lvsr(i * stride, srcCopy);                      \
776
  const vector unsigned char maskST##i =                \
777
    vec_perm((vector unsigned char)zero,                \
778
             (vector unsigned char)neg1, permST##i);    \
779
  src##i = vec_perm(src##i ,src##i, permST##i);         \
780
  sA##i= vec_sel(sA##i, src##i, maskST##i);             \
781
  sB##i= vec_sel(src##i, sB##i, maskST##i);             \
782
  vec_st(sA##i, i * stride, srcCopy);                   \
783
  vec_st(sB##i, i * stride + 16, srcCopy)
784

  
785
  STORE_LINE(1);
786
  STORE_LINE(2);
787
  STORE_LINE(3);
788
  STORE_LINE(4);
789
  STORE_LINE(5);
790
  STORE_LINE(6);
791
  STORE_LINE(7);
792
  STORE_LINE(8);
773
    const vector unsigned char permST##i =              \
774
        vec_lvsr(i * stride, srcCopy);                  \
775
    const vector unsigned char maskST##i =              \
776
        vec_perm((vector unsigned char)zero,            \
777
                 (vector unsigned char)neg1, permST##i);\
778
    src##i = vec_perm(src##i ,src##i, permST##i);       \
779
    sA##i= vec_sel(sA##i, src##i, maskST##i);           \
780
    sB##i= vec_sel(src##i, sB##i, maskST##i);           \
781
    vec_st(sA##i, i * stride, srcCopy);                 \
782
    vec_st(sB##i, i * stride + 16, srcCopy)
783

  
784
    STORE_LINE(1);
785
    STORE_LINE(2);
786
    STORE_LINE(3);
787
    STORE_LINE(4);
788
    STORE_LINE(5);
789
    STORE_LINE(6);
790
    STORE_LINE(7);
791
    STORE_LINE(8);
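
STORE_LINE is the standard AltiVec read-modify-write idiom for unaligned stores: vec_st can only hit 16-byte-aligned addresses, so the result is rotated with a vec_lvsr permute and blended into the two aligned quadwords straddling the destination. A self-contained sketch of the idiom (hypothetical helper):

    #include <altivec.h>
    #include <stdint.h>

    static void store_unaligned(vector unsigned char v, uint8_t *dst)
    {
        vector unsigned char perm = vec_lvsr(0, dst);
        vector unsigned char mask =
            vec_perm(vec_splat_u8(0),
                     (vector unsigned char)vec_splat_s8(-1), perm);
        vector unsigned char lo = vec_ld(0,  dst); /* aligned neighbors  */
        vector unsigned char hi = vec_ld(16, dst);
        v = vec_perm(v, v, perm);                  /* rotate into place  */
        vec_st(vec_sel(lo, v, mask), 0,  dst);     /* patch first block  */
        vec_st(vec_sel(v, hi, mask), 16, dst);     /* patch second block */
    }

In the macro itself the surrounding quadwords sA##i/sB##i were loaded earlier, so no extra vec_ld pair is needed there.
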
793 792

  
794 793
#undef STORE_LINE
795 794
#undef ITER
......
801 800
#define do_a_deblock_altivec(a...) do_a_deblock_C(a)
802 801

  
803 802
static inline void RENAME(tempNoiseReducer)(uint8_t *src, int stride,
804
                                    uint8_t *tempBlured, uint32_t *tempBluredPast, int *maxNoise)
803
                                            uint8_t *tempBlured, uint32_t *tempBluredPast, int *maxNoise)
805 804
{
806
  const vector signed int zero = vec_splat_s32(0);
807
  const vector signed short vsint16_1 = vec_splat_s16(1);
808
  vector signed int v_dp = zero;
809
  vector signed int v_sysdp = zero;
810
  int d, sysd, i;
805
    const vector signed int zero = vec_splat_s32(0);
806
    const vector signed short vsint16_1 = vec_splat_s16(1);
807
    vector signed int v_dp = zero;
808
    vector signed int v_sysdp = zero;
809
    int d, sysd, i;
811 810

  
812
  tempBluredPast[127]= maxNoise[0];
813
  tempBluredPast[128]= maxNoise[1];
814
  tempBluredPast[129]= maxNoise[2];
811
    tempBluredPast[127]= maxNoise[0];
812
    tempBluredPast[128]= maxNoise[1];
813
    tempBluredPast[129]= maxNoise[2];
815 814

  
816 815
#define LOAD_LINE(src, i)                                               \
817
  register int j##src##i = i * stride;                                  \
818
  vector unsigned char perm##src##i = vec_lvsl(j##src##i, src);         \
819
  const vector unsigned char v_##src##A1##i = vec_ld(j##src##i, src);   \
820
  const vector unsigned char v_##src##A2##i = vec_ld(j##src##i + 16, src); \
821
  const vector unsigned char v_##src##A##i =                            \
822
    vec_perm(v_##src##A1##i, v_##src##A2##i, perm##src##i);             \
823
  vector signed short v_##src##Ass##i =                                 \
824
    (vector signed short)vec_mergeh((vector signed char)zero,           \
825
                                    (vector signed char)v_##src##A##i)
826

  
827
  LOAD_LINE(src, 0);
828
  LOAD_LINE(src, 1);
829
  LOAD_LINE(src, 2);
830
  LOAD_LINE(src, 3);
831
  LOAD_LINE(src, 4);
832
  LOAD_LINE(src, 5);
833
  LOAD_LINE(src, 6);
834
  LOAD_LINE(src, 7);
835

  
836
  LOAD_LINE(tempBlured, 0);
837
  LOAD_LINE(tempBlured, 1);
838
  LOAD_LINE(tempBlured, 2);
839
  LOAD_LINE(tempBlured, 3);
840
  LOAD_LINE(tempBlured, 4);
841
  LOAD_LINE(tempBlured, 5);
842
  LOAD_LINE(tempBlured, 6);
843
  LOAD_LINE(tempBlured, 7);
816
    register int j##src##i = i * stride;                                \
817
    vector unsigned char perm##src##i = vec_lvsl(j##src##i, src);       \
818
    const vector unsigned char v_##src##A1##i = vec_ld(j##src##i, src); \
819
    const vector unsigned char v_##src##A2##i = vec_ld(j##src##i + 16, src); \
820
    const vector unsigned char v_##src##A##i =                          \
821
        vec_perm(v_##src##A1##i, v_##src##A2##i, perm##src##i);         \
822
    vector signed short v_##src##Ass##i =                               \
823
        (vector signed short)vec_mergeh((vector signed char)zero,       \
824
                                        (vector signed char)v_##src##A##i)
825

  
826
    LOAD_LINE(src, 0);
827
    LOAD_LINE(src, 1);
828
    LOAD_LINE(src, 2);
829
    LOAD_LINE(src, 3);
830
    LOAD_LINE(src, 4);
831
    LOAD_LINE(src, 5);
832
    LOAD_LINE(src, 6);
833
    LOAD_LINE(src, 7);
834

  
835
    LOAD_LINE(tempBlured, 0);
836
    LOAD_LINE(tempBlured, 1);
837
    LOAD_LINE(tempBlured, 2);
838
    LOAD_LINE(tempBlured, 3);
839
    LOAD_LINE(tempBlured, 4);
840
    LOAD_LINE(tempBlured, 5);
841
    LOAD_LINE(tempBlured, 6);
842
    LOAD_LINE(tempBlured, 7);
844 843
#undef LOAD_LINE
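
LOAD_LINE is the matching unaligned-load idiom plus a widening step: two aligned loads are stitched together with vec_lvsl/vec_perm, and the row's first eight bytes are zero-extended to 16-bit lanes by merging a zero vector in as the high-order bytes (big-endian lane order). A hedged sketch (hypothetical helper):

    #include <altivec.h>
    #include <stdint.h>

    static vector signed short load_row_s16(uint8_t *p)
    {
        vector unsigned char perm = vec_lvsl(0, p);
        vector unsigned char a    = vec_ld(0,  p);
        vector unsigned char b    = vec_ld(16, p);
        vector unsigned char row  = vec_perm(a, b, perm);
        /* zero byte merged in first => zero-extension of each pixel */
        return (vector signed short)vec_mergeh(vec_splat_u8(0), row);
    }
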
845 844

  
846 845
#define ACCUMULATE_DIFFS(i)                                     \
847
  vector signed short v_d##i = vec_sub(v_tempBluredAss##i,      \
848
                                       v_srcAss##i);            \
849
  v_dp = vec_msums(v_d##i, v_d##i, v_dp);                       \
850
  v_sysdp = vec_msums(v_d##i, vsint16_1, v_sysdp)
851

  
852
  ACCUMULATE_DIFFS(0);
853
  ACCUMULATE_DIFFS(1);
854
  ACCUMULATE_DIFFS(2);
855
  ACCUMULATE_DIFFS(3);
856
  ACCUMULATE_DIFFS(4);
857
  ACCUMULATE_DIFFS(5);
858
  ACCUMULATE_DIFFS(6);
859
  ACCUMULATE_DIFFS(7);
846
    vector signed short v_d##i = vec_sub(v_tempBluredAss##i,    \
847
                                         v_srcAss##i);          \
848
    v_dp = vec_msums(v_d##i, v_d##i, v_dp);                     \
849
    v_sysdp = vec_msums(v_d##i, vsint16_1, v_sysdp)
850

  
851
    ACCUMULATE_DIFFS(0);
852
    ACCUMULATE_DIFFS(1);
853
    ACCUMULATE_DIFFS(2);
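
For reference, a hedged scalar equivalent of ACCUMULATE_DIFFS over one 8-pixel row (names hypothetical): v_dp collects the sum of squared differences between the blurred history and the current frame, v_sysdp the plain signed sum, both via vec_msums:

    #include <stdint.h>

    static void accumulate_diffs_c(const uint8_t *src, const uint8_t *blurred,
                                   int *dp, int *sysdp)
    {
        int k;
        for (k = 0; k < 8; k++) {
            int d = blurred[k] - src[k];
            *dp    += d * d;  /* vec_msums(v_d, v_d, v_dp)          */
            *sysdp += d;      /* vec_msums(v_d, vsint16_1, v_sysdp) */
        }
    }
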
... This diff was truncated because it exceeds the maximum size that can be displayed.
