Revision 84dc2d8a libavcodec/ppc/h264_altivec.c

--- a/libavcodec/ppc/h264_altivec.c
+++ b/libavcodec/ppc/h264_altivec.c
@@ -79,7 +79,7 @@
 }\
 \
 static void OPNAME ## h264_qpel ## SIZE ## _mc10_ ## CODETYPE(uint8_t *dst, uint8_t *src, int stride){ \
-    DECLARE_ALIGNED_16(uint8_t, half)[SIZE*SIZE];\
+    DECLARE_ALIGNED(16, uint8_t, half)[SIZE*SIZE];\
     put_h264_qpel ## SIZE ## _h_lowpass_ ## CODETYPE(half, src, SIZE, stride);\
     OPNAME ## pixels ## SIZE ## _l2_ ## CODETYPE(dst, src, half, stride, stride, SIZE);\
 }\
@@ -89,13 +89,13 @@
 }\
 \
 static void OPNAME ## h264_qpel ## SIZE ## _mc30_ ## CODETYPE(uint8_t *dst, uint8_t *src, int stride){\
-    DECLARE_ALIGNED_16(uint8_t, half)[SIZE*SIZE];\
+    DECLARE_ALIGNED(16, uint8_t, half)[SIZE*SIZE];\
     put_h264_qpel ## SIZE ## _h_lowpass_ ## CODETYPE(half, src, SIZE, stride);\
     OPNAME ## pixels ## SIZE ## _l2_ ## CODETYPE(dst, src+1, half, stride, stride, SIZE);\
 }\
 \
 static void OPNAME ## h264_qpel ## SIZE ## _mc01_ ## CODETYPE(uint8_t *dst, uint8_t *src, int stride){\
-    DECLARE_ALIGNED_16(uint8_t, half)[SIZE*SIZE];\
+    DECLARE_ALIGNED(16, uint8_t, half)[SIZE*SIZE];\
     put_h264_qpel ## SIZE ## _v_lowpass_ ## CODETYPE(half, src, SIZE, stride);\
     OPNAME ## pixels ## SIZE ## _l2_ ## CODETYPE(dst, src, half, stride, stride, SIZE);\
 }\
@@ -105,79 +105,79 @@
 }\
 \
 static void OPNAME ## h264_qpel ## SIZE ## _mc03_ ## CODETYPE(uint8_t *dst, uint8_t *src, int stride){\
-    DECLARE_ALIGNED_16(uint8_t, half)[SIZE*SIZE];\
+    DECLARE_ALIGNED(16, uint8_t, half)[SIZE*SIZE];\
     put_h264_qpel ## SIZE ## _v_lowpass_ ## CODETYPE(half, src, SIZE, stride);\
     OPNAME ## pixels ## SIZE ## _l2_ ## CODETYPE(dst, src+stride, half, stride, stride, SIZE);\
 }\
 \
 static void OPNAME ## h264_qpel ## SIZE ## _mc11_ ## CODETYPE(uint8_t *dst, uint8_t *src, int stride){\
-    DECLARE_ALIGNED_16(uint8_t, halfH)[SIZE*SIZE];\
-    DECLARE_ALIGNED_16(uint8_t, halfV)[SIZE*SIZE];\
+    DECLARE_ALIGNED(16, uint8_t, halfH)[SIZE*SIZE];\
+    DECLARE_ALIGNED(16, uint8_t, halfV)[SIZE*SIZE];\
     put_h264_qpel ## SIZE ## _h_lowpass_ ## CODETYPE(halfH, src, SIZE, stride);\
     put_h264_qpel ## SIZE ## _v_lowpass_ ## CODETYPE(halfV, src, SIZE, stride);\
     OPNAME ## pixels ## SIZE ## _l2_ ## CODETYPE(dst, halfH, halfV, stride, SIZE, SIZE);\
 }\
 \
 static void OPNAME ## h264_qpel ## SIZE ## _mc31_ ## CODETYPE(uint8_t *dst, uint8_t *src, int stride){\
-    DECLARE_ALIGNED_16(uint8_t, halfH)[SIZE*SIZE];\
-    DECLARE_ALIGNED_16(uint8_t, halfV)[SIZE*SIZE];\
+    DECLARE_ALIGNED(16, uint8_t, halfH)[SIZE*SIZE];\
+    DECLARE_ALIGNED(16, uint8_t, halfV)[SIZE*SIZE];\
     put_h264_qpel ## SIZE ## _h_lowpass_ ## CODETYPE(halfH, src, SIZE, stride);\
     put_h264_qpel ## SIZE ## _v_lowpass_ ## CODETYPE(halfV, src+1, SIZE, stride);\
     OPNAME ## pixels ## SIZE ## _l2_ ## CODETYPE(dst, halfH, halfV, stride, SIZE, SIZE);\
 }\
 \
 static void OPNAME ## h264_qpel ## SIZE ## _mc13_ ## CODETYPE(uint8_t *dst, uint8_t *src, int stride){\
-    DECLARE_ALIGNED_16(uint8_t, halfH)[SIZE*SIZE];\
-    DECLARE_ALIGNED_16(uint8_t, halfV)[SIZE*SIZE];\
+    DECLARE_ALIGNED(16, uint8_t, halfH)[SIZE*SIZE];\
+    DECLARE_ALIGNED(16, uint8_t, halfV)[SIZE*SIZE];\
     put_h264_qpel ## SIZE ## _h_lowpass_ ## CODETYPE(halfH, src + stride, SIZE, stride);\
     put_h264_qpel ## SIZE ## _v_lowpass_ ## CODETYPE(halfV, src, SIZE, stride);\
     OPNAME ## pixels ## SIZE ## _l2_ ## CODETYPE(dst, halfH, halfV, stride, SIZE, SIZE);\
 }\
 \
 static void OPNAME ## h264_qpel ## SIZE ## _mc33_ ## CODETYPE(uint8_t *dst, uint8_t *src, int stride){\
-    DECLARE_ALIGNED_16(uint8_t, halfH)[SIZE*SIZE];\
-    DECLARE_ALIGNED_16(uint8_t, halfV)[SIZE*SIZE];\
+    DECLARE_ALIGNED(16, uint8_t, halfH)[SIZE*SIZE];\
+    DECLARE_ALIGNED(16, uint8_t, halfV)[SIZE*SIZE];\
     put_h264_qpel ## SIZE ## _h_lowpass_ ## CODETYPE(halfH, src + stride, SIZE, stride);\
     put_h264_qpel ## SIZE ## _v_lowpass_ ## CODETYPE(halfV, src+1, SIZE, stride);\
     OPNAME ## pixels ## SIZE ## _l2_ ## CODETYPE(dst, halfH, halfV, stride, SIZE, SIZE);\
 }\
 \
 static void OPNAME ## h264_qpel ## SIZE ## _mc22_ ## CODETYPE(uint8_t *dst, uint8_t *src, int stride){\
-    DECLARE_ALIGNED_16(int16_t, tmp)[SIZE*(SIZE+8)];\
+    DECLARE_ALIGNED(16, int16_t, tmp)[SIZE*(SIZE+8)];\
     OPNAME ## h264_qpel ## SIZE ## _hv_lowpass_ ## CODETYPE(dst, tmp, src, stride, SIZE, stride);\
 }\
 \
 static void OPNAME ## h264_qpel ## SIZE ## _mc21_ ## CODETYPE(uint8_t *dst, uint8_t *src, int stride){\
-    DECLARE_ALIGNED_16(uint8_t, halfH)[SIZE*SIZE];\
-    DECLARE_ALIGNED_16(uint8_t, halfHV)[SIZE*SIZE];\
-    DECLARE_ALIGNED_16(int16_t, tmp)[SIZE*(SIZE+8)];\
+    DECLARE_ALIGNED(16, uint8_t, halfH)[SIZE*SIZE];\
+    DECLARE_ALIGNED(16, uint8_t, halfHV)[SIZE*SIZE];\
+    DECLARE_ALIGNED(16, int16_t, tmp)[SIZE*(SIZE+8)];\
     put_h264_qpel ## SIZE ## _h_lowpass_ ## CODETYPE(halfH, src, SIZE, stride);\
     put_h264_qpel ## SIZE ## _hv_lowpass_ ## CODETYPE(halfHV, tmp, src, SIZE, SIZE, stride);\
     OPNAME ## pixels ## SIZE ## _l2_ ## CODETYPE(dst, halfH, halfHV, stride, SIZE, SIZE);\
 }\
 \
 static void OPNAME ## h264_qpel ## SIZE ## _mc23_ ## CODETYPE(uint8_t *dst, uint8_t *src, int stride){\
-    DECLARE_ALIGNED_16(uint8_t, halfH)[SIZE*SIZE];\
-    DECLARE_ALIGNED_16(uint8_t, halfHV)[SIZE*SIZE];\
-    DECLARE_ALIGNED_16(int16_t, tmp)[SIZE*(SIZE+8)];\
+    DECLARE_ALIGNED(16, uint8_t, halfH)[SIZE*SIZE];\
+    DECLARE_ALIGNED(16, uint8_t, halfHV)[SIZE*SIZE];\
+    DECLARE_ALIGNED(16, int16_t, tmp)[SIZE*(SIZE+8)];\
     put_h264_qpel ## SIZE ## _h_lowpass_ ## CODETYPE(halfH, src + stride, SIZE, stride);\
     put_h264_qpel ## SIZE ## _hv_lowpass_ ## CODETYPE(halfHV, tmp, src, SIZE, SIZE, stride);\
     OPNAME ## pixels ## SIZE ## _l2_ ## CODETYPE(dst, halfH, halfHV, stride, SIZE, SIZE);\
 }\
 \
 static void OPNAME ## h264_qpel ## SIZE ## _mc12_ ## CODETYPE(uint8_t *dst, uint8_t *src, int stride){\
-    DECLARE_ALIGNED_16(uint8_t, halfV)[SIZE*SIZE];\
-    DECLARE_ALIGNED_16(uint8_t, halfHV)[SIZE*SIZE];\
-    DECLARE_ALIGNED_16(int16_t, tmp)[SIZE*(SIZE+8)];\
+    DECLARE_ALIGNED(16, uint8_t, halfV)[SIZE*SIZE];\
+    DECLARE_ALIGNED(16, uint8_t, halfHV)[SIZE*SIZE];\
+    DECLARE_ALIGNED(16, int16_t, tmp)[SIZE*(SIZE+8)];\
     put_h264_qpel ## SIZE ## _v_lowpass_ ## CODETYPE(halfV, src, SIZE, stride);\
     put_h264_qpel ## SIZE ## _hv_lowpass_ ## CODETYPE(halfHV, tmp, src, SIZE, SIZE, stride);\
     OPNAME ## pixels ## SIZE ## _l2_ ## CODETYPE(dst, halfV, halfHV, stride, SIZE, SIZE);\
 }\
 \
 static void OPNAME ## h264_qpel ## SIZE ## _mc32_ ## CODETYPE(uint8_t *dst, uint8_t *src, int stride){\
-    DECLARE_ALIGNED_16(uint8_t, halfV)[SIZE*SIZE];\
-    DECLARE_ALIGNED_16(uint8_t, halfHV)[SIZE*SIZE];\
-    DECLARE_ALIGNED_16(int16_t, tmp)[SIZE*(SIZE+8)];\
+    DECLARE_ALIGNED(16, uint8_t, halfV)[SIZE*SIZE];\
+    DECLARE_ALIGNED(16, uint8_t, halfHV)[SIZE*SIZE];\
+    DECLARE_ALIGNED(16, int16_t, tmp)[SIZE*(SIZE+8)];\
     put_h264_qpel ## SIZE ## _v_lowpass_ ## CODETYPE(halfV, src+1, SIZE, stride);\
     put_h264_qpel ## SIZE ## _hv_lowpass_ ## CODETYPE(halfHV, tmp, src, SIZE, SIZE, stride);\
     OPNAME ## pixels ## SIZE ## _l2_ ## CODETYPE(dst, halfV, halfHV, stride, SIZE, SIZE);\
@@ -480,7 +480,7 @@
     vec_s16 dc16;
     vec_u8 dcplus, dcminus, v0, v1, v2, v3, aligner;
     LOAD_ZERO;
-    DECLARE_ALIGNED_16(int, dc);
+    DECLARE_ALIGNED(16, int, dc);
     int i;
 
     dc = (block[0] + 32) >> 6;
@@ -590,7 +590,7 @@
 static inline void write16x4(uint8_t *dst, int dst_stride,
                              register vec_u8 r0, register vec_u8 r1,
                              register vec_u8 r2, register vec_u8 r3) {
-    DECLARE_ALIGNED_16(unsigned char, result)[64];
+    DECLARE_ALIGNED(16, unsigned char, result)[64];
     uint32_t *src_int = (uint32_t *)result, *dst_int = (uint32_t *)dst;
     int int_dst_stride = dst_stride/4;
 
@@ -770,7 +770,7 @@
 }
 
 #define h264_loop_filter_luma_altivec(p2, p1, p0, q0, q1, q2, alpha, beta, tc0) {            \
-    DECLARE_ALIGNED_16(unsigned char, temp)[16];                                             \
+    DECLARE_ALIGNED(16, unsigned char, temp)[16];                                            \
     register vec_u8 alphavec;                                                              \
     register vec_u8 betavec;                                                               \
     register vec_u8 mask;                                                                  \
@@ -850,7 +850,7 @@
     vec_u8 vblock;
     vec_s16 vtemp, vweight, voffset, v0, v1;
     vec_u16 vlog2_denom;
-    DECLARE_ALIGNED_16(int32_t, temp)[4];
+    DECLARE_ALIGNED(16, int32_t, temp)[4];
     LOAD_ZERO;
 
     offset <<= log2_denom;
@@ -896,7 +896,7 @@
     vec_u8 vsrc, vdst;
     vec_s16 vtemp, vweights, vweightd, voffset, v0, v1, v2, v3;
     vec_u16 vlog2_denom;
-    DECLARE_ALIGNED_16(int32_t, temp)[4];
+    DECLARE_ALIGNED(16, int32_t, temp)[4];
     LOAD_ZERO;
 
     offset = ((offset + 1) | 1) << log2_denom;
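
The change is purely mechanical: every use of the fixed-width DECLARE_ALIGNED_16(t, v) helper becomes the generic DECLARE_ALIGNED(n, t, v) with n = 16, so the buffers handed to the AltiVec loads/stores keep their 16-byte alignment. As a minimal sketch of what these macros expand to, assuming a GCC-style compiler (the real definitions live in libavutil/mem.h and also cover other compilers such as MSVC and ICC):

    /* Sketch only: GCC-flavored definitions of the alignment macros. */
    #define DECLARE_ALIGNED(n, t, v)  t __attribute__ ((aligned (n))) v
    /* The old helper was just the 16-byte special case: */
    #define DECLARE_ALIGNED_16(t, v)  DECLARE_ALIGNED(16, t, v)

    /* With SIZE expanded to 16, a declaration from this file such as
     *     DECLARE_ALIGNED(16, uint8_t, half)[SIZE*SIZE];
     * therefore becomes
     *     uint8_t __attribute__ ((aligned (16))) half[16*16];
     * Note the array brackets sit outside the macro invocation. */

Under that reading, dropping the _16 wrapper in favor of the parameterized macro changes nothing in the generated code; it only removes a redundant alias.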
