Revision 563c72da libavcodec/h264dsp.c
--- a/libavcodec/h264dsp.c
+++ b/libavcodec/h264dsp.c
 #include "avcodec.h"
 #include "h264dsp.h"
 
-#define op_scale1(x) block[x] = av_clip_uint8( (block[x]*weight + offset) >> log2_denom )
-#define op_scale2(x) dst[x] = av_clip_uint8( (src[x]*weights + dst[x]*weightd + offset) >> (log2_denom+1))
-#define H264_WEIGHT(W,H) \
-static void weight_h264_pixels ## W ## x ## H ## _c(uint8_t *block, int stride, int log2_denom, int weight, int offset){ \
-    int y; \
-    offset <<= log2_denom; \
-    if(log2_denom) offset += 1<<(log2_denom-1); \
-    for(y=0; y<H; y++, block += stride){ \
-        op_scale1(0); \
-        op_scale1(1); \
-        if(W==2) continue; \
-        op_scale1(2); \
-        op_scale1(3); \
-        if(W==4) continue; \
-        op_scale1(4); \
-        op_scale1(5); \
-        op_scale1(6); \
-        op_scale1(7); \
-        if(W==8) continue; \
-        op_scale1(8); \
-        op_scale1(9); \
-        op_scale1(10); \
-        op_scale1(11); \
-        op_scale1(12); \
-        op_scale1(13); \
-        op_scale1(14); \
-        op_scale1(15); \
-    } \
-} \
-static void biweight_h264_pixels ## W ## x ## H ## _c(uint8_t *dst, uint8_t *src, int stride, int log2_denom, int weightd, int weights, int offset){ \
-    int y; \
-    offset = ((offset + 1) | 1) << log2_denom; \
-    for(y=0; y<H; y++, dst += stride, src += stride){ \
-        op_scale2(0); \
-        op_scale2(1); \
-        if(W==2) continue; \
-        op_scale2(2); \
-        op_scale2(3); \
-        if(W==4) continue; \
-        op_scale2(4); \
-        op_scale2(5); \
-        op_scale2(6); \
-        op_scale2(7); \
-        if(W==8) continue; \
-        op_scale2(8); \
-        op_scale2(9); \
-        op_scale2(10); \
-        op_scale2(11); \
-        op_scale2(12); \
-        op_scale2(13); \
-        op_scale2(14); \
-        op_scale2(15); \
-    } \
-}
-
-H264_WEIGHT(16,16)
-H264_WEIGHT(16,8)
-H264_WEIGHT(8,16)
-H264_WEIGHT(8,8)
-H264_WEIGHT(8,4)
-H264_WEIGHT(4,8)
-H264_WEIGHT(4,4)
-H264_WEIGHT(4,2)
-H264_WEIGHT(2,4)
-H264_WEIGHT(2,2)
-
-#undef op_scale1
-#undef op_scale2
-#undef H264_WEIGHT
-
-static av_always_inline av_flatten void h264_loop_filter_luma_c(uint8_t *pix, int xstride, int ystride, int inner_iters, int alpha, int beta, int8_t *tc0)
-{
-    int i, d;
-    for( i = 0; i < 4; i++ ) {
-        if( tc0[i] < 0 ) {
-            pix += inner_iters*ystride;
-            continue;
-        }
-        for( d = 0; d < inner_iters; d++ ) {
-            const int p0 = pix[-1*xstride];
-            const int p1 = pix[-2*xstride];
-            const int p2 = pix[-3*xstride];
-            const int q0 = pix[0];
-            const int q1 = pix[1*xstride];
-            const int q2 = pix[2*xstride];
-
-            if( FFABS( p0 - q0 ) < alpha &&
-                FFABS( p1 - p0 ) < beta &&
-                FFABS( q1 - q0 ) < beta ) {
-
-                int tc = tc0[i];
-                int i_delta;
-
-                if( FFABS( p2 - p0 ) < beta ) {
-                    if(tc0[i])
-                        pix[-2*xstride] = p1 + av_clip( (( p2 + ( ( p0 + q0 + 1 ) >> 1 ) ) >> 1) - p1, -tc0[i], tc0[i] );
-                    tc++;
-                }
-                if( FFABS( q2 - q0 ) < beta ) {
-                    if(tc0[i])
-                        pix[ xstride] = q1 + av_clip( (( q2 + ( ( p0 + q0 + 1 ) >> 1 ) ) >> 1) - q1, -tc0[i], tc0[i] );
-                    tc++;
-                }
-
-                i_delta = av_clip( (((q0 - p0 ) << 2) + (p1 - q1) + 4) >> 3, -tc, tc );
-                pix[-xstride] = av_clip_uint8( p0 + i_delta );    /* p0' */
-                pix[0]        = av_clip_uint8( q0 - i_delta );    /* q0' */
-            }
-            pix += ystride;
-        }
-    }
-}
-static void h264_v_loop_filter_luma_c(uint8_t *pix, int stride, int alpha, int beta, int8_t *tc0)
-{
-    h264_loop_filter_luma_c(pix, stride, 1, 4, alpha, beta, tc0);
-}
-static void h264_h_loop_filter_luma_c(uint8_t *pix, int stride, int alpha, int beta, int8_t *tc0)
-{
-    h264_loop_filter_luma_c(pix, 1, stride, 4, alpha, beta, tc0);
-}
-static void h264_h_loop_filter_luma_mbaff_c(uint8_t *pix, int stride, int alpha, int beta, int8_t *tc0)
-{
-    h264_loop_filter_luma_c(pix, 1, stride, 2, alpha, beta, tc0);
-}
-
-static av_always_inline av_flatten void h264_loop_filter_luma_intra_c(uint8_t *pix, int xstride, int ystride, int inner_iters, int alpha, int beta)
-{
-    int d;
-    for( d = 0; d < 4 * inner_iters; d++ ) {
-        const int p2 = pix[-3*xstride];
-        const int p1 = pix[-2*xstride];
-        const int p0 = pix[-1*xstride];
-
-        const int q0 = pix[ 0*xstride];
-        const int q1 = pix[ 1*xstride];
-        const int q2 = pix[ 2*xstride];
-
-        if( FFABS( p0 - q0 ) < alpha &&
-            FFABS( p1 - p0 ) < beta &&
-            FFABS( q1 - q0 ) < beta ) {
-
-            if(FFABS( p0 - q0 ) < (( alpha >> 2 ) + 2 )){
-                if( FFABS( p2 - p0 ) < beta)
-                {
-                    const int p3 = pix[-4*xstride];
-                    /* p0', p1', p2' */
-                    pix[-1*xstride] = ( p2 + 2*p1 + 2*p0 + 2*q0 + q1 + 4 ) >> 3;
-                    pix[-2*xstride] = ( p2 + p1 + p0 + q0 + 2 ) >> 2;
-                    pix[-3*xstride] = ( 2*p3 + 3*p2 + p1 + p0 + q0 + 4 ) >> 3;
-                } else {
-                    /* p0' */
-                    pix[-1*xstride] = ( 2*p1 + p0 + q1 + 2 ) >> 2;
-                }
-                if( FFABS( q2 - q0 ) < beta)
-                {
-                    const int q3 = pix[3*xstride];
-                    /* q0', q1', q2' */
-                    pix[0*xstride] = ( p1 + 2*p0 + 2*q0 + 2*q1 + q2 + 4 ) >> 3;
-                    pix[1*xstride] = ( p0 + q0 + q1 + q2 + 2 ) >> 2;
-                    pix[2*xstride] = ( 2*q3 + 3*q2 + q1 + q0 + p0 + 4 ) >> 3;
-                } else {
-                    /* q0' */
-                    pix[0*xstride] = ( 2*q1 + q0 + p1 + 2 ) >> 2;
-                }
-            }else{
-                /* p0', q0' */
-                pix[-1*xstride] = ( 2*p1 + p0 + q1 + 2 ) >> 2;
-                pix[ 0*xstride] = ( 2*q1 + q0 + p1 + 2 ) >> 2;
-            }
-        }
-        pix += ystride;
-    }
-}
-static void h264_v_loop_filter_luma_intra_c(uint8_t *pix, int stride, int alpha, int beta)
-{
-    h264_loop_filter_luma_intra_c(pix, stride, 1, 4, alpha, beta);
-}
-static void h264_h_loop_filter_luma_intra_c(uint8_t *pix, int stride, int alpha, int beta)
-{
-    h264_loop_filter_luma_intra_c(pix, 1, stride, 4, alpha, beta);
-}
-static void h264_h_loop_filter_luma_mbaff_intra_c(uint8_t *pix, int stride, int alpha, int beta)
-{
-    h264_loop_filter_luma_intra_c(pix, 1, stride, 2, alpha, beta);
-}
-
-static av_always_inline av_flatten void h264_loop_filter_chroma_c(uint8_t *pix, int xstride, int ystride, int inner_iters, int alpha, int beta, int8_t *tc0)
-{
-    int i, d;
-    for( i = 0; i < 4; i++ ) {
-        const int tc = tc0[i];
-        if( tc <= 0 ) {
-            pix += inner_iters*ystride;
-            continue;
-        }
-        for( d = 0; d < inner_iters; d++ ) {
-            const int p0 = pix[-1*xstride];
-            const int p1 = pix[-2*xstride];
-            const int q0 = pix[0];
-            const int q1 = pix[1*xstride];
-
-            if( FFABS( p0 - q0 ) < alpha &&
-                FFABS( p1 - p0 ) < beta &&
-                FFABS( q1 - q0 ) < beta ) {
-
-                int delta = av_clip( (((q0 - p0 ) << 2) + (p1 - q1) + 4) >> 3, -tc, tc );
-
-                pix[-xstride] = av_clip_uint8( p0 + delta );    /* p0' */
-                pix[0]        = av_clip_uint8( q0 - delta );    /* q0' */
-            }
-            pix += ystride;
-        }
-    }
-}
-static void h264_v_loop_filter_chroma_c(uint8_t *pix, int stride, int alpha, int beta, int8_t *tc0)
-{
-    h264_loop_filter_chroma_c(pix, stride, 1, 2, alpha, beta, tc0);
-}
-static void h264_h_loop_filter_chroma_c(uint8_t *pix, int stride, int alpha, int beta, int8_t *tc0)
-{
-    h264_loop_filter_chroma_c(pix, 1, stride, 2, alpha, beta, tc0);
-}
-static void h264_h_loop_filter_chroma_mbaff_c(uint8_t *pix, int stride, int alpha, int beta, int8_t *tc0)
-{
-    h264_loop_filter_chroma_c(pix, 1, stride, 1, alpha, beta, tc0);
-}
-
-static av_always_inline av_flatten void h264_loop_filter_chroma_intra_c(uint8_t *pix, int xstride, int ystride, int inner_iters, int alpha, int beta)
-{
-    int d;
-    for( d = 0; d < 4 * inner_iters; d++ ) {
-        const int p0 = pix[-1*xstride];
-        const int p1 = pix[-2*xstride];
-        const int q0 = pix[0];
-        const int q1 = pix[1*xstride];
-
-        if( FFABS( p0 - q0 ) < alpha &&
-            FFABS( p1 - p0 ) < beta &&
-            FFABS( q1 - q0 ) < beta ) {
-
-            pix[-xstride] = ( 2*p1 + p0 + q1 + 2 ) >> 2;   /* p0' */
-            pix[0]        = ( 2*q1 + q0 + p1 + 2 ) >> 2;   /* q0' */
-        }
-        pix += ystride;
-    }
-}
-static void h264_v_loop_filter_chroma_intra_c(uint8_t *pix, int stride, int alpha, int beta)
-{
-    h264_loop_filter_chroma_intra_c(pix, stride, 1, 2, alpha, beta);
-}
-static void h264_h_loop_filter_chroma_intra_c(uint8_t *pix, int stride, int alpha, int beta)
-{
-    h264_loop_filter_chroma_intra_c(pix, 1, stride, 2, alpha, beta);
-}
-static void h264_h_loop_filter_chroma_mbaff_intra_c(uint8_t *pix, int stride, int alpha, int beta)
-{
-    h264_loop_filter_chroma_intra_c(pix, 1, stride, 1, alpha, beta);
-}
+#include "h264dsp_template.c"
 
 void ff_h264dsp_init(H264DSPContext *c)
 {
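
The weight/biweight macros removed above implement H.264 weighted prediction: each sample is scaled by an explicit weight, biased, rounded and clipped back to 8 bits. As a reading aid, here is a minimal standalone sketch of what op_scale1() and op_scale2() expand to for one row of samples; clip_uint8(), weight_row() and biweight_row() are hypothetical names, not functions from h264dsp.c or h264dsp_template.c.

#include <stdint.h>

/* Stand-in for av_clip_uint8(): saturate an int to the 0..255 range. */
static inline uint8_t clip_uint8(int v)
{
    return v < 0 ? 0 : v > 255 ? 255 : (uint8_t)v;
}

/* Unidirectional weighting (op_scale1): block[x] = (block[x]*weight + bias) >> log2_denom */
static void weight_row(uint8_t *block, int w, int log2_denom, int weight, int offset)
{
    offset <<= log2_denom;               /* scale the offset up to the shifted domain */
    if (log2_denom)
        offset += 1 << (log2_denom - 1); /* add half the divisor so the shift rounds  */
    for (int x = 0; x < w; x++)
        block[x] = clip_uint8((block[x] * weight + offset) >> log2_denom);
}

/* Bidirectional weighting (op_scale2): blend src into dst with two weights. */
static void biweight_row(uint8_t *dst, const uint8_t *src, int w,
                         int log2_denom, int weightd, int weights, int offset)
{
    /* Same bias as the removed code: (offset + 1) forced odd, then scaled. */
    int o = ((offset + 1) | 1) << log2_denom;
    for (int x = 0; x < w; x++)
        dst[x] = clip_uint8((src[x] * weights + dst[x] * weightd + o) >> (log2_denom + 1));
}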
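
The removed deblocking routines share one core step for luma and chroma in the normal (bS < 4) case: an edge is touched only if |p0 - q0| < alpha and the inner gradients stay below beta, and the correction applied to p0/q0 is clipped to ±tc. The luma filter additionally adjusts p1/q1 and bumps tc when the secondary |p2 - p0| / |q2 - q0| thresholds pass. A minimal standalone sketch of that shared step, assuming 8-bit samples; filter_edge_pair(), clip() and clip_uint8() are hypothetical names standing in for the FFmpeg helpers.

#include <stdint.h>
#include <stdlib.h>

static inline int clip(int v, int lo, int hi)          /* stand-in for av_clip()       */
{
    return v < lo ? lo : v > hi ? hi : v;
}

static inline uint8_t clip_uint8(int v)                /* stand-in for av_clip_uint8() */
{
    return v < 0 ? 0 : v > 255 ? 255 : (uint8_t)v;
}

/* Filter one sample pair across an edge: pix points at q0, pix[-xstride] is p0. */
static void filter_edge_pair(uint8_t *pix, int xstride, int alpha, int beta, int tc)
{
    const int p1 = pix[-2 * xstride], p0 = pix[-1 * xstride];
    const int q0 = pix[0],            q1 = pix[ 1 * xstride];

    if (abs(p0 - q0) < alpha && abs(p1 - p0) < beta && abs(q1 - q0) < beta) {
        /* delta = clip3(-tc, tc, ((q0 - p0)*4 + (p1 - q1) + 4) >> 3) */
        int delta = clip((((q0 - p0) << 2) + (p1 - q1) + 4) >> 3, -tc, tc);
        pix[-xstride] = clip_uint8(p0 + delta);  /* p0' */
        pix[0]        = clip_uint8(q0 - delta);  /* q0' */
    }
}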