/**
 * @file vp56.c
 * VP5 and VP6 compatible video decoder (common features)
 *
 * Copyright (C) 2006 Aurelien Jacobs <aurel@gnuage.org>
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include "avcodec.h"

#include "vp56.h"
#include "vp56data.h"
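
/* The per-quantizer DC/AC dequantization factors come from the tables in
 * vp56data.h; they are scaled by 4 here, presumably to match the fixed-point
 * scale expected by the VP3-style IDCT selected in vp56_init(). */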
void vp56_init_dequant(vp56_context_t *s, int quantizer)
{
    s->quantizer = quantizer;
    s->dequant_dc = vp56_dc_dequant[quantizer] << 2;
    s->dequant_ac = vp56_ac_dequant[quantizer] << 2;
}
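
/* Scan the 12 candidate positions around (row,col) and keep up to two
 * distinct, non-zero motion vectors of neighbouring macroblocks that were
 * coded against ref_frame. The candidates end up in s->vector_candidate[];
 * the return value (no predictor -> 1, one predictor -> 2, two or more -> 0)
 * is used as the context for macroblock type parsing in vp56_decode_mv(). */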
static int vp56_get_vectors_predictors(vp56_context_t *s, int row, int col,
                                       vp56_frame_t ref_frame)
{
    int nb_pred = 0;
    vp56_mv_t vect[2] = {{0,0}, {0,0}};
    int pos, offset;
    vp56_mv_t mvp;

    for (pos=0; pos<12; pos++) {
        mvp.x = col + vp56_candidate_predictor_pos[pos][0];
        mvp.y = row + vp56_candidate_predictor_pos[pos][1];
        if (mvp.x < 0 || mvp.x >= s->mb_width ||
            mvp.y < 0 || mvp.y >= s->mb_height)
            continue;
        offset = mvp.x + s->mb_width*mvp.y;

        if (vp56_reference_frame[s->macroblocks[offset].type] != ref_frame)
            continue;
        if ((s->macroblocks[offset].mv.x == vect[0].x &&
             s->macroblocks[offset].mv.y == vect[0].y) ||
            (s->macroblocks[offset].mv.x == 0 &&
             s->macroblocks[offset].mv.y == 0))
            continue;

        vect[nb_pred++] = s->macroblocks[offset].mv;
        if (nb_pred > 1) {
            nb_pred = -1;
            break;
        }
        s->vector_candidate_pos = pos;
    }

    s->vector_candidate[0] = vect[0];
    s->vector_candidate[1] = vect[1];

    return nb_pred+1;
}
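
/* Update the macroblock type statistics from the bitstream (optionally
 * reloading one of the predefined tables, then applying signed deltas;
 * "(delta ^ -sign) + sign" adds or subtracts delta depending on the sign
 * bit), then derive, for each context and previous type, the probability of
 * keeping the previous type plus the node probabilities of vp56_pmbt_tree.
 * The weight of the current type is zeroed while building the tree since
 * "same type" is already covered by the first probability. */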
static void vp56_parse_mb_type_models(vp56_context_t *s)
{
    vp56_range_coder_t *c = &s->c;
    int i, ctx, type;

    for (ctx=0; ctx<3; ctx++) {
        if (vp56_rac_get_prob(c, 174)) {
            int idx = vp56_rac_gets(c, 4);
            memcpy(s->mb_types_stats[ctx],vp56_pre_def_mb_type_stats[idx][ctx],
                   sizeof(s->mb_types_stats[ctx]));
        }
        if (vp56_rac_get_prob(c, 254)) {
            for (type=0; type<10; type++) {
                for(i=0; i<2; i++) {
                    if (vp56_rac_get_prob(c, 205)) {
                        int delta, sign = vp56_rac_get(c);

                        delta = vp56_rac_get_tree(c, vp56_pmbtm_tree,
                                                  vp56_mb_type_model_model);
                        if (!delta)
                            delta = 4 * vp56_rac_gets(c, 7);
                        s->mb_types_stats[ctx][type][i] += (delta ^ -sign) + sign;
                    }
                }
            }
        }
    }

    /* compute MB type probability tables based on previous MB type */
    for (ctx=0; ctx<3; ctx++) {
        int p[10];

        for (type=0; type<10; type++)
            p[type] = 100 * s->mb_types_stats[ctx][type][1];

        for (type=0; type<10; type++) {
            int p02, p34, p0234, p17, p56, p89, p5689, p156789;

            /* conservative MB type probability */
            s->mb_type_model[ctx][type][0] = 255 - (255 * s->mb_types_stats[ctx][type][0]) / (1 + s->mb_types_stats[ctx][type][0] + s->mb_types_stats[ctx][type][1]);

            p[type] = 0;    /* same MB type => weight is null */

            /* binary tree parsing probabilities */
            p02 = p[0] + p[2];
            p34 = p[3] + p[4];
            p0234 = p02 + p34;
            p17 = p[1] + p[7];
            p56 = p[5] + p[6];
            p89 = p[8] + p[9];
            p5689 = p56 + p89;
            p156789 = p17 + p5689;

            s->mb_type_model[ctx][type][1] = 1 + 255 * p0234/(1+p0234+p156789);
            s->mb_type_model[ctx][type][2] = 1 + 255 * p02 / (1+p0234);
            s->mb_type_model[ctx][type][3] = 1 + 255 * p17 / (1+p156789);
            s->mb_type_model[ctx][type][4] = 1 + 255 * p[0] / (1+p02);
            s->mb_type_model[ctx][type][5] = 1 + 255 * p[3] / (1+p34);
            s->mb_type_model[ctx][type][6] = 1 + 255 * p[1] / (1+p17);
            s->mb_type_model[ctx][type][7] = 1 + 255 * p56 / (1+p5689);
            s->mb_type_model[ctx][type][8] = 1 + 255 * p[5] / (1+p56);
            s->mb_type_model[ctx][type][9] = 1 + 255 * p[8] / (1+p89);

            /* restore initial value */
            p[type] = 100 * s->mb_types_stats[ctx][type][1];
        }
    }
}
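
/* Read one macroblock type: the first probability of the model decides
 * whether the previous type is simply reused; otherwise the new type is
 * parsed by walking vp56_pmbt_tree with the remaining node probabilities. */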
static vp56_mb_t vp56_parse_mb_type(vp56_context_t *s,
                                    vp56_mb_t prev_type, int ctx)
{
    uint8_t *mb_type_model = s->mb_type_model[ctx][prev_type];
    vp56_range_coder_t *c = &s->c;

    if (vp56_rac_get_prob(c, mb_type_model[0]))
        return prev_type;
    else
        return vp56_rac_get_tree(c, vp56_pmbt_tree, mb_type_model);
}
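
/* Four-vector mode: each luma block gets its own vector (null, delta, or one
 * of the two candidates); the last block's vector is stored as the
 * macroblock vector used for later prediction, and the chroma vectors are
 * the average of the four luma vectors (rounded for VP5, truncated towards
 * zero for VP6). */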
static void vp56_decode_4mv(vp56_context_t *s, int row, int col)
{
    vp56_mv_t mv = {0,0};
    int type[4];
    int b;

    /* parse each block type */
    for (b=0; b<4; b++) {
        type[b] = vp56_rac_gets(&s->c, 2);
        if (type[b])
            type[b]++;  /* only returns 0, 2, 3 or 4 (all INTER_PF) */
    }

    /* get vectors */
    for (b=0; b<4; b++) {
        switch (type[b]) {
            case VP56_MB_INTER_NOVEC_PF:
                s->mv[b] = (vp56_mv_t) {0,0};
                break;
            case VP56_MB_INTER_DELTA_PF:
                s->parse_vector_adjustment(s, &s->mv[b]);
                break;
            case VP56_MB_INTER_V1_PF:
                s->mv[b] = s->vector_candidate[0];
                break;
            case VP56_MB_INTER_V2_PF:
                s->mv[b] = s->vector_candidate[1];
                break;
        }
        mv.x += s->mv[b].x;
        mv.y += s->mv[b].y;
    }

    /* this is the one selected for the whole MB for prediction */
    s->macroblocks[row * s->mb_width + col].mv = s->mv[3];

    /* chroma vectors are average luma vectors */
    if (s->avctx->codec->id == CODEC_ID_VP5) {
        s->mv[4].x = s->mv[5].x = RSHIFT(mv.x,2);
        s->mv[4].y = s->mv[5].y = RSHIFT(mv.y,2);
    } else {
        s->mv[4] = s->mv[5] = (vp56_mv_t) {mv.x/4, mv.y/4};
    }
}
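
/* Decode the macroblock type and the single vector shared by all six blocks.
 * Golden-frame modes recompute the candidate list against VP56_FRAME_GOLDEN
 * before picking a candidate or applying a delta; the 4V mode hands over to
 * vp56_decode_4mv() instead. */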
static vp56_mb_t vp56_decode_mv(vp56_context_t *s, int row, int col)
{
    vp56_mv_t *mv, vect = {0,0};
    int ctx, b;

    ctx = vp56_get_vectors_predictors(s, row, col, VP56_FRAME_PREVIOUS);
    s->mb_type = vp56_parse_mb_type(s, s->mb_type, ctx);
    s->macroblocks[row * s->mb_width + col].type = s->mb_type;

    switch (s->mb_type) {
        case VP56_MB_INTER_V1_PF:
            mv = &s->vector_candidate[0];
            break;

        case VP56_MB_INTER_V2_PF:
            mv = &s->vector_candidate[1];
            break;

        case VP56_MB_INTER_V1_GF:
            vp56_get_vectors_predictors(s, row, col, VP56_FRAME_GOLDEN);
            mv = &s->vector_candidate[0];
            break;

        case VP56_MB_INTER_V2_GF:
            vp56_get_vectors_predictors(s, row, col, VP56_FRAME_GOLDEN);
            mv = &s->vector_candidate[1];
            break;

        case VP56_MB_INTER_DELTA_PF:
            s->parse_vector_adjustment(s, &vect);
            mv = &vect;
            break;

        case VP56_MB_INTER_DELTA_GF:
            vp56_get_vectors_predictors(s, row, col, VP56_FRAME_GOLDEN);
            s->parse_vector_adjustment(s, &vect);
            mv = &vect;
            break;

        case VP56_MB_INTER_4V:
            vp56_decode_4mv(s, row, col);
            return s->mb_type;

        default:
            mv = &vect;
            break;
    }

    s->macroblocks[row*s->mb_width + col].mv = *mv;

    /* same vector for all blocks */
    for (b=0; b<6; b++)
        s->mv[b] = *mv;

    return s->mb_type;
}
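
/* DC prediction: for each block, average the DC of the left and above
 * neighbours coded against the same reference frame (VP5 may also use the
 * adjacent entries of the above-blocks row when fewer than two matches were
 * found); with no usable neighbour, reuse the last DC seen for this plane
 * and reference frame. The prediction is added to the decoded DC before
 * dequantization, and the prediction state is updated with the result. */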
static void vp56_add_predictors_dc(vp56_context_t *s, vp56_frame_t ref_frame)
{
    int idx = s->scantable.permutated[0];
    int i;

    for (i=0; i<6; i++) {
        vp56_ref_dc_t *ab = &s->above_blocks[s->above_block_idx[i]];
        vp56_ref_dc_t *lb = &s->left_block[vp56_b6to4[i]];
        int count = 0;
        int dc = 0;

        if (ref_frame == lb->ref_frame) {
            dc += lb->dc_coeff;
            count++;
        }
        if (ref_frame == ab->ref_frame) {
            dc += ab->dc_coeff;
            count++;
        }
        if (s->avctx->codec->id == CODEC_ID_VP5) {
            if (count < 2 && ref_frame == ab[-1].ref_frame) {
                dc += ab[-1].dc_coeff;
                count++;
            }
            if (count < 2 && ref_frame == ab[1].ref_frame) {
                dc += ab[1].dc_coeff;
                count++;
            }
        }
        if (count == 0)
            dc = s->prev_dc[vp56_b6to3[i]][ref_frame];
        else if (count == 2)
            dc /= 2;

        s->block_coeff[i][idx] += dc;
        s->prev_dc[vp56_b6to3[i]][ref_frame] = s->block_coeff[i][idx];
        ab->dc_coeff = s->block_coeff[i][idx];
        ab->ref_frame = ref_frame;
        lb->dc_coeff = s->block_coeff[i][idx];
        lb->ref_frame = ref_frame;
        s->block_coeff[i][idx] *= s->dequant_dc;
    }
}
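
/* Filter one 12-pixel long edge: on each line the correction
 * v = (p[-2] + 3*(p[0]-p[-1]) - p[1] + 4) >> 3 is limited by the
 * codec-specific adjust() callback against threshold t, then added to one
 * side of the edge and subtracted from the other, with clipping to the
 * 8-bit range. */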
static void vp56_edge_filter(vp56_context_t *s, uint8_t *yuv,
                             int pix_inc, int line_inc, int t)
{
    int pix2_inc = 2 * pix_inc;
    int i, v;

    for (i=0; i<12; i++) {
        v = (yuv[-pix2_inc] + 3*(yuv[0]-yuv[-pix_inc]) - yuv[pix_inc] + 4) >> 3;
        v = s->adjust(v, t);
        yuv[-pix_inc] = av_clip_uint8(yuv[-pix_inc] + v);
        yuv[0] = av_clip_uint8(yuv[0] - v);
        yuv += line_inc;
    }
}

static void vp56_deblock_filter(vp56_context_t *s, uint8_t *yuv,
                                int stride, int dx, int dy)
{
    int t = vp56_filter_threshold[s->quantizer];
    if (dx) vp56_edge_filter(s, yuv +         10-dx , 1,      stride, t);
    if (dy) vp56_edge_filter(s, yuv + stride*(10-dy), stride, 1,      t);
}
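
/* Motion compensation of one 8x8 block. The vector is split into an integer
 * part (dx,dy) and a sub-pel remainder (mv & mask); when the 12x12 source
 * area (block plus 2-pixel border) falls outside the plane it is rebuilt
 * with ff_emulated_edge_mc(), and when deblocking is enabled the reference
 * block is first copied and edge filtered. The sub-pel part is then rendered
 * either by the codec-specific filter callback or, by default, by averaging
 * two whole-pel candidates with put_no_rnd_pixels_l2. */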
static void vp56_mc(vp56_context_t *s, int b, uint8_t *src,
                    int stride, int x, int y)
{
    int plane = vp56_b6to3[b];
    uint8_t *dst= s->frames[VP56_FRAME_CURRENT].data[plane]+s->block_offset[b];
    uint8_t *src_block;
    int src_offset;
    int overlap_offset = 0;
    int mask = s->vp56_coord_div[b] - 1;
    int deblock_filtering = s->deblock_filtering;
    int dx;
    int dy;

    if (s->avctx->skip_loop_filter >= AVDISCARD_ALL ||
        (s->avctx->skip_loop_filter >= AVDISCARD_NONKEY
         && !s->frames[VP56_FRAME_CURRENT].key_frame))
        deblock_filtering = 0;

    dx = s->mv[b].x / s->vp56_coord_div[b];
    dy = s->mv[b].y / s->vp56_coord_div[b];

    if (b >= 4) {
        x /= 2;
        y /= 2;
    }
    x += dx - 2;
    y += dy - 2;

    if (x<0 || x+12>=s->plane_width[plane] ||
        y<0 || y+12>=s->plane_height[plane]) {
        ff_emulated_edge_mc(s->edge_emu_buffer,
                            src + s->block_offset[b] + (dy-2)*stride + (dx-2),
                            stride, 12, 12, x, y,
                            s->plane_width[plane],
                            s->plane_height[plane]);
        src_block = s->edge_emu_buffer;
        src_offset = 2 + 2*stride;
    } else if (deblock_filtering) {
        /* only need a 12x12 block, but there is no such dsp function, */
        /* so copy a 16x12 block */
        s->dsp.put_pixels_tab[0][0](s->edge_emu_buffer,
                                    src + s->block_offset[b] + (dy-2)*stride + (dx-2),
                                    stride, 12);
        src_block = s->edge_emu_buffer;
        src_offset = 2 + 2*stride;
    } else {
        src_block = src;
        src_offset = s->block_offset[b] + dy*stride + dx;
    }

    if (deblock_filtering)
        vp56_deblock_filter(s, src_block, stride, dx&7, dy&7);

    if (s->mv[b].x & mask)
        overlap_offset += (s->mv[b].x > 0) ? 1 : -1;
    if (s->mv[b].y & mask)
        overlap_offset += (s->mv[b].y > 0) ? stride : -stride;

    if (overlap_offset) {
        if (s->filter)
            s->filter(s, dst, src_block, src_offset, src_offset+overlap_offset,
                      stride, s->mv[b], mask, s->filter_selection, b<4);
        else
            s->dsp.put_no_rnd_pixels_l2[1](dst, src_block+src_offset,
                                           src_block+src_offset+overlap_offset,
                                           stride, 8);
    } else {
        s->dsp.put_pixels_tab[1][0](dst, src_block+src_offset, stride, 8);
    }
}
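
/* Decode and reconstruct one macroblock: pick the type (always intra on key
 * frames), decode the coefficients, apply DC prediction, then reconstruct
 * the six blocks with idct_put (intra), a plain copy plus idct_add
 * (no-vector inter), or motion compensation plus idct_add (all other inter
 * modes). */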
static void vp56_decode_mb(vp56_context_t *s, int row, int col)
{
    AVFrame *frame_current, *frame_ref;
    vp56_mb_t mb_type;
    vp56_frame_t ref_frame;
    int b, plan, off;

    if (s->frames[VP56_FRAME_CURRENT].key_frame)
        mb_type = VP56_MB_INTRA;
    else
        mb_type = vp56_decode_mv(s, row, col);
    ref_frame = vp56_reference_frame[mb_type];

    memset(s->block_coeff, 0, sizeof(s->block_coeff));

    s->parse_coeff(s);

    vp56_add_predictors_dc(s, ref_frame);

    frame_current = &s->frames[VP56_FRAME_CURRENT];
    frame_ref = &s->frames[ref_frame];

    switch (mb_type) {
        case VP56_MB_INTRA:
            for (b=0; b<6; b++) {
                plan = vp56_b6to3[b];
                s->dsp.idct_put(frame_current->data[plan] + s->block_offset[b],
                                s->stride[plan], s->block_coeff[b]);
            }
            break;

        case VP56_MB_INTER_NOVEC_PF:
        case VP56_MB_INTER_NOVEC_GF:
            for (b=0; b<6; b++) {
                plan = vp56_b6to3[b];
                off = s->block_offset[b];
                s->dsp.put_pixels_tab[1][0](frame_current->data[plan] + off,
                                            frame_ref->data[plan] + off,
                                            s->stride[plan], 8);
                s->dsp.idct_add(frame_current->data[plan] + off,
                                s->stride[plan], s->block_coeff[b]);
            }
            break;

        case VP56_MB_INTER_DELTA_PF:
        case VP56_MB_INTER_V1_PF:
        case VP56_MB_INTER_V2_PF:
        case VP56_MB_INTER_DELTA_GF:
        case VP56_MB_INTER_4V:
        case VP56_MB_INTER_V1_GF:
        case VP56_MB_INTER_V2_GF:
            for (b=0; b<6; b++) {
                int x_off = b==1 || b==3 ? 8 : 0;
                int y_off = b==2 || b==3 ? 8 : 0;
                plan = vp56_b6to3[b];
                vp56_mc(s, b, frame_ref->data[plan], s->stride[plan],
                        16*col+x_off, 16*row+y_off);
                s->dsp.idct_add(frame_current->data[plan] + s->block_offset[b],
                                s->stride[plan], s->block_coeff[b]);
            }
            break;
    }
}
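
/* Called from vp56_decode_frame() when parse_header() returned 2: recompute
 * the plane sizes, the (possibly negated) strides and the macroblock
 * dimensions, and reallocate the above-blocks array, the macroblock array
 * and the edge emulation buffer. */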
static int vp56_size_changed(AVCodecContext *avctx, vp56_context_t *s)
{
    int stride = s->frames[VP56_FRAME_CURRENT].linesize[0];
    int i;

    s->plane_width[0] = s->avctx->coded_width;
    s->plane_width[1] = s->plane_width[2] = s->avctx->coded_width/2;
    s->plane_height[0] = s->avctx->coded_height;
    s->plane_height[1] = s->plane_height[2] = s->avctx->coded_height/2;

    for (i=0; i<3; i++)
        s->stride[i] = s->flip * s->frames[VP56_FRAME_CURRENT].linesize[i];

    s->mb_width = (s->avctx->coded_width+15) / 16;
    s->mb_height = (s->avctx->coded_height+15) / 16;

    if (s->mb_width > 1000 || s->mb_height > 1000) {
        av_log(avctx, AV_LOG_ERROR, "picture too big\n");
        return -1;
    }

    s->above_blocks = av_realloc(s->above_blocks,
                                 (4*s->mb_width+6) * sizeof(*s->above_blocks));
    s->macroblocks = av_realloc(s->macroblocks,
                                s->mb_width*s->mb_height*sizeof(*s->macroblocks));
    av_free(s->edge_emu_buffer_alloc);
    s->edge_emu_buffer_alloc = av_malloc(16*stride);
    s->edge_emu_buffer = s->edge_emu_buffer_alloc;
    if (s->flip < 0)
        s->edge_emu_buffer += 15 * stride;

    return 0;
}
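
/* Decode one frame: parse the header, fetch an output buffer, set up the
 * per-frame models and prediction state, run the macroblock loop (rows are
 * processed bottom-up when s->flip is negative), then rotate the reference
 * frames: the previous frame is released unless it is also the golden frame,
 * the new picture replaces the golden frame on key frames or on an explicit
 * golden-frame update, and it always becomes the new previous frame. */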
int vp56_decode_frame(AVCodecContext *avctx, void *data, int *data_size,
                      uint8_t *buf, int buf_size)
{
    vp56_context_t *s = avctx->priv_data;
    AVFrame *const p = &s->frames[VP56_FRAME_CURRENT];
    AVFrame *picture = data;
    int mb_row, mb_col, mb_row_flip, mb_offset = 0;
    int block, y, uv, stride_y, stride_uv;
    int golden_frame = 0;
    int res;

    res = s->parse_header(s, buf, buf_size, &golden_frame);
    if (!res)
        return -1;

    p->reference = 1;
    if (avctx->get_buffer(avctx, p) < 0) {
        av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
        return -1;
    }

    if (res == 2)
        if (vp56_size_changed(avctx, s)) {
            avctx->release_buffer(avctx, p);
            return -1;
        }

    if (p->key_frame) {
        p->pict_type = FF_I_TYPE;
        s->default_models_init(s);
        for (block=0; block<s->mb_height*s->mb_width; block++)
            s->macroblocks[block].type = VP56_MB_INTRA;
    } else {
        p->pict_type = FF_P_TYPE;
        vp56_parse_mb_type_models(s);
        s->parse_vector_models(s);
        s->mb_type = VP56_MB_INTER_NOVEC_PF;
    }

    s->parse_coeff_models(s);

    memset(s->prev_dc, 0, sizeof(s->prev_dc));
    s->prev_dc[1][VP56_FRAME_CURRENT] = 128;
    s->prev_dc[2][VP56_FRAME_CURRENT] = 128;

    for (block=0; block < 4*s->mb_width+6; block++) {
        s->above_blocks[block].ref_frame = -1;
        s->above_blocks[block].dc_coeff = 0;
        s->above_blocks[block].not_null_dc = 0;
    }
    s->above_blocks[2*s->mb_width + 2].ref_frame = 0;
    s->above_blocks[3*s->mb_width + 4].ref_frame = 0;

    stride_y = p->linesize[0];
    stride_uv = p->linesize[1];

    if (s->flip < 0)
        mb_offset = 7;

    /* main macroblocks loop */
    for (mb_row=0; mb_row<s->mb_height; mb_row++) {
        if (s->flip < 0)
            mb_row_flip = s->mb_height - mb_row - 1;
        else
            mb_row_flip = mb_row;

        for (block=0; block<4; block++) {
            s->left_block[block].ref_frame = -1;
            s->left_block[block].dc_coeff = 0;
            s->left_block[block].not_null_dc = 0;
            memset(s->coeff_ctx[block], 0, 64*sizeof(s->coeff_ctx[block][0]));
        }
        memset(s->coeff_ctx_last, 24, sizeof(s->coeff_ctx_last));

        s->above_block_idx[0] = 1;
        s->above_block_idx[1] = 2;
        s->above_block_idx[2] = 1;
        s->above_block_idx[3] = 2;
        s->above_block_idx[4] = 2*s->mb_width + 2 + 1;
        s->above_block_idx[5] = 3*s->mb_width + 4 + 1;

        s->block_offset[s->frbi] = (mb_row_flip*16 + mb_offset) * stride_y;
        s->block_offset[s->srbi] = s->block_offset[s->frbi] + 8*stride_y;
        s->block_offset[1] = s->block_offset[0] + 8;
        s->block_offset[3] = s->block_offset[2] + 8;
        s->block_offset[4] = (mb_row_flip*8 + mb_offset) * stride_uv;
        s->block_offset[5] = s->block_offset[4];

        for (mb_col=0; mb_col<s->mb_width; mb_col++) {
            vp56_decode_mb(s, mb_row, mb_col);

            for (y=0; y<4; y++) {
                s->above_block_idx[y] += 2;
                s->block_offset[y] += 16;
            }

            for (uv=4; uv<6; uv++) {
                s->above_block_idx[uv] += 1;
                s->block_offset[uv] += 8;
            }
        }
    }

    if (s->frames[VP56_FRAME_PREVIOUS].data[0]
        && (s->frames[VP56_FRAME_PREVIOUS].data[0]
            != s->frames[VP56_FRAME_GOLDEN].data[0])) {
        avctx->release_buffer(avctx, &s->frames[VP56_FRAME_PREVIOUS]);
    }
    if (p->key_frame || golden_frame) {
        if (s->frames[VP56_FRAME_GOLDEN].data[0])
            avctx->release_buffer(avctx, &s->frames[VP56_FRAME_GOLDEN]);
        s->frames[VP56_FRAME_GOLDEN] = *p;
    }
    s->frames[VP56_FRAME_PREVIOUS] = *p;

    *picture = *p;
    *data_size = sizeof(AVPicture);

    s->frames[VP56_FRAME_CURRENT].data[0] = NULL;
    return buf_size;
}
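
/* Common decoder setup shared by VP5 and VP6. A VP3-style IDCT is used
 * unless the user forced another one. When flip is set, strides are negated
 * in vp56_size_changed() and the two luma block rows swap their offsets
 * through frbi/srbi, so the rows are effectively handled bottom-up. */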
void vp56_init(vp56_context_t *s, AVCodecContext *avctx, int flip)
{
    int i;

    s->avctx = avctx;
    avctx->pix_fmt = PIX_FMT_YUV420P;

    if (s->avctx->idct_algo == FF_IDCT_AUTO)
        s->avctx->idct_algo = FF_IDCT_VP3;
    dsputil_init(&s->dsp, s->avctx);
    ff_init_scantable(s->dsp.idct_permutation, &s->scantable,ff_zigzag_direct);

    avcodec_set_dimensions(s->avctx, 0, 0);

    for (i=0; i<3; i++)
        s->frames[i].data[0] = NULL;
    s->edge_emu_buffer_alloc = NULL;

    s->above_blocks = NULL;
    s->macroblocks = NULL;
    s->quantizer = -1;
    s->deblock_filtering = 1;

    s->filter = NULL;

    if (flip) {
        s->flip = -1;
        s->frbi = 2;
        s->srbi = 0;
    } else {
        s->flip = 1;
        s->frbi = 0;
        s->srbi = 2;
    }
}
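
/* Free the context arrays and release any reference frames still held (the
 * golden frame only if it is a separate buffer from the previous frame). */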
int vp56_free(AVCodecContext *avctx)
{
    vp56_context_t *s = avctx->priv_data;

    av_free(s->above_blocks);
    av_free(s->macroblocks);
    av_free(s->edge_emu_buffer_alloc);
    if (s->frames[VP56_FRAME_GOLDEN].data[0]
        && (s->frames[VP56_FRAME_PREVIOUS].data[0]
            != s->frames[VP56_FRAME_GOLDEN].data[0]))
        avctx->release_buffer(avctx, &s->frames[VP56_FRAME_GOLDEN]);
    if (s->frames[VP56_FRAME_PREVIOUS].data[0])
        avctx->release_buffer(avctx, &s->frames[VP56_FRAME_PREVIOUS]);
    return 0;
}