/*
 * Copyright (C) 2003-2004 the ffmpeg project
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 *
 */

/**
 * @file vp3.c
 * On2 VP3 Video Decoder
 *
 * VP3 Video Decoder by Mike Melanson (mike at multimedia.cx)
 * For more information about the VP3 coding process, visit:
 *   http://multimedia.cx/
 *
 * Theora decoder by Alex Beregszaszi
 */

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>

#include "common.h"
#include "avcodec.h"
#include "dsputil.h"
#include "mpegvideo.h"

#include "vp3data.h"

#define FRAGMENT_PIXELS 8

/*
 * Debugging Variables
 *
 * Define one or more of the following compile-time variables to 1 to obtain
 * elaborate information about certain aspects of the decoding process.
 *
 * KEYFRAMES_ONLY: set this to 1 to only see keyframes (VP3 slideshow mode)
 * DEBUG_VP3: high-level decoding flow
 * DEBUG_INIT: initialization parameters
 * DEBUG_DEQUANTIZERS: display how the dequantization tables are built
 * DEBUG_BLOCK_CODING: unpacking the superblock/macroblock/fragment coding
 * DEBUG_MODES: unpacking the coding modes for individual fragments
 * DEBUG_VECTORS: display the motion vectors
 * DEBUG_TOKEN: display exhaustive information about each DCT token
 * DEBUG_VLC: display the VLCs as they are extracted from the stream
 * DEBUG_DC_PRED: display the process of reversing DC prediction
 * DEBUG_IDCT: show every detail of the IDCT process
 */

#define KEYFRAMES_ONLY 0

#define DEBUG_VP3 0
#define DEBUG_INIT 0
#define DEBUG_DEQUANTIZERS 0
#define DEBUG_BLOCK_CODING 0
#define DEBUG_MODES 0
#define DEBUG_VECTORS 0
#define DEBUG_TOKEN 0
#define DEBUG_VLC 0
#define DEBUG_DC_PRED 0
#define DEBUG_IDCT 0

#if DEBUG_VP3
#define debug_vp3(args...) av_log(NULL, AV_LOG_DEBUG, ## args)
#else
static inline void debug_vp3(const char *format, ...) { }
#endif

#if DEBUG_INIT
#define debug_init(args...) av_log(NULL, AV_LOG_DEBUG, ## args)
#else
static inline void debug_init(const char *format, ...) { }
#endif

#if DEBUG_DEQUANTIZERS
#define debug_dequantizers(args...) av_log(NULL, AV_LOG_DEBUG, ## args)
#else
static inline void debug_dequantizers(const char *format, ...) { }
#endif

#if DEBUG_BLOCK_CODING
#define debug_block_coding(args...) av_log(NULL, AV_LOG_DEBUG, ## args)
#else
static inline void debug_block_coding(const char *format, ...) { }
#endif

#if DEBUG_MODES
#define debug_modes(args...) av_log(NULL, AV_LOG_DEBUG, ## args)
#else
static inline void debug_modes(const char *format, ...) { }
#endif

#if DEBUG_VECTORS
#define debug_vectors(args...) av_log(NULL, AV_LOG_DEBUG, ## args)
#else
static inline void debug_vectors(const char *format, ...) { }
#endif

#if DEBUG_TOKEN
#define debug_token(args...) av_log(NULL, AV_LOG_DEBUG, ## args)
#else
static inline void debug_token(const char *format, ...) { }
#endif

#if DEBUG_VLC
#define debug_vlc(args...) av_log(NULL, AV_LOG_DEBUG, ## args)
#else
static inline void debug_vlc(const char *format, ...) { }
#endif

#if DEBUG_DC_PRED
#define debug_dc_pred(args...) av_log(NULL, AV_LOG_DEBUG, ## args)
#else
static inline void debug_dc_pred(const char *format, ...) { }
#endif

#if DEBUG_IDCT
#define debug_idct(args...) av_log(NULL, AV_LOG_DEBUG, ## args)
#else
static inline void debug_idct(const char *format, ...) { }
#endif

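/* Descriptive note (added): each non-zero DCT coefficient is kept as one
 * Coeff node, holding the value plus its (already permutated) position in
 * the 8x8 block; unpack_vlcs() chains these nodes into a small linked list
 * per fragment as tokens are decoded. */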
typedef struct Coeff {
    struct Coeff *next;
    DCTELEM coeff;
    uint8_t index;
} Coeff;

//FIXME split things out into their own arrays
typedef struct Vp3Fragment {
    Coeff *next_coeff;
    /* address of first pixel taking into account which plane the fragment
     * lives on as well as the plane stride */
    int first_pixel;
    /* this is the macroblock that the fragment belongs to */
    uint16_t macroblock;
    uint8_t coding_method;
    uint8_t coeff_count;
    int8_t motion_x;
    int8_t motion_y;
} Vp3Fragment;

#define SB_NOT_CODED        0
#define SB_PARTIALLY_CODED  1
#define SB_FULLY_CODED      2

#define MODE_INTER_NO_MV      0
#define MODE_INTRA            1
#define MODE_INTER_PLUS_MV    2
#define MODE_INTER_LAST_MV    3
#define MODE_INTER_PRIOR_LAST 4
#define MODE_USING_GOLDEN     5
#define MODE_GOLDEN_MV        6
#define MODE_INTER_FOURMV     7
#define CODING_MODE_COUNT     8

/* special internal mode */
#define MODE_COPY             8

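/* Descriptive note (added): a mode alphabet maps the short codes read via
 * mode_code_vlc in unpack_modes() to actual coding modes; alphabet 0 is the
 * free-form scheme and is filled in from the bitstream at decode time. */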
/* There are 6 preset schemes, plus a free-form scheme */
static int ModeAlphabet[7][CODING_MODE_COUNT] =
{
    /* this is the custom scheme */
    { 0, 0, 0, 0, 0, 0, 0, 0 },

    /* scheme 1: Last motion vector dominates */
    { MODE_INTER_LAST_MV,    MODE_INTER_PRIOR_LAST,
      MODE_INTER_PLUS_MV,    MODE_INTER_NO_MV,
      MODE_INTRA,            MODE_USING_GOLDEN,
      MODE_GOLDEN_MV,        MODE_INTER_FOURMV },

    /* scheme 2 */
    { MODE_INTER_LAST_MV,    MODE_INTER_PRIOR_LAST,
      MODE_INTER_NO_MV,      MODE_INTER_PLUS_MV,
      MODE_INTRA,            MODE_USING_GOLDEN,
      MODE_GOLDEN_MV,        MODE_INTER_FOURMV },

    /* scheme 3 */
    { MODE_INTER_LAST_MV,    MODE_INTER_PLUS_MV,
      MODE_INTER_PRIOR_LAST, MODE_INTER_NO_MV,
      MODE_INTRA,            MODE_USING_GOLDEN,
      MODE_GOLDEN_MV,        MODE_INTER_FOURMV },

    /* scheme 4 */
    { MODE_INTER_LAST_MV,    MODE_INTER_PLUS_MV,
      MODE_INTER_NO_MV,      MODE_INTER_PRIOR_LAST,
      MODE_INTRA,            MODE_USING_GOLDEN,
      MODE_GOLDEN_MV,        MODE_INTER_FOURMV },

    /* scheme 5: No motion vector dominates */
    { MODE_INTER_NO_MV,      MODE_INTER_LAST_MV,
      MODE_INTER_PRIOR_LAST, MODE_INTER_PLUS_MV,
      MODE_INTRA,            MODE_USING_GOLDEN,
      MODE_GOLDEN_MV,        MODE_INTER_FOURMV },

    /* scheme 6 */
    { MODE_INTER_NO_MV,      MODE_USING_GOLDEN,
      MODE_INTER_LAST_MV,    MODE_INTER_PRIOR_LAST,
      MODE_INTER_PLUS_MV,    MODE_INTRA,
      MODE_GOLDEN_MV,        MODE_INTER_FOURMV },

};

#define MIN_DEQUANT_VAL 2

typedef struct Vp3DecodeContext {
    AVCodecContext *avctx;
    int theora, theora_tables;
    int version;
    int width, height;
    AVFrame golden_frame;
    AVFrame last_frame;
    AVFrame current_frame;
    int keyframe;
    DSPContext dsp;
    int flipped_image;

    int quality_index;
    int last_quality_index;

    int superblock_count;
    int superblock_width;
    int superblock_height;
    int y_superblock_width;
    int y_superblock_height;
    int c_superblock_width;
    int c_superblock_height;
    int u_superblock_start;
    int v_superblock_start;
    unsigned char *superblock_coding;

    int macroblock_count;
    int macroblock_width;
    int macroblock_height;

    int fragment_count;
    int fragment_width;
    int fragment_height;

    Vp3Fragment *all_fragments;
    Coeff *coeffs;
    Coeff *next_coeff;
    int u_fragment_start;
    int v_fragment_start;

    ScanTable scantable;

    /* tables */
    uint16_t coded_dc_scale_factor[64];
    uint32_t coded_ac_scale_factor[64];
    uint16_t coded_intra_y_dequant[64];
    uint16_t coded_intra_c_dequant[64];
    uint16_t coded_inter_dequant[64];

    /* this is a list of indices into the all_fragments array indicating
     * which of the fragments are coded */
    int *coded_fragment_list;
    int coded_fragment_list_index;
    int pixel_addresses_inited;

    VLC dc_vlc[16];
    VLC ac_vlc_1[16];
    VLC ac_vlc_2[16];
    VLC ac_vlc_3[16];
    VLC ac_vlc_4[16];

    VLC superblock_run_length_vlc;
    VLC fragment_run_length_vlc;
    VLC mode_code_vlc;
    VLC motion_vector_vlc;

    /* these arrays need to be on 16-byte boundaries since SSE2 operations
     * index into them */
    int16_t __align16 intra_y_dequant[64];
    int16_t __align16 intra_c_dequant[64];
    int16_t __align16 inter_dequant[64];

    /* This table contains superblock_count * 16 entries. Each set of 16
     * numbers corresponds to the fragment indices 0..15 of the superblock.
     * An entry will be -1 to indicate that no entry corresponds to that
     * index. */
    int *superblock_fragments;

    /* This table contains superblock_count * 4 entries. Each set of 4
     * numbers corresponds to the macroblock indices 0..3 of the superblock.
     * An entry will be -1 to indicate that no entry corresponds to that
     * index. */
    int *superblock_macroblocks;

    /* This table contains macroblock_count * 6 entries. Each set of 6
     * numbers corresponds to the fragment indices 0..5 which comprise
     * the macroblock (4 Y fragments and 2 C fragments). */
    int *macroblock_fragments;
    /* This is an array that indicates how a particular macroblock
     * is coded. */
    unsigned char *macroblock_coding;

    int first_coded_y_fragment;
    int first_coded_c_fragment;
    int last_coded_y_fragment;
    int last_coded_c_fragment;

    uint8_t edge_emu_buffer[9*2048]; //FIXME dynamic alloc
    uint8_t qscale_table[2048]; //FIXME dynamic alloc (width+15)/16

    /* Huffman decode */
    int hti;
    unsigned int hbits;
    int entries;
    int huff_code_size;
    uint16_t huffman_table[80][32][2];

    uint32_t filter_limit_values[64];
    int bounding_values_array[256];
} Vp3DecodeContext;

static int theora_decode_comments(AVCodecContext *avctx, GetBitContext gb);
static int theora_decode_tables(AVCodecContext *avctx, GetBitContext gb);

/************************************************************************
 * VP3 specific functions
 ************************************************************************/

/*
 * This function sets up all of the various block mappings:
 * superblocks <-> fragments, macroblocks <-> fragments,
 * superblocks <-> macroblocks
 *
 * Returns 0 if successful; returns 1 if *anything* went wrong.
 */
static int init_block_mapping(Vp3DecodeContext *s)
{
    int i, j;
    signed int hilbert_walk_y[16];
    signed int hilbert_walk_c[16];
    signed int hilbert_walk_mb[4];

    int current_fragment = 0;
    int current_width = 0;
    int current_height = 0;
    int right_edge = 0;
    int bottom_edge = 0;
    int superblock_row_inc = 0;
    int *hilbert = NULL;
    int mapping_index = 0;

    int current_macroblock;
    int c_fragment;

    signed char travel_width[16] = {
         1,  1,  0, -1,
         0,  0,  1,  0,
         1,  0,  1,  0,
         0, -1,  0,  1
    };

    signed char travel_height[16] = {
         0,  0,  1,  0,
         1,  1,  0, -1,
         0,  1,  0, -1,
        -1,  0, -1,  0
    };

    signed char travel_width_mb[4] = {
         1,  0,  1,  0
    };

    signed char travel_height_mb[4] = {
         0,  1,  0, -1
    };

    debug_vp3(" vp3: initialize block mapping tables\n");

    /* figure out hilbert pattern per these frame dimensions */
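    /* Descriptive note (added): the hilbert_walk_* tables hold fragment-index
     * (or macroblock-index) deltas tracing the Hilbert-style traversal of one
     * superblock for this frame width; travel_width[]/travel_height[] above
     * give the matching x/y steps so the walk can be bounds-checked against
     * the plane edges. */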
    hilbert_walk_y[0]  = 1;
    hilbert_walk_y[1]  = 1;
    hilbert_walk_y[2]  = s->fragment_width;
    hilbert_walk_y[3]  = -1;
    hilbert_walk_y[4]  = s->fragment_width;
    hilbert_walk_y[5]  = s->fragment_width;
    hilbert_walk_y[6]  = 1;
    hilbert_walk_y[7]  = -s->fragment_width;
    hilbert_walk_y[8]  = 1;
    hilbert_walk_y[9]  = s->fragment_width;
    hilbert_walk_y[10] = 1;
    hilbert_walk_y[11] = -s->fragment_width;
    hilbert_walk_y[12] = -s->fragment_width;
    hilbert_walk_y[13] = -1;
    hilbert_walk_y[14] = -s->fragment_width;
    hilbert_walk_y[15] = 1;

    hilbert_walk_c[0]  = 1;
    hilbert_walk_c[1]  = 1;
    hilbert_walk_c[2]  = s->fragment_width / 2;
    hilbert_walk_c[3]  = -1;
    hilbert_walk_c[4]  = s->fragment_width / 2;
    hilbert_walk_c[5]  = s->fragment_width / 2;
    hilbert_walk_c[6]  = 1;
    hilbert_walk_c[7]  = -s->fragment_width / 2;
    hilbert_walk_c[8]  = 1;
    hilbert_walk_c[9]  = s->fragment_width / 2;
    hilbert_walk_c[10] = 1;
    hilbert_walk_c[11] = -s->fragment_width / 2;
    hilbert_walk_c[12] = -s->fragment_width / 2;
    hilbert_walk_c[13] = -1;
    hilbert_walk_c[14] = -s->fragment_width / 2;
    hilbert_walk_c[15] = 1;

    hilbert_walk_mb[0] = 1;
    hilbert_walk_mb[1] = s->macroblock_width;
    hilbert_walk_mb[2] = 1;
    hilbert_walk_mb[3] = -s->macroblock_width;

    /* iterate through each superblock (all planes) and map the fragments */
    for (i = 0; i < s->superblock_count; i++) {
        debug_init(" superblock %d (u starts @ %d, v starts @ %d)\n",
            i, s->u_superblock_start, s->v_superblock_start);

        /* time to re-assign the limits? */
        if (i == 0) {

            /* start of Y superblocks */
            right_edge = s->fragment_width;
            bottom_edge = s->fragment_height;
            current_width = -1;
            current_height = 0;
            superblock_row_inc = 3 * s->fragment_width -
                (s->y_superblock_width * 4 - s->fragment_width);
            hilbert = hilbert_walk_y;

            /* the first operation for this variable is to advance by 1 */
            current_fragment = -1;

        } else if (i == s->u_superblock_start) {

            /* start of U superblocks */
            right_edge = s->fragment_width / 2;
            bottom_edge = s->fragment_height / 2;
            current_width = -1;
            current_height = 0;
            superblock_row_inc = 3 * (s->fragment_width / 2) -
                (s->c_superblock_width * 4 - s->fragment_width / 2);
            hilbert = hilbert_walk_c;

            /* the first operation for this variable is to advance by 1 */
            current_fragment = s->u_fragment_start - 1;

        } else if (i == s->v_superblock_start) {

            /* start of V superblocks */
            right_edge = s->fragment_width / 2;
            bottom_edge = s->fragment_height / 2;
            current_width = -1;
            current_height = 0;
            superblock_row_inc = 3 * (s->fragment_width / 2) -
                (s->c_superblock_width * 4 - s->fragment_width / 2);
            hilbert = hilbert_walk_c;

            /* the first operation for this variable is to advance by 1 */
            current_fragment = s->v_fragment_start - 1;

        }

        if (current_width >= right_edge - 1) {
            /* reset width and move to next superblock row */
            current_width = -1;
            current_height += 4;

            /* fragment is now at the start of a new superblock row */
            current_fragment += superblock_row_inc;
        }

        /* iterate through all 16 fragments in a superblock */
        for (j = 0; j < 16; j++) {
            current_fragment += hilbert[j];
            current_width += travel_width[j];
            current_height += travel_height[j];

            /* check if the fragment is in bounds */
            if ((current_width < right_edge) &&
                (current_height < bottom_edge)) {
                s->superblock_fragments[mapping_index] = current_fragment;
                debug_init(" mapping fragment %d to superblock %d, position %d (%d/%d x %d/%d)\n",
                    s->superblock_fragments[mapping_index], i, j,
                    current_width, right_edge, current_height, bottom_edge);
            } else {
                s->superblock_fragments[mapping_index] = -1;
                debug_init(" superblock %d, position %d has no fragment (%d/%d x %d/%d)\n",
                    i, j,
                    current_width, right_edge, current_height, bottom_edge);
            }

            mapping_index++;
        }
    }

    /* initialize the superblock <-> macroblock mapping; iterate through
     * all of the Y plane superblocks to build this mapping */
    right_edge = s->macroblock_width;
    bottom_edge = s->macroblock_height;
    current_width = -1;
    current_height = 0;
    superblock_row_inc = s->macroblock_width -
        (s->y_superblock_width * 2 - s->macroblock_width);
    hilbert = hilbert_walk_mb;
    mapping_index = 0;
    current_macroblock = -1;
    for (i = 0; i < s->u_superblock_start; i++) {

        if (current_width >= right_edge - 1) {
            /* reset width and move to next superblock row */
            current_width = -1;
            current_height += 2;

            /* macroblock is now at the start of a new superblock row */
            current_macroblock += superblock_row_inc;
        }

        /* iterate through each potential macroblock in the superblock */
        for (j = 0; j < 4; j++) {
            current_macroblock += hilbert_walk_mb[j];
            current_width += travel_width_mb[j];
            current_height += travel_height_mb[j];

            /* check if the macroblock is in bounds */
            if ((current_width < right_edge) &&
                (current_height < bottom_edge)) {
                s->superblock_macroblocks[mapping_index] = current_macroblock;
                debug_init(" mapping macroblock %d to superblock %d, position %d (%d/%d x %d/%d)\n",
                    s->superblock_macroblocks[mapping_index], i, j,
                    current_width, right_edge, current_height, bottom_edge);
            } else {
                s->superblock_macroblocks[mapping_index] = -1;
                debug_init(" superblock %d, position %d has no macroblock (%d/%d x %d/%d)\n",
                    i, j,
                    current_width, right_edge, current_height, bottom_edge);
            }

            mapping_index++;
        }
    }

    /* initialize the macroblock <-> fragment mapping */
    current_fragment = 0;
    current_macroblock = 0;
    mapping_index = 0;
    for (i = 0; i < s->fragment_height; i += 2) {

        for (j = 0; j < s->fragment_width; j += 2) {

            debug_init(" macroblock %d contains fragments: ", current_macroblock);
            s->all_fragments[current_fragment].macroblock = current_macroblock;
            s->macroblock_fragments[mapping_index++] = current_fragment;
            debug_init("%d ", current_fragment);

            if (j + 1 < s->fragment_width) {
                s->all_fragments[current_fragment + 1].macroblock = current_macroblock;
                s->macroblock_fragments[mapping_index++] = current_fragment + 1;
                debug_init("%d ", current_fragment + 1);
            } else
                s->macroblock_fragments[mapping_index++] = -1;

            if (i + 1 < s->fragment_height) {
                s->all_fragments[current_fragment + s->fragment_width].macroblock =
                    current_macroblock;
                s->macroblock_fragments[mapping_index++] =
                    current_fragment + s->fragment_width;
                debug_init("%d ", current_fragment + s->fragment_width);
            } else
                s->macroblock_fragments[mapping_index++] = -1;

            if ((j + 1 < s->fragment_width) && (i + 1 < s->fragment_height)) {
                s->all_fragments[current_fragment + s->fragment_width + 1].macroblock =
                    current_macroblock;
                s->macroblock_fragments[mapping_index++] =
                    current_fragment + s->fragment_width + 1;
                debug_init("%d ", current_fragment + s->fragment_width + 1);
            } else
                s->macroblock_fragments[mapping_index++] = -1;

            /* C planes */
            c_fragment = s->u_fragment_start +
                (i * s->fragment_width / 4) + (j / 2);
            s->all_fragments[c_fragment].macroblock = s->macroblock_count;
            s->macroblock_fragments[mapping_index++] = c_fragment;
            debug_init("%d ", c_fragment);

            c_fragment = s->v_fragment_start +
                (i * s->fragment_width / 4) + (j / 2);
            s->all_fragments[c_fragment].macroblock = s->macroblock_count;
            s->macroblock_fragments[mapping_index++] = c_fragment;
            debug_init("%d ", c_fragment);

            debug_init("\n");

            if (j + 2 <= s->fragment_width)
                current_fragment += 2;
            else
                current_fragment++;
            current_macroblock++;
        }

        current_fragment += s->fragment_width;
    }

    return 0;  /* successful path out */
}

/*
 * This function wipes out all of the fragment data.
 */
static void init_frame(Vp3DecodeContext *s, GetBitContext *gb)
{
    int i;

    /* zero out all of the fragment information */
    s->coded_fragment_list_index = 0;
    for (i = 0; i < s->fragment_count; i++) {
        s->all_fragments[i].coeff_count = 0;
        s->all_fragments[i].motion_x = 127;
        s->all_fragments[i].motion_y = 127;
        s->all_fragments[i].next_coeff= NULL;
        s->coeffs[i].index=
        s->coeffs[i].coeff=0;
        s->coeffs[i].next= NULL;
    }
}

/*
 * This function sets up the dequantization tables used for a particular
 * frame.
 */
static void init_dequantizer(Vp3DecodeContext *s)
{

    int ac_scale_factor = s->coded_ac_scale_factor[s->quality_index];
    int dc_scale_factor = s->coded_dc_scale_factor[s->quality_index];
    int i, j;

    debug_vp3(" vp3: initializing dequantization tables\n");

    /*
     * Scale dequantizers:
     *
     *   quantizer * sf
     *   --------------
     *         100
     *
     * where sf = dc_scale_factor for DC quantizer
     *         or ac_scale_factor for AC quantizer
     *
     * Then, saturate the result to a lower limit of MIN_DEQUANT_VAL.
     */
#define SCALER 4

    /* scale DC quantizers */
    s->intra_y_dequant[0] = s->coded_intra_y_dequant[0] * dc_scale_factor / 100;
    if (s->intra_y_dequant[0] < MIN_DEQUANT_VAL * 2)
        s->intra_y_dequant[0] = MIN_DEQUANT_VAL * 2;
    s->intra_y_dequant[0] *= SCALER;

    s->intra_c_dequant[0] = s->coded_intra_c_dequant[0] * dc_scale_factor / 100;
    if (s->intra_c_dequant[0] < MIN_DEQUANT_VAL * 2)
        s->intra_c_dequant[0] = MIN_DEQUANT_VAL * 2;
    s->intra_c_dequant[0] *= SCALER;

    s->inter_dequant[0] = s->coded_inter_dequant[0] * dc_scale_factor / 100;
    if (s->inter_dequant[0] < MIN_DEQUANT_VAL * 4)
        s->inter_dequant[0] = MIN_DEQUANT_VAL * 4;
    s->inter_dequant[0] *= SCALER;

    /* scale AC quantizers, zigzag at the same time in preparation for
     * the dequantization phase */
    for (i = 1; i < 64; i++) {
        int k= s->scantable.scantable[i];
        j = s->scantable.permutated[i];

        s->intra_y_dequant[j] = s->coded_intra_y_dequant[k] * ac_scale_factor / 100;
        if (s->intra_y_dequant[j] < MIN_DEQUANT_VAL)
            s->intra_y_dequant[j] = MIN_DEQUANT_VAL;
        s->intra_y_dequant[j] *= SCALER;

        s->intra_c_dequant[j] = s->coded_intra_c_dequant[k] * ac_scale_factor / 100;
        if (s->intra_c_dequant[j] < MIN_DEQUANT_VAL)
            s->intra_c_dequant[j] = MIN_DEQUANT_VAL;
        s->intra_c_dequant[j] *= SCALER;

        s->inter_dequant[j] = s->coded_inter_dequant[k] * ac_scale_factor / 100;
        if (s->inter_dequant[j] < MIN_DEQUANT_VAL * 2)
            s->inter_dequant[j] = MIN_DEQUANT_VAL * 2;
        s->inter_dequant[j] *= SCALER;
    }

    memset(s->qscale_table, (FFMAX(s->intra_y_dequant[1], s->intra_c_dequant[1])+8)/16, 512); //FIXME finetune

    /* print debug information as requested */
    debug_dequantizers("intra Y dequantizers:\n");
    for (i = 0; i < 8; i++) {
        for (j = i * 8; j < i * 8 + 8; j++) {
            debug_dequantizers(" %4d,", s->intra_y_dequant[j]);
        }
        debug_dequantizers("\n");
    }
    debug_dequantizers("\n");

    debug_dequantizers("intra C dequantizers:\n");
    for (i = 0; i < 8; i++) {
        for (j = i * 8; j < i * 8 + 8; j++) {
            debug_dequantizers(" %4d,", s->intra_c_dequant[j]);
        }
        debug_dequantizers("\n");
    }
    debug_dequantizers("\n");

    debug_dequantizers("interframe dequantizers:\n");
    for (i = 0; i < 8; i++) {
        for (j = i * 8; j < i * 8 + 8; j++) {
            debug_dequantizers(" %4d,", s->inter_dequant[j]);
        }
        debug_dequantizers("\n");
    }
    debug_dequantizers("\n");
}

/*
 * This function initializes the loop filter boundary limits if the frame's
 * quality index is different from the previous frame's.
 */
static void init_loop_filter(Vp3DecodeContext *s)
{
    int *bounding_values= s->bounding_values_array+127;
    int filter_limit;
    int x;

    filter_limit = s->filter_limit_values[s->quality_index];

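    /* Descriptive note (added): bounding_values points at the center of the
     * 256-entry array; indexed by pixel difference, the table returns the
     * difference itself within +/- filter_limit, ramps back down to zero over
     * the next filter_limit steps, and stays zero beyond that, so strong
     * edges are left unfiltered. */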
    /* set up the bounding values */
    memset(s->bounding_values_array, 0, 256 * sizeof(int));
    for (x = 0; x < filter_limit; x++) {
        bounding_values[-x - filter_limit] = -filter_limit + x;
        bounding_values[-x] = -x;
        bounding_values[x] = x;
        bounding_values[x + filter_limit] = filter_limit - x;
    }
}

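/*
 * Descriptive note (added): the coding-status lists unpacked below
 * (partially coded superblocks, fully coded superblocks, coded fragments)
 * are run-length coded: an initial flag bit gives the status of the first
 * run, each run length is read through a VLC table, and the flag toggles
 * after every run; a superblock run length of 33 is extended by 12 extra
 * bits.
 */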
/*
 * This function unpacks all of the superblock/macroblock/fragment coding
 * information from the bitstream.
 */
static int unpack_superblocks(Vp3DecodeContext *s, GetBitContext *gb)
{
    int bit = 0;
    int current_superblock = 0;
    int current_run = 0;
    int decode_fully_flags = 0;
    int decode_partial_blocks = 0;
    int first_c_fragment_seen;

    int i, j;
    int current_fragment;

    debug_vp3(" vp3: unpacking superblock coding\n");

    if (s->keyframe) {

        debug_vp3(" keyframe-- all superblocks are fully coded\n");
        memset(s->superblock_coding, SB_FULLY_CODED, s->superblock_count);

    } else {

        /* unpack the list of partially-coded superblocks */
        bit = get_bits(gb, 1);
        /* toggle the bit because as soon as the first run length is
         * fetched the bit will be toggled again */
        bit ^= 1;
        while (current_superblock < s->superblock_count) {
            if (current_run-- == 0) {
                bit ^= 1;
                current_run = get_vlc2(gb,
                    s->superblock_run_length_vlc.table, 6, 2);
                if (current_run == 33)
                    current_run += get_bits(gb, 12);
                debug_block_coding(" setting superblocks %d..%d to %s\n",
                    current_superblock,
                    current_superblock + current_run - 1,
                    (bit) ? "partially coded" : "not coded");

                /* if any of the superblocks are not partially coded, flag
                 * a boolean to decode the list of fully-coded superblocks */
                if (bit == 0) {
                    decode_fully_flags = 1;
                } else {

                    /* make a note of the fact that there are partially coded
                     * superblocks */
                    decode_partial_blocks = 1;
                }
            }
            s->superblock_coding[current_superblock++] = bit;
        }

        /* unpack the list of fully coded superblocks if any of the blocks were
         * not marked as partially coded in the previous step */
        if (decode_fully_flags) {

            current_superblock = 0;
            current_run = 0;
            bit = get_bits(gb, 1);
            /* toggle the bit because as soon as the first run length is
             * fetched the bit will be toggled again */
            bit ^= 1;
            while (current_superblock < s->superblock_count) {

                /* skip any superblocks already marked as partially coded */
                if (s->superblock_coding[current_superblock] == SB_NOT_CODED) {

                    if (current_run-- == 0) {
                        bit ^= 1;
                        current_run = get_vlc2(gb,
                            s->superblock_run_length_vlc.table, 6, 2);
                        if (current_run == 33)
                            current_run += get_bits(gb, 12);
                    }

                    debug_block_coding(" setting superblock %d to %s\n",
                        current_superblock,
                        (bit) ? "fully coded" : "not coded");
                    s->superblock_coding[current_superblock] = 2*bit;
                }
                current_superblock++;
            }
        }

        /* if there were partial blocks, initialize bitstream for
         * unpacking fragment codings */
        if (decode_partial_blocks) {

            current_run = 0;
            bit = get_bits(gb, 1);
            /* toggle the bit because as soon as the first run length is
             * fetched the bit will be toggled again */
            bit ^= 1;
        }
    }

    /* figure out which fragments are coded; iterate through each
     * superblock (all planes) */
    s->coded_fragment_list_index = 0;
    s->next_coeff= s->coeffs + s->fragment_count;
    s->first_coded_y_fragment = s->first_coded_c_fragment = 0;
    s->last_coded_y_fragment = s->last_coded_c_fragment = -1;
    first_c_fragment_seen = 0;
    memset(s->macroblock_coding, MODE_COPY, s->macroblock_count);
    for (i = 0; i < s->superblock_count; i++) {

        /* iterate through all 16 fragments in a superblock */
        for (j = 0; j < 16; j++) {

            /* if the fragment is in bounds, check its coding status */
            current_fragment = s->superblock_fragments[i * 16 + j];
            if (current_fragment >= s->fragment_count) {
                av_log(s->avctx, AV_LOG_ERROR, " vp3:unpack_superblocks(): bad fragment number (%d >= %d)\n",
                    current_fragment, s->fragment_count);
                return 1;
            }
            if (current_fragment != -1) {
                if (s->superblock_coding[i] == SB_NOT_CODED) {

                    /* copy all the fragments from the prior frame */
                    s->all_fragments[current_fragment].coding_method =
                        MODE_COPY;

                } else if (s->superblock_coding[i] == SB_PARTIALLY_CODED) {

                    /* fragment may or may not be coded; this is the case
                     * that cares about the fragment coding runs */
                    if (current_run-- == 0) {
                        bit ^= 1;
                        current_run = get_vlc2(gb,
                            s->fragment_run_length_vlc.table, 5, 2);
                    }

                    if (bit) {
                        /* default mode; actual mode will be decoded in
                         * the next phase */
                        s->all_fragments[current_fragment].coding_method =
                            MODE_INTER_NO_MV;
                        s->all_fragments[current_fragment].next_coeff= s->coeffs + current_fragment;
                        s->coded_fragment_list[s->coded_fragment_list_index] =
                            current_fragment;
                        if ((current_fragment >= s->u_fragment_start) &&
                            (s->last_coded_y_fragment == -1) &&
                            (!first_c_fragment_seen)) {
                            s->first_coded_c_fragment = s->coded_fragment_list_index;
                            s->last_coded_y_fragment = s->first_coded_c_fragment - 1;
                            first_c_fragment_seen = 1;
                        }
                        s->coded_fragment_list_index++;
                        s->macroblock_coding[s->all_fragments[current_fragment].macroblock] = MODE_INTER_NO_MV;
                        debug_block_coding(" superblock %d is partially coded, fragment %d is coded\n",
                            i, current_fragment);
                    } else {
                        /* not coded; copy this fragment from the prior frame */
                        s->all_fragments[current_fragment].coding_method =
                            MODE_COPY;
                        debug_block_coding(" superblock %d is partially coded, fragment %d is not coded\n",
                            i, current_fragment);
                    }

                } else {

                    /* fragments are fully coded in this superblock; actual
                     * coding will be determined in next step */
                    s->all_fragments[current_fragment].coding_method =
                        MODE_INTER_NO_MV;
                    s->all_fragments[current_fragment].next_coeff= s->coeffs + current_fragment;
                    s->coded_fragment_list[s->coded_fragment_list_index] =
                        current_fragment;
                    if ((current_fragment >= s->u_fragment_start) &&
                        (s->last_coded_y_fragment == -1) &&
                        (!first_c_fragment_seen)) {
                        s->first_coded_c_fragment = s->coded_fragment_list_index;
                        s->last_coded_y_fragment = s->first_coded_c_fragment - 1;
                        first_c_fragment_seen = 1;
                    }
                    s->coded_fragment_list_index++;
                    s->macroblock_coding[s->all_fragments[current_fragment].macroblock] = MODE_INTER_NO_MV;
                    debug_block_coding(" superblock %d is fully coded, fragment %d is coded\n",
                        i, current_fragment);
                }
            }
        }
    }

    if (!first_c_fragment_seen)
        /* only Y fragments coded in this frame */
        s->last_coded_y_fragment = s->coded_fragment_list_index - 1;
    else
        /* end the list of coded C fragments */
        s->last_coded_c_fragment = s->coded_fragment_list_index - 1;

    debug_block_coding(" %d total coded fragments, y: %d -> %d, c: %d -> %d\n",
        s->coded_fragment_list_index,
        s->first_coded_y_fragment,
        s->last_coded_y_fragment,
        s->first_coded_c_fragment,
        s->last_coded_c_fragment);

    return 0;
}

/*
 * This function unpacks all the coding mode data for individual macroblocks
 * from the bitstream.
 */
static int unpack_modes(Vp3DecodeContext *s, GetBitContext *gb)
{
    int i, j, k;
    int scheme;
    int current_macroblock;
    int current_fragment;
    int coding_mode;

    debug_vp3(" vp3: unpacking encoding modes\n");

    if (s->keyframe) {
        debug_vp3(" keyframe-- all blocks are coded as INTRA\n");

        for (i = 0; i < s->fragment_count; i++)
            s->all_fragments[i].coding_method = MODE_INTRA;

    } else {

        /* fetch the mode coding scheme for this frame */
        scheme = get_bits(gb, 3);
        debug_modes(" using mode alphabet %d\n", scheme);

        /* is it a custom coding scheme? */
        if (scheme == 0) {
            debug_modes(" custom mode alphabet ahead:\n");
            for (i = 0; i < 8; i++)
                ModeAlphabet[scheme][get_bits(gb, 3)] = i;
        }

        for (i = 0; i < 8; i++)
            debug_modes(" mode[%d][%d] = %d\n", scheme, i,
                ModeAlphabet[scheme][i]);

        /* iterate through all of the macroblocks that contain 1 or more
         * coded fragments */
        for (i = 0; i < s->u_superblock_start; i++) {

            for (j = 0; j < 4; j++) {
                current_macroblock = s->superblock_macroblocks[i * 4 + j];
                if ((current_macroblock == -1) ||
                    (s->macroblock_coding[current_macroblock] == MODE_COPY))
                    continue;
                if (current_macroblock >= s->macroblock_count) {
                    av_log(s->avctx, AV_LOG_ERROR, " vp3:unpack_modes(): bad macroblock number (%d >= %d)\n",
                        current_macroblock, s->macroblock_count);
                    return 1;
                }

                /* mode 7 means get 3 bits for each coding mode */
                if (scheme == 7)
                    coding_mode = get_bits(gb, 3);
                else
                    coding_mode = ModeAlphabet[scheme]
                        [get_vlc2(gb, s->mode_code_vlc.table, 3, 3)];

                s->macroblock_coding[current_macroblock] = coding_mode;
                for (k = 0; k < 6; k++) {
                    current_fragment =
                        s->macroblock_fragments[current_macroblock * 6 + k];
                    if (current_fragment == -1)
                        continue;
                    if (current_fragment >= s->fragment_count) {
                        av_log(s->avctx, AV_LOG_ERROR, " vp3:unpack_modes(): bad fragment number (%d >= %d)\n",
                            current_fragment, s->fragment_count);
                        return 1;
                    }
                    if (s->all_fragments[current_fragment].coding_method !=
                        MODE_COPY)
                        s->all_fragments[current_fragment].coding_method =
                            coding_mode;
                }

                debug_modes(" coding method for macroblock starting @ fragment %d = %d\n",
                    s->macroblock_fragments[current_macroblock * 6], coding_mode);
            }
        }
    }

    return 0;
}

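/*
 * Descriptive note (added): motion vectors are sent in one of two codings
 * chosen per frame; the VLC scheme reads each component through
 * motion_vector_vlc, while the fixed-length scheme reads raw 6-bit indices
 * into fixed_motion_vector_table. The last and prior-last vectors are
 * tracked for the LAST_MV and PRIOR_LAST macroblock modes.
 */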
/*
 * This function unpacks all the motion vectors for the individual
 * macroblocks from the bitstream.
 */
static int unpack_vectors(Vp3DecodeContext *s, GetBitContext *gb)
{
    int i, j, k;
    int coding_mode;
    int motion_x[6];
    int motion_y[6];
    int last_motion_x = 0;
    int last_motion_y = 0;
    int prior_last_motion_x = 0;
    int prior_last_motion_y = 0;
    int current_macroblock;
    int current_fragment;

    debug_vp3(" vp3: unpacking motion vectors\n");
    if (s->keyframe) {

        debug_vp3(" keyframe-- there are no motion vectors\n");

    } else {

        memset(motion_x, 0, 6 * sizeof(int));
        memset(motion_y, 0, 6 * sizeof(int));

        /* coding mode 0 is the VLC scheme; 1 is the fixed code scheme */
        coding_mode = get_bits(gb, 1);
        debug_vectors(" using %s scheme for unpacking motion vectors\n",
            (coding_mode == 0) ? "VLC" : "fixed-length");

        /* iterate through all of the macroblocks that contain 1 or more
         * coded fragments */
        for (i = 0; i < s->u_superblock_start; i++) {

            for (j = 0; j < 4; j++) {
                current_macroblock = s->superblock_macroblocks[i * 4 + j];
                if ((current_macroblock == -1) ||
                    (s->macroblock_coding[current_macroblock] == MODE_COPY))
                    continue;
                if (current_macroblock >= s->macroblock_count) {
                    av_log(s->avctx, AV_LOG_ERROR, " vp3:unpack_vectors(): bad macroblock number (%d >= %d)\n",
                        current_macroblock, s->macroblock_count);
                    return 1;
                }

                current_fragment = s->macroblock_fragments[current_macroblock * 6];
                if (current_fragment >= s->fragment_count) {
                    av_log(s->avctx, AV_LOG_ERROR, " vp3:unpack_vectors(): bad fragment number (%d >= %d)\n",
                        current_fragment, s->fragment_count);
                    return 1;
                }
                switch (s->macroblock_coding[current_macroblock]) {

                case MODE_INTER_PLUS_MV:
                case MODE_GOLDEN_MV:
                    /* all 6 fragments use the same motion vector */
                    if (coding_mode == 0) {
                        motion_x[0] = motion_vector_table[get_vlc2(gb, s->motion_vector_vlc.table, 6, 2)];
                        motion_y[0] = motion_vector_table[get_vlc2(gb, s->motion_vector_vlc.table, 6, 2)];
                    } else {
                        motion_x[0] = fixed_motion_vector_table[get_bits(gb, 6)];
                        motion_y[0] = fixed_motion_vector_table[get_bits(gb, 6)];
                    }

                    for (k = 1; k < 6; k++) {
                        motion_x[k] = motion_x[0];
                        motion_y[k] = motion_y[0];
                    }

                    /* vector maintenance, only on MODE_INTER_PLUS_MV */
                    if (s->macroblock_coding[current_macroblock] ==
                        MODE_INTER_PLUS_MV) {
                        prior_last_motion_x = last_motion_x;
                        prior_last_motion_y = last_motion_y;
                        last_motion_x = motion_x[0];
                        last_motion_y = motion_y[0];
                    }
                    break;

                case MODE_INTER_FOURMV:
                    /* fetch 4 vectors from the bitstream, one for each
                     * Y fragment, then average for the C fragment vectors */
                    motion_x[4] = motion_y[4] = 0;
                    for (k = 0; k < 4; k++) {
                        if (coding_mode == 0) {
                            motion_x[k] = motion_vector_table[get_vlc2(gb, s->motion_vector_vlc.table, 6, 2)];
                            motion_y[k] = motion_vector_table[get_vlc2(gb, s->motion_vector_vlc.table, 6, 2)];
                        } else {
                            motion_x[k] = fixed_motion_vector_table[get_bits(gb, 6)];
                            motion_y[k] = fixed_motion_vector_table[get_bits(gb, 6)];
                        }
                        motion_x[4] += motion_x[k];
                        motion_y[4] += motion_y[k];
                    }

                    if (motion_x[4] >= 0)
                        motion_x[4] = (motion_x[4] + 2) / 4;
                    else
                        motion_x[4] = (motion_x[4] - 2) / 4;
                    motion_x[5] = motion_x[4];

                    if (motion_y[4] >= 0)
                        motion_y[4] = (motion_y[4] + 2) / 4;
                    else
                        motion_y[4] = (motion_y[4] - 2) / 4;
                    motion_y[5] = motion_y[4];

                    /* vector maintenance; vector[3] is treated as the
                     * last vector in this case */
                    prior_last_motion_x = last_motion_x;
                    prior_last_motion_y = last_motion_y;
                    last_motion_x = motion_x[3];
                    last_motion_y = motion_y[3];
                    break;

                case MODE_INTER_LAST_MV:
                    /* all 6 fragments use the last motion vector */
                    motion_x[0] = last_motion_x;
                    motion_y[0] = last_motion_y;
                    for (k = 1; k < 6; k++) {
                        motion_x[k] = motion_x[0];
                        motion_y[k] = motion_y[0];
                    }

                    /* no vector maintenance (last vector remains the
                     * last vector) */
                    break;

                case MODE_INTER_PRIOR_LAST:
                    /* all 6 fragments use the motion vector prior to the
                     * last motion vector */
                    motion_x[0] = prior_last_motion_x;
                    motion_y[0] = prior_last_motion_y;
                    for (k = 1; k < 6; k++) {
                        motion_x[k] = motion_x[0];
                        motion_y[k] = motion_y[0];
                    }

                    /* vector maintenance */
                    prior_last_motion_x = last_motion_x;
                    prior_last_motion_y = last_motion_y;
                    last_motion_x = motion_x[0];
                    last_motion_y = motion_y[0];
                    break;

                default:
                    /* covers intra, inter without MV, golden without MV */
                    memset(motion_x, 0, 6 * sizeof(int));
                    memset(motion_y, 0, 6 * sizeof(int));

                    /* no vector maintenance */
                    break;
                }

                /* assign the motion vectors to the correct fragments */
                debug_vectors(" vectors for macroblock starting @ fragment %d (coding method %d):\n",
                    current_fragment,
                    s->macroblock_coding[current_macroblock]);
                for (k = 0; k < 6; k++) {
                    current_fragment =
                        s->macroblock_fragments[current_macroblock * 6 + k];
                    if (current_fragment == -1)
                        continue;
                    if (current_fragment >= s->fragment_count) {
                        av_log(s->avctx, AV_LOG_ERROR, " vp3:unpack_vectors(): bad fragment number (%d >= %d)\n",
                            current_fragment, s->fragment_count);
                        return 1;
                    }
                    s->all_fragments[current_fragment].motion_x = motion_x[k];
                    s->all_fragments[current_fragment].motion_y = motion_y[k];
                    debug_vectors(" vector %d: fragment %d = (%d, %d)\n",
                        k, current_fragment, motion_x[k], motion_y[k]);
                }
            }
        }
    }

    return 0;
}

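/*
 * Descriptive note (added) on the token layout used by unpack_vlcs() below:
 * tokens 0..6 begin an end-of-block run (some add extra bits to the run
 * length), while larger tokens expand through the zero_run/coeff tables into
 * a run of zeros followed by a single coefficient value.
 */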
1234 |
/*
|
1235 |
* This function is called by unpack_dct_coeffs() to extract the VLCs from
|
1236 |
* the bitstream. The VLCs encode tokens which are used to unpack DCT
|
1237 |
* data. This function unpacks all the VLCs for either the Y plane or both
|
1238 |
* C planes, and is called for DC coefficients or different AC coefficient
|
1239 |
* levels (since different coefficient types require different VLC tables.
|
1240 |
*
|
1241 |
* This function returns a residual eob run. E.g, if a particular token gave
|
1242 |
* instructions to EOB the next 5 fragments and there were only 2 fragments
|
1243 |
* left in the current fragment range, 3 would be returned so that it could
|
1244 |
* be passed into the next call to this same function.
|
1245 |
*/
|
1246 |
static int unpack_vlcs(Vp3DecodeContext *s, GetBitContext *gb, |
1247 |
VLC *table, int coeff_index,
|
1248 |
int first_fragment, int last_fragment, |
1249 |
int eob_run)
|
1250 |
{ |
1251 |
int i;
|
1252 |
int token;
|
1253 |
int zero_run = 0; |
1254 |
DCTELEM coeff = 0;
|
1255 |
Vp3Fragment *fragment; |
1256 |
uint8_t *perm= s->scantable.permutated; |
1257 |
int bits_to_get;
|
1258 |
|
1259 |
if ((first_fragment >= s->fragment_count) ||
|
1260 |
(last_fragment >= s->fragment_count)) { |
1261 |
|
1262 |
av_log(s->avctx, AV_LOG_ERROR, " vp3:unpack_vlcs(): bad fragment number (%d -> %d ?)\n",
|
1263 |
first_fragment, last_fragment); |
1264 |
return 0; |
1265 |
} |
1266 |
|
1267 |
for (i = first_fragment; i <= last_fragment; i++) {
|
1268 |
|
1269 |
fragment = &s->all_fragments[s->coded_fragment_list[i]]; |
1270 |
if (fragment->coeff_count > coeff_index)
|
1271 |
continue;
|
1272 |
|
1273 |
if (!eob_run) {
|
1274 |
/* decode a VLC into a token */
|
1275 |
token = get_vlc2(gb, table->table, 5, 3); |
1276 |
debug_vlc(" token = %2d, ", token);
|
1277 |
/* use the token to get a zero run, a coefficient, and an eob run */
|
1278 |
if (token <= 6) { |
1279 |
eob_run = eob_run_base[token]; |
1280 |
if (eob_run_get_bits[token])
|
1281 |
eob_run += get_bits(gb, eob_run_get_bits[token]); |
1282 |
coeff = zero_run = 0;
|
1283 |
} else {
|
1284 |
bits_to_get = coeff_get_bits[token]; |
1285 |
if (!bits_to_get)
|
1286 |
coeff = coeff_tables[token][0];
|
1287 |
else
|
1288 |
coeff = coeff_tables[token][get_bits(gb, bits_to_get)]; |
1289 |
|
1290 |
zero_run = zero_run_base[token]; |
1291 |
if (zero_run_get_bits[token])
|
1292 |
zero_run += get_bits(gb, zero_run_get_bits[token]); |
1293 |
} |
1294 |
} |
1295 |
|
1296 |
if (!eob_run) {
|
1297 |
fragment->coeff_count += zero_run; |
1298 |
if (fragment->coeff_count < 64){ |
1299 |
fragment->next_coeff->coeff= coeff; |
1300 |
fragment->next_coeff->index= perm[fragment->coeff_count++]; //FIXME perm here already?
|
1301 |
fragment->next_coeff->next= s->next_coeff; |
1302 |
s->next_coeff->next=NULL;
|
1303 |
fragment->next_coeff= s->next_coeff++; |
1304 |
} |
1305 |
debug_vlc(" fragment %d coeff = %d\n",
|
1306 |
s->coded_fragment_list[i], fragment->next_coeff[coeff_index]); |
1307 |
} else {
|
1308 |
fragment->coeff_count |= 128;
|
1309 |
debug_vlc(" fragment %d eob with %d coefficients\n",
|
1310 |
s->coded_fragment_list[i], fragment->coeff_count&127);
|
1311 |
eob_run--; |
1312 |
} |
1313 |
} |
1314 |
|
1315 |
return eob_run;
|
1316 |
} |
1317 |
|
1318 |
/*
|
1319 |
* This function unpacks all of the DCT coefficient data from the
|
1320 |
* bitstream.
|
1321 |
*/
|
1322 |
static int unpack_dct_coeffs(Vp3DecodeContext *s, GetBitContext *gb) |
1323 |
{ |
1324 |
int i;
|
1325 |
int dc_y_table;
|
1326 |
int dc_c_table;
|
1327 |
int ac_y_table;
|
1328 |
int ac_c_table;
|
1329 |
int residual_eob_run = 0; |
1330 |
|
1331 |
/* fetch the DC table indices */
|
1332 |
dc_y_table = get_bits(gb, 4);
|
1333 |
dc_c_table = get_bits(gb, 4);
|
1334 |
|
1335 |
/* unpack the Y plane DC coefficients */
|
1336 |
debug_vp3(" vp3: unpacking Y plane DC coefficients using table %d\n",
|
1337 |
dc_y_table); |
1338 |
residual_eob_run = unpack_vlcs(s, gb, &s->dc_vlc[dc_y_table], 0,
|
1339 |
s->first_coded_y_fragment, s->last_coded_y_fragment, residual_eob_run); |
1340 |
|
1341 |
/* unpack the C plane DC coefficients */
|
1342 |
debug_vp3(" vp3: unpacking C plane DC coefficients using table %d\n",
|
1343 |
dc_c_table); |
1344 |
residual_eob_run = unpack_vlcs(s, gb, &s->dc_vlc[dc_c_table], 0,
|
1345 |
s->first_coded_c_fragment, s->last_coded_c_fragment, residual_eob_run); |
1346 |
|
1347 |
/* fetch the AC table indices */
|
1348 |
ac_y_table = get_bits(gb, 4);
|
1349 |
ac_c_table = get_bits(gb, 4);
|
1350 |
|
1351 |
/* unpack the group 1 AC coefficients (coeffs 1-5) */
|
1352 |
for (i = 1; i <= 5; i++) { |
1353 |
|
1354 |
debug_vp3(" vp3: unpacking level %d Y plane AC coefficients using table %d\n",
|
1355 |
i, ac_y_table); |
1356 |
residual_eob_run = unpack_vlcs(s, gb, &s->ac_vlc_1[ac_y_table], i, |
1357 |
s->first_coded_y_fragment, s->last_coded_y_fragment, residual_eob_run); |
1358 |
|
1359 |
debug_vp3(" vp3: unpacking level %d C plane AC coefficients using table %d\n",
|
1360 |
i, ac_c_table); |
1361 |
residual_eob_run = unpack_vlcs(s, gb, &s->ac_vlc_1[ac_c_table], i, |
1362 |
s->first_coded_c_fragment, s->last_coded_c_fragment, residual_eob_run); |
1363 |
} |
1364 |
|
1365 |
/* unpack the group 2 AC coefficients (coeffs 6-14) */
|
1366 |
for (i = 6; i <= 14; i++) { |
1367 |
|
1368 |
debug_vp3(" vp3: unpacking level %d Y plane AC coefficients using table %d\n",
|
1369 |
i, ac_y_table); |
1370 |
residual_eob_run = unpack_vlcs(s, gb, &s->ac_vlc_2[ac_y_table], i, |
1371 |
s->first_coded_y_fragment, s->last_coded_y_fragment, residual_eob_run); |
1372 |
|
1373 |
debug_vp3(" vp3: unpacking level %d C plane AC coefficients using table %d\n",
|
1374 |
i, ac_c_table); |
1375 |
residual_eob_run = unpack_vlcs(s, gb, &s->ac_vlc_2[ac_c_table], i, |
1376 |
s->first_coded_c_fragment, s->last_coded_c_fragment, residual_eob_run); |
1377 |
} |
1378 |
|
1379 |
/* unpack the group 3 AC coefficients (coeffs 15-27) */
|
1380 |
for (i = 15; i <= 27; i++) { |
1381 |
|
1382 |
debug_vp3(" vp3: unpacking level %d Y plane AC coefficients using table %d\n",
|
1383 |
i, ac_y_table); |
1384 |
residual_eob_run = unpack_vlcs(s, gb, &s->ac_vlc_3[ac_y_table], i, |
1385 |
s->first_coded_y_fragment, s->last_coded_y_fragment, residual_eob_run); |
1386 |
|
1387 |
debug_vp3(" vp3: unpacking level %d C plane AC coefficients using table %d\n",
|
1388 |
i, ac_c_table); |
1389 |
residual_eob_run = unpack_vlcs(s, gb, &s->ac_vlc_3[ac_c_table], i, |
1390 |
s->first_coded_c_fragment, s->last_coded_c_fragment, residual_eob_run); |
1391 |
} |
1392 |
|
1393 |
/* unpack the group 4 AC coefficients (coeffs 28-63) */
|
1394 |
for (i = 28; i <= 63; i++) { |
1395 |
|
1396 |
debug_vp3(" vp3: unpacking level %d Y plane AC coefficients using table %d\n",
|
1397 |
i, ac_y_table); |
1398 |
residual_eob_run = unpack_vlcs(s, gb, &s->ac_vlc_4[ac_y_table], i, |
1399 |
s->first_coded_y_fragment, s->last_coded_y_fragment, residual_eob_run); |
1400 |
|
1401 |
debug_vp3(" vp3: unpacking level %d C plane AC coefficients using table %d\n",
|
1402 |
i, ac_c_table); |
1403 |
residual_eob_run = unpack_vlcs(s, gb, &s->ac_vlc_4[ac_c_table], i, |
1404 |
s->first_coded_c_fragment, s->last_coded_c_fragment, residual_eob_run); |
1405 |
} |
1406 |
|
1407 |
return 0; |
1408 |
} |
1409 |
|
1410 |
/*
|
1411 |
* This function reverses the DC prediction for each coded fragment in
|
1412 |
* the frame. Much of this function is adapted directly from the original
|
1413 |
* VP3 source code.
|
1414 |
*/
|
1415 |
#define COMPATIBLE_FRAME(x) \
|
1416 |
(compatible_frame[s->all_fragments[x].coding_method] == current_frame_type) |
1417 |
#define FRAME_CODED(x) (s->all_fragments[x].coding_method != MODE_COPY)
|
1418 |
#define DC_COEFF(u) (s->coeffs[u].index ? 0 : s->coeffs[u].coeff) //FIXME do somethin to simplify this |
1419 |
static inline int iabs (int x) { return ((x < 0) ? -x : x); } |
1420 |
|
1421 |
static void reverse_dc_prediction(Vp3DecodeContext *s, |
1422 |
int first_fragment,
|
1423 |
int fragment_width,
|
1424 |
int fragment_height)
|
1425 |
{ |
1426 |
|
1427 |
#define PUL 8 |
1428 |
#define PU 4 |
1429 |
#define PUR 2 |
1430 |
#define PL 1 |
1431 |
|
1432 |
int x, y;
|
1433 |
int i = first_fragment;
|
1434 |
|
1435 |
/*
|
1436 |
* Fragment prediction groups:
|
1437 |
*
|
1438 |
* 32222222226
|
1439 |
* 10000000004
|
1440 |
* 10000000004
|
1441 |
* 10000000004
|
1442 |
* 10000000004
|
1443 |
*
|
1444 |
* Note: Groups 5 and 7 do not exist as it would mean that the
|
1445 |
* fragment's x coordinate is both 0 and (width - 1) at the same time.
|
1446 |
*/
|
1447 |
int predictor_group;
|
1448 |
short predicted_dc;
|
1449 |
|
1450 |
/* validity flags for the left, up-left, up, and up-right fragments */
|
1451 |
int fl, ful, fu, fur;
|
1452 |
|
1453 |
/* DC values for the left, up-left, up, and up-right fragments */
|
1454 |
int vl, vul, vu, vur;
|
1455 |
|
1456 |
/* indices for the left, up-left, up, and up-right fragments */
|
1457 |
int l, ul, u, ur;
|
1458 |
|
1459 |
/*
|
1460 |
* The 6 fields mean:
|
1461 |
* 0: up-left multiplier
|
1462 |
* 1: up multiplier
|
1463 |
* 2: up-right multiplier
|
1464 |
* 3: left multiplier
|
1465 |
* 4: mask
|
1466 |
* 5: right bit shift divisor (e.g., 7 means >>=7, a.k.a. div by 128)
|
1467 |
*/
|
1468 |
int predictor_transform[16][6] = { |
1469 |
{ 0, 0, 0, 0, 0, 0 }, |
1470 |
{ 0, 0, 0, 1, 0, 0 }, // PL |
1471 |
{ 0, 0, 1, 0, 0, 0 }, // PUR |
1472 |
{ 0, 0, 53, 75, 127, 7 }, // PUR|PL |
1473 |
{ 0, 1, 0, 0, 0, 0 }, // PU |
1474 |
{ 0, 1, 0, 1, 1, 1 }, // PU|PL |
1475 |
{ 0, 1, 0, 0, 0, 0 }, // PU|PUR |
1476 |
{ 0, 0, 53, 75, 127, 7 }, // PU|PUR|PL |
1477 |
{ 1, 0, 0, 0, 0, 0 }, // PUL |
1478 |
{ 0, 0, 0, 1, 0, 0 }, // PUL|PL |
1479 |
{ 1, 0, 1, 0, 1, 1 }, // PUL|PUR |
1480 |
{ 0, 0, 53, 75, 127, 7 }, // PUL|PUR|PL |
1481 |
{ 0, 1, 0, 0, 0, 0 }, // PUL|PU |
1482 |
{-26, 29, 0, 29, 31, 5 }, // PUL|PU|PL |
1483 |
{ 3, 10, 3, 0, 15, 4 }, // PUL|PU|PUR |
1484 |
{-26, 29, 0, 29, 31, 5 } // PUL|PU|PUR|PL |
1485 |
}; |
1486 |
|
1487 |
/* This table shows which types of blocks can use other blocks for
|
1488 |
* prediction. For example, INTRA is the only mode in this table to
|
1489 |
* have a frame number of 0. That means INTRA blocks can only predict
|
1490 |
* from other INTRA blocks. There are 2 golden frame coding types;
|
1491 |
* blocks encoding in these modes can only predict from other blocks
|
1492 |
* that were encoded with these 1 of these 2 modes. */
|
1493 |
    unsigned char compatible_frame[8] = {
        1,    /* MODE_INTER_NO_MV */
        0,    /* MODE_INTRA */
        1,    /* MODE_INTER_PLUS_MV */
        1,    /* MODE_INTER_LAST_MV */
        1,    /* MODE_INTER_PRIOR_MV */
        2,    /* MODE_USING_GOLDEN */
        2,    /* MODE_GOLDEN_MV */
        1     /* MODE_INTER_FOUR_MV */
    };
    int current_frame_type;

    /* there is a last DC predictor for each of the 3 frame types */
    short last_dc[3];

    int transform = 0;

    debug_vp3(" vp3: reversing DC prediction\n");

    vul = vu = vur = vl = 0;
    last_dc[0] = last_dc[1] = last_dc[2] = 0;

    /* for each fragment row... */
    for (y = 0; y < fragment_height; y++) {

        /* for each fragment in a row... */
        for (x = 0; x < fragment_width; x++, i++) {

            /* reverse prediction if this block was coded */
            if (s->all_fragments[i].coding_method != MODE_COPY) {

                current_frame_type =
                    compatible_frame[s->all_fragments[i].coding_method];
                predictor_group = (x == 0) + ((y == 0) << 1) +
                    ((x + 1 == fragment_width) << 2);
                debug_dc_pred(" frag %d: group %d, orig DC = %d, ",
                    i, predictor_group, DC_COEFF(i));

                switch (predictor_group) {

                case 0:
                    /* main body of fragments; consider all 4 possible
                     * fragments for prediction */

                    /* calculate the indices of the predicting fragments */
                    ul = i - fragment_width - 1;
                    u = i - fragment_width;
                    ur = i - fragment_width + 1;
                    l = i - 1;

                    /* fetch the DC values for the predicting fragments */
                    vul = DC_COEFF(ul);
                    vu = DC_COEFF(u);
                    vur = DC_COEFF(ur);
                    vl = DC_COEFF(l);

                    /* figure out which fragments are valid */
                    ful = FRAME_CODED(ul) && COMPATIBLE_FRAME(ul);
                    fu = FRAME_CODED(u) && COMPATIBLE_FRAME(u);
                    fur = FRAME_CODED(ur) && COMPATIBLE_FRAME(ur);
                    fl = FRAME_CODED(l) && COMPATIBLE_FRAME(l);

                    /* decide which predictor transform to use */
                    transform = (fl*PL) | (fu*PU) | (ful*PUL) | (fur*PUR);

                    break;

                case 1:
                    /* left column of fragments, not including top corner;
                     * only consider up and up-right fragments */

                    /* calculate the indices of the predicting fragments */
                    u = i - fragment_width;
                    ur = i - fragment_width + 1;

                    /* fetch the DC values for the predicting fragments */
                    vu = DC_COEFF(u);
                    vur = DC_COEFF(ur);

                    /* figure out which fragments are valid */
                    fur = FRAME_CODED(ur) && COMPATIBLE_FRAME(ur);
                    fu = FRAME_CODED(u) && COMPATIBLE_FRAME(u);

                    /* decide which predictor transform to use */
                    transform = (fu*PU) | (fur*PUR);

                    break;

                case 2:
                case 6:
                    /* top row of fragments, not including top-left frag;
                     * only consider the left fragment for prediction */

                    /* calculate the indices of the predicting fragments */
                    l = i - 1;

                    /* fetch the DC values for the predicting fragments */
                    vl = DC_COEFF(l);

                    /* figure out which fragments are valid */
                    fl = FRAME_CODED(l) && COMPATIBLE_FRAME(l);

                    /* decide which predictor transform to use */
                    transform = (fl*PL);

                    break;

                case 3:
                    /* top-left fragment */

                    /* nothing to predict from in this case */
                    transform = 0;

                    break;

                case 4:
                    /* right column of fragments, not including top corner;
                     * consider up-left, up, and left fragments for
                     * prediction */

                    /* calculate the indices of the predicting fragments */
                    ul = i - fragment_width - 1;
                    u = i - fragment_width;
                    l = i - 1;

                    /* fetch the DC values for the predicting fragments */
                    vul = DC_COEFF(ul);
                    vu = DC_COEFF(u);
                    vl = DC_COEFF(l);

                    /* figure out which fragments are valid */
                    ful = FRAME_CODED(ul) && COMPATIBLE_FRAME(ul);
                    fu = FRAME_CODED(u) && COMPATIBLE_FRAME(u);
                    fl = FRAME_CODED(l) && COMPATIBLE_FRAME(l);

                    /* decide which predictor transform to use */
                    transform = (fl*PL) | (fu*PU) | (ful*PUL);

                    break;

                }

                debug_dc_pred("transform = %d, ", transform);

                if (transform == 0) {

                    /* if there were no fragments to predict from, use last
                     * DC saved */
                    predicted_dc = last_dc[current_frame_type];
                    debug_dc_pred("from last DC (%d) = %d\n",
                        current_frame_type, DC_COEFF(i));

                } else {

                    /* apply the appropriate predictor transform */
                    predicted_dc =
                        (predictor_transform[transform][0] * vul) +
                        (predictor_transform[transform][1] * vu) +
                        (predictor_transform[transform][2] * vur) +
                        (predictor_transform[transform][3] * vl);

                    /* if there is a shift value in the transform, add
                     * the sign bit before the shift */
                    if (predictor_transform[transform][5] != 0) {
                        predicted_dc += ((predicted_dc >> 15) &
                            predictor_transform[transform][4]);
                        predicted_dc >>= predictor_transform[transform][5];
                    }
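                    /* Note on the sign bias: for the usual DC magnitudes the
                     * weighted sum stays within 16-bit range, so for a
                     * negative sum (predicted_dc >> 15) is -1 and the AND
                     * with the mask (always (1 << shift) - 1 in the table)
                     * adds (1 << shift) - 1 before the arithmetic shift,
                     * making the division round toward zero instead of
                     * toward negative infinity. */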

                    /* check for outranging on the [ul u l] and
                     * [ul u ur l] predictors */
                    if ((transform == 13) || (transform == 15)) {
                        if (iabs(predicted_dc - vu) > 128)
                            predicted_dc = vu;
                        else if (iabs(predicted_dc - vl) > 128)
                            predicted_dc = vl;
                        else if (iabs(predicted_dc - vul) > 128)
                            predicted_dc = vul;
                    }

                    debug_dc_pred("from pred DC = %d\n",
                        DC_COEFF(i));
                }

                /* at long last, apply the predictor */
                if(s->coeffs[i].index){
                    *s->next_coeff= s->coeffs[i];
                    s->coeffs[i].index=0;
                    s->coeffs[i].coeff=0;
                    s->coeffs[i].next= s->next_coeff++;
                }
                s->coeffs[i].coeff += predicted_dc;
                /* save the DC */
                last_dc[current_frame_type] = DC_COEFF(i);
                if(DC_COEFF(i) && !(s->all_fragments[i].coeff_count&127)){
                    s->all_fragments[i].coeff_count= 129;
                    // s->all_fragments[i].next_coeff= s->next_coeff;
                    s->coeffs[i].next= s->next_coeff;
                    (s->next_coeff++)->next=NULL;
                }
            }
        }
    }
}


static void horizontal_filter(unsigned char *first_pixel, int stride,
    int *bounding_values);
static void vertical_filter(unsigned char *first_pixel, int stride,
    int *bounding_values);

/*
 * Perform the final rendering for a particular slice of data.
 * The slice number ranges from 0..(macroblock_height - 1).
 */
static void render_slice(Vp3DecodeContext *s, int slice)
{
    int x, y;
    int m, n;
    int i;  /* indicates current fragment */
    int16_t *dequantizer;
    DCTELEM __align16 block[64];
    unsigned char *output_plane;
    unsigned char *last_plane;
    unsigned char *golden_plane;
    int stride;
    int motion_x = 0xdeadbeef, motion_y = 0xdeadbeef;
    int upper_motion_limit, lower_motion_limit;
    int motion_halfpel_index;
    uint8_t *motion_source;
    int plane;
    int plane_width;
    int plane_height;
    int slice_height;
    int current_macroblock_entry = slice * s->macroblock_width * 6;
    int fragment_width;

    if (slice >= s->macroblock_height)
        return;

    for (plane = 0; plane < 3; plane++) {

        /* set up plane-specific parameters */
        if (plane == 0) {
            output_plane = s->current_frame.data[0];
            last_plane = s->last_frame.data[0];
            golden_plane = s->golden_frame.data[0];
            stride = s->current_frame.linesize[0];
            if (!s->flipped_image) stride = -stride;
            upper_motion_limit = 7 * s->current_frame.linesize[0];
            lower_motion_limit = s->height * s->current_frame.linesize[0] + s->width - 8;
            y = slice * FRAGMENT_PIXELS * 2;
            plane_width = s->width;
            plane_height = s->height;
            slice_height = y + FRAGMENT_PIXELS * 2;
            i = s->macroblock_fragments[current_macroblock_entry + 0];
        } else if (plane == 1) {
            output_plane = s->current_frame.data[1];
            last_plane = s->last_frame.data[1];
            golden_plane = s->golden_frame.data[1];
            stride = s->current_frame.linesize[1];
            if (!s->flipped_image) stride = -stride;
            upper_motion_limit = 7 * s->current_frame.linesize[1];
            lower_motion_limit = (s->height / 2) * s->current_frame.linesize[1] + (s->width / 2) - 8;
            y = slice * FRAGMENT_PIXELS;
            plane_width = s->width / 2;
            plane_height = s->height / 2;
            slice_height = y + FRAGMENT_PIXELS;
            i = s->macroblock_fragments[current_macroblock_entry + 4];
        } else {
            output_plane = s->current_frame.data[2];
            last_plane = s->last_frame.data[2];
            golden_plane = s->golden_frame.data[2];
            stride = s->current_frame.linesize[2];
            if (!s->flipped_image) stride = -stride;
            upper_motion_limit = 7 * s->current_frame.linesize[2];
            lower_motion_limit = (s->height / 2) * s->current_frame.linesize[2] + (s->width / 2) - 8;
            y = slice * FRAGMENT_PIXELS;
            plane_width = s->width / 2;
            plane_height = s->height / 2;
            slice_height = y + FRAGMENT_PIXELS;
            i = s->macroblock_fragments[current_macroblock_entry + 5];
        }
        fragment_width = plane_width / FRAGMENT_PIXELS;

        if(ABS(stride) > 2048)
            return; //various tables are fixed size

        /* for each fragment row in the slice (both of them)... */
        for (; y < slice_height; y += 8) {

            /* for each fragment in a row... */
            for (x = 0; x < plane_width; x += 8, i++) {

                if ((i < 0) || (i >= s->fragment_count)) {
                    av_log(s->avctx, AV_LOG_ERROR, " vp3:render_slice(): bad fragment number (%d)\n", i);
                    return;
                }

                /* transform if this block was coded */
                if ((s->all_fragments[i].coding_method != MODE_COPY) &&
                    !((s->avctx->flags & CODEC_FLAG_GRAY) && plane)) {

                    if ((s->all_fragments[i].coding_method == MODE_USING_GOLDEN) ||
                        (s->all_fragments[i].coding_method == MODE_GOLDEN_MV))
                        motion_source= golden_plane;
                    else
                        motion_source= last_plane;

                    motion_source += s->all_fragments[i].first_pixel;
                    motion_halfpel_index = 0;

                    /* sort out the motion vector if this fragment is coded
                     * using a motion vector method */
                    if ((s->all_fragments[i].coding_method > MODE_INTRA) &&
                        (s->all_fragments[i].coding_method != MODE_USING_GOLDEN)) {
                        int src_x, src_y;
                        motion_x = s->all_fragments[i].motion_x;
                        motion_y = s->all_fragments[i].motion_y;
                        if(plane){
                            motion_x= (motion_x>>1) | (motion_x&1);
                            motion_y= (motion_y>>1) | (motion_y&1);
                        }

                        src_x= (motion_x>>1) + x;
                        src_y= (motion_y>>1) + y;
                        if ((motion_x == 127) || (motion_y == 127))
                            av_log(s->avctx, AV_LOG_ERROR, " help! got invalid motion vector! (%X, %X)\n", motion_x, motion_y);

                        motion_halfpel_index = motion_x & 0x01;
                        motion_source += (motion_x >> 1);

                        motion_halfpel_index |= (motion_y & 0x01) << 1;
                        motion_source += ((motion_y >> 1) * stride);

                        if(src_x<0 || src_y<0 || src_x + 9 >= plane_width || src_y + 9 >= plane_height){
                            uint8_t *temp= s->edge_emu_buffer;
                            if(stride<0) temp -= 9*stride;
                            else temp += 9*stride;

                            ff_emulated_edge_mc(temp, motion_source, stride, 9, 9, src_x, src_y, plane_width, plane_height);
                            motion_source= temp;
                        }
                    }
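                    /* at this point motion_halfpel_index encodes the
                     * sub-pixel phase of the vector: bit 0 is set for a
                     * horizontal half-pel offset, bit 1 for a vertical one;
                     * index 3 (both set) is the diagonal case handled below
                     * by averaging two source blocks */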


                    /* first, take care of copying a block from either the
                     * previous or the golden frame */
                    if (s->all_fragments[i].coding_method != MODE_INTRA) {
                        /* Note: it is possible to implement all MC cases with
                           put_no_rnd_pixels_l2 which would look more like the
                           VP3 source but this would be slower as
                           put_no_rnd_pixels_tab is better optimized */
                        if(motion_halfpel_index != 3){
                            s->dsp.put_no_rnd_pixels_tab[1][motion_halfpel_index](
                                output_plane + s->all_fragments[i].first_pixel,
                                motion_source, stride, 8);
                        }else{
                            int d= (motion_x ^ motion_y)>>31; // d is 0 if motion_x and _y have the same sign, else -1
                            s->dsp.put_no_rnd_pixels_l2[1](
                                output_plane + s->all_fragments[i].first_pixel,
                                motion_source - d,
                                motion_source + stride + 1 + d,
                                stride, 8);
                        }
                        dequantizer = s->inter_dequant;
                    }else{
                        if (plane == 0)
                            dequantizer = s->intra_y_dequant;
                        else
                            dequantizer = s->intra_c_dequant;
                    }

                    /* dequantize the DCT coefficients */
                    debug_idct("fragment %d, coding mode %d, DC = %d, dequant = %d:\n",
                        i, s->all_fragments[i].coding_method,
                        DC_COEFF(i), dequantizer[0]);

                    if(s->avctx->idct_algo==FF_IDCT_VP3){
                        Coeff *coeff= s->coeffs + i;
                        memset(block, 0, sizeof(block));
                        while(coeff->next){
                            block[coeff->index]= coeff->coeff * dequantizer[coeff->index];
                            coeff= coeff->next;
                        }
                    }else{
                        Coeff *coeff= s->coeffs + i;
                        memset(block, 0, sizeof(block));
                        while(coeff->next){
                            block[coeff->index]= (coeff->coeff * dequantizer[coeff->index] + 2)>>2;
                            coeff= coeff->next;
                        }
                    }

                    /* invert DCT and place (or add) in final output */

                    if (s->all_fragments[i].coding_method == MODE_INTRA) {
                        if(s->avctx->idct_algo!=FF_IDCT_VP3)
                            block[0] += 128<<3;
                        s->dsp.idct_put(
                            output_plane + s->all_fragments[i].first_pixel,
                            stride,
                            block);
                    } else {
                        s->dsp.idct_add(
                            output_plane + s->all_fragments[i].first_pixel,
                            stride,
                            block);
                    }

                    debug_idct("block after idct_%s():\n",
                        (s->all_fragments[i].coding_method == MODE_INTRA)?
                        "put" : "add");
                    for (m = 0; m < 8; m++) {
                        for (n = 0; n < 8; n++) {
                            debug_idct(" %3d", *(output_plane +
                                s->all_fragments[i].first_pixel + (m * stride + n)));
                        }
                        debug_idct("\n");
                    }
                    debug_idct("\n");

                } else {

                    /* copy directly from the previous frame */
                    s->dsp.put_pixels_tab[1][0](
                        output_plane + s->all_fragments[i].first_pixel,
                        last_plane + s->all_fragments[i].first_pixel,
                        stride, 8);

                }
#if 0
                /* perform the left edge filter if:
                 *   - the fragment is not on the left column
                 *   - the fragment is coded in this frame
                 *   - the fragment is not coded in this frame but the left
                 *     fragment is coded in this frame (this is done instead
                 *     of a right edge filter when rendering the left fragment
                 *     since this fragment is not available yet) */
                if ((x > 0) &&
                    ((s->all_fragments[i].coding_method != MODE_COPY) ||
                     ((s->all_fragments[i].coding_method == MODE_COPY) &&
                      (s->all_fragments[i - 1].coding_method != MODE_COPY)) )) {
                    horizontal_filter(
                        output_plane + s->all_fragments[i].first_pixel + 7*stride,
                        -stride, bounding_values);
                }

                /* perform the top edge filter if:
                 *   - the fragment is not on the top row
                 *   - the fragment is coded in this frame
                 *   - the fragment is not coded in this frame but the above
                 *     fragment is coded in this frame (this is done instead
                 *     of a bottom edge filter when rendering the above
                 *     fragment since this fragment is not available yet) */
                if ((y > 0) &&
                    ((s->all_fragments[i].coding_method != MODE_COPY) ||
                     ((s->all_fragments[i].coding_method == MODE_COPY) &&
                      (s->all_fragments[i - fragment_width].coding_method != MODE_COPY)) )) {
                    vertical_filter(
                        output_plane + s->all_fragments[i].first_pixel - stride,
                        -stride, bounding_values);
                }
#endif
            }
        }
    }

    /* this looks like a good place for slice dispatch... */
    /* algorithm:
     *   if (slice == s->macroblock_height - 1)
     *     dispatch (both last slice & 2nd-to-last slice);
     *   else if (slice > 0)
     *     dispatch (slice - 1);
     */

    emms_c();
}

static void horizontal_filter(unsigned char *first_pixel, int stride,
    int *bounding_values)
{
    unsigned char *end;
    int filter_value;

    for (end= first_pixel + 8*stride; first_pixel < end; first_pixel += stride) {
        filter_value =
            (first_pixel[-2] - first_pixel[ 1])
         +3*(first_pixel[ 0] - first_pixel[-1]);
        filter_value = bounding_values[(filter_value + 4) >> 3];
        first_pixel[-1] = clip_uint8(first_pixel[-1] + filter_value);
        first_pixel[ 0] = clip_uint8(first_pixel[ 0] - filter_value);
    }
}
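/* Both edge filters compute the same 1-D response across a block edge: with
 * p0..p3 being the four pixels straddling the edge, the raw value is
 * (p0 - p3) + 3*(p2 - p1), rounded and divided by 8, then run through the
 * bounding_values[] table before being added to p1 and subtracted from p2. */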

static void vertical_filter(unsigned char *first_pixel, int stride,
    int *bounding_values)
{
    unsigned char *end;
    int filter_value;
    const int nstride= -stride;

    for (end= first_pixel + 8; first_pixel < end; first_pixel++) {
        filter_value =
            (first_pixel[2 * nstride] - first_pixel[ stride])
         +3*(first_pixel[0          ] - first_pixel[nstride]);
        filter_value = bounding_values[(filter_value + 4) >> 3];
        first_pixel[nstride] = clip_uint8(first_pixel[nstride] + filter_value);
        first_pixel[0] = clip_uint8(first_pixel[0] - filter_value);
    }
}

static void apply_loop_filter(Vp3DecodeContext *s)
{
    int x, y, plane;
    int width, height;
    int fragment;
    int stride;
    unsigned char *plane_data;
    int *bounding_values= s->bounding_values_array+127;

#if 0
    int bounding_values_array[256];
    int filter_limit;

    /* find the right loop limit value */
    for (x = 63; x >= 0; x--) {
        if (vp31_ac_scale_factor[x] >= s->quality_index)
            break;
    }
    filter_limit = vp31_filter_limit_values[s->quality_index];

    /* set up the bounding values */
    memset(bounding_values_array, 0, 256 * sizeof(int));
    for (x = 0; x < filter_limit; x++) {
        bounding_values[-x - filter_limit] = -filter_limit + x;
        bounding_values[-x] = -x;
        bounding_values[x] = x;
        bounding_values[x + filter_limit] = filter_limit - x;
    }
#endif

    for (plane = 0; plane < 3; plane++) {

        if (plane == 0) {
            /* Y plane parameters */
            fragment = 0;
            width = s->fragment_width;
            height = s->fragment_height;
            stride = s->current_frame.linesize[0];
            plane_data = s->current_frame.data[0];
        } else if (plane == 1) {
            /* U plane parameters */
            fragment = s->u_fragment_start;
            width = s->fragment_width / 2;
            height = s->fragment_height / 2;
            stride = s->current_frame.linesize[1];
            plane_data = s->current_frame.data[1];
        } else {
            /* V plane parameters */
            fragment = s->v_fragment_start;
            width = s->fragment_width / 2;
            height = s->fragment_height / 2;
            stride = s->current_frame.linesize[2];
            plane_data = s->current_frame.data[2];
        }

        for (y = 0; y < height; y++) {

            for (x = 0; x < width; x++) {
                START_TIMER
                /* do not perform left edge filter for left column fragments */
                if ((x > 0) &&
                    (s->all_fragments[fragment].coding_method != MODE_COPY)) {
                    horizontal_filter(
                        plane_data + s->all_fragments[fragment].first_pixel - 7*stride,
                        stride, bounding_values);
                }

                /* do not perform top edge filter for top row fragments */
                if ((y > 0) &&
                    (s->all_fragments[fragment].coding_method != MODE_COPY)) {
                    vertical_filter(
                        plane_data + s->all_fragments[fragment].first_pixel + stride,
                        stride, bounding_values);
                }

                /* do not perform right edge filter for right column
                 * fragments or if right fragment neighbor is also coded
                 * in this frame (it will be filtered in next iteration) */
                if ((x < width - 1) &&
                    (s->all_fragments[fragment].coding_method != MODE_COPY) &&
                    (s->all_fragments[fragment + 1].coding_method == MODE_COPY)) {
                    horizontal_filter(
                        plane_data + s->all_fragments[fragment + 1].first_pixel - 7*stride,
                        stride, bounding_values);
                }

                /* do not perform bottom edge filter for bottom row
                 * fragments or if bottom fragment neighbor is also coded
                 * in this frame (it will be filtered in the next row) */
                if ((y < height - 1) &&
                    (s->all_fragments[fragment].coding_method != MODE_COPY) &&
                    (s->all_fragments[fragment + width].coding_method == MODE_COPY)) {
                    vertical_filter(
                        plane_data + s->all_fragments[fragment + width].first_pixel + stride,
                        stride, bounding_values);
                }

                fragment++;
                STOP_TIMER("loop filter")
            }
        }
    }
}

/*
 * This function computes the first pixel addresses for each fragment.
 * This function needs to be invoked after the first frame is allocated
 * so that it has access to the plane strides.
 */
static void vp3_calculate_pixel_addresses(Vp3DecodeContext *s)
{

    int i, x, y;

    /* figure out the first pixel addresses for each of the fragments */
    /* Y plane */
    i = 0;
    for (y = s->fragment_height; y > 0; y--) {
        for (x = 0; x < s->fragment_width; x++) {
            s->all_fragments[i++].first_pixel =
                s->golden_frame.linesize[0] * y * FRAGMENT_PIXELS -
                s->golden_frame.linesize[0] +
                x * FRAGMENT_PIXELS;
            debug_init(" fragment %d, first pixel @ %d\n",
                i-1, s->all_fragments[i-1].first_pixel);
        }
    }

    /* U plane */
    i = s->u_fragment_start;
    for (y = s->fragment_height / 2; y > 0; y--) {
        for (x = 0; x < s->fragment_width / 2; x++) {
            s->all_fragments[i++].first_pixel =
                s->golden_frame.linesize[1] * y * FRAGMENT_PIXELS -
                s->golden_frame.linesize[1] +
                x * FRAGMENT_PIXELS;
            debug_init(" fragment %d, first pixel @ %d\n",
                i-1, s->all_fragments[i-1].first_pixel);
        }
    }

    /* V plane */
    i = s->v_fragment_start;
    for (y = s->fragment_height / 2; y > 0; y--) {
        for (x = 0; x < s->fragment_width / 2; x++) {
            s->all_fragments[i++].first_pixel =
                s->golden_frame.linesize[2] * y * FRAGMENT_PIXELS -
                s->golden_frame.linesize[2] +
                x * FRAGMENT_PIXELS;
            debug_init(" fragment %d, first pixel @ %d\n",
                i-1, s->all_fragments[i-1].first_pixel);
        }
    }
}

/* FIXME: this should be merged with the above! */
static void theora_calculate_pixel_addresses(Vp3DecodeContext *s)
{

    int i, x, y;

    /* figure out the first pixel addresses for each of the fragments */
    /* Y plane */
    i = 0;
    for (y = 1; y <= s->fragment_height; y++) {
        for (x = 0; x < s->fragment_width; x++) {
            s->all_fragments[i++].first_pixel =
                s->golden_frame.linesize[0] * y * FRAGMENT_PIXELS -
                s->golden_frame.linesize[0] +
                x * FRAGMENT_PIXELS;
            debug_init(" fragment %d, first pixel @ %d\n",
                i-1, s->all_fragments[i-1].first_pixel);
        }
    }

    /* U plane */
    i = s->u_fragment_start;
    for (y = 1; y <= s->fragment_height / 2; y++) {
        for (x = 0; x < s->fragment_width / 2; x++) {
            s->all_fragments[i++].first_pixel =
                s->golden_frame.linesize[1] * y * FRAGMENT_PIXELS -
                s->golden_frame.linesize[1] +
                x * FRAGMENT_PIXELS;
            debug_init(" fragment %d, first pixel @ %d\n",
                i-1, s->all_fragments[i-1].first_pixel);
        }
    }

    /* V plane */
    i = s->v_fragment_start;
    for (y = 1; y <= s->fragment_height / 2; y++) {
        for (x = 0; x < s->fragment_width / 2; x++) {
            s->all_fragments[i++].first_pixel =
                s->golden_frame.linesize[2] * y * FRAGMENT_PIXELS -
                s->golden_frame.linesize[2] +
                x * FRAGMENT_PIXELS;
            debug_init(" fragment %d, first pixel @ %d\n",
                i-1, s->all_fragments[i-1].first_pixel);
        }
    }
}

/*
 * This is the ffmpeg/libavcodec API init function.
 */
static int vp3_decode_init(AVCodecContext *avctx)
{
    Vp3DecodeContext *s = avctx->priv_data;
    int i;
    int c_width;
    int c_height;
    int y_superblock_count;
    int c_superblock_count;

    if (avctx->codec_tag == MKTAG('V','P','3','0'))
        s->version = 0;
    else
        s->version = 1;

    s->avctx = avctx;
    s->width = (avctx->width + 15) & 0xFFFFFFF0;
    s->height = (avctx->height + 15) & 0xFFFFFFF0;
    avctx->pix_fmt = PIX_FMT_YUV420P;
    avctx->has_b_frames = 0;
    if(avctx->idct_algo==FF_IDCT_AUTO)
        avctx->idct_algo=FF_IDCT_VP3;
    dsputil_init(&s->dsp, avctx);

    ff_init_scantable(s->dsp.idct_permutation, &s->scantable, ff_zigzag_direct);

    /* initialize to an impossible value which will force a recalculation
     * in the first frame decode */
    s->quality_index = -1;

    s->y_superblock_width = (s->width + 31) / 32;
    s->y_superblock_height = (s->height + 31) / 32;
    y_superblock_count = s->y_superblock_width * s->y_superblock_height;

    /* work out the dimensions for the C planes */
    c_width = s->width / 2;
    c_height = s->height / 2;
    s->c_superblock_width = (c_width + 31) / 32;
    s->c_superblock_height = (c_height + 31) / 32;
    c_superblock_count = s->c_superblock_width * s->c_superblock_height;

    s->superblock_count = y_superblock_count + (c_superblock_count * 2);
    s->u_superblock_start = y_superblock_count;
    s->v_superblock_start = s->u_superblock_start + c_superblock_count;
    s->superblock_coding = av_malloc(s->superblock_count);

    s->macroblock_width = (s->width + 15) / 16;
    s->macroblock_height = (s->height + 15) / 16;
    s->macroblock_count = s->macroblock_width * s->macroblock_height;

    s->fragment_width = s->width / FRAGMENT_PIXELS;
    s->fragment_height = s->height / FRAGMENT_PIXELS;

    /* fragment count covers all 8x8 blocks for all 3 planes */
    s->fragment_count = s->fragment_width * s->fragment_height * 3 / 2;
    s->u_fragment_start = s->fragment_width * s->fragment_height;
    s->v_fragment_start = s->fragment_width * s->fragment_height * 5 / 4;

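    /* For example, a 320x240 stream yields 40x30 = 1200 Y fragments and
     * 1800 fragments in total (u_fragment_start = 1200,
     * v_fragment_start = 1500), 10x8 = 80 Y superblocks plus 2 * (5x4) = 40
     * C superblocks (120 in total), and 20x15 = 300 macroblocks. */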
    debug_init(" Y plane: %d x %d\n", s->width, s->height);
    debug_init(" C plane: %d x %d\n", c_width, c_height);
    debug_init(" Y superblocks: %d x %d, %d total\n",
        s->y_superblock_width, s->y_superblock_height, y_superblock_count);
    debug_init(" C superblocks: %d x %d, %d total\n",
        s->c_superblock_width, s->c_superblock_height, c_superblock_count);
    debug_init(" total superblocks = %d, U starts @ %d, V starts @ %d\n",
        s->superblock_count, s->u_superblock_start, s->v_superblock_start);
    debug_init(" macroblocks: %d x %d, %d total\n",
        s->macroblock_width, s->macroblock_height, s->macroblock_count);
    debug_init(" %d fragments, %d x %d, u starts @ %d, v starts @ %d\n",
        s->fragment_count,
        s->fragment_width,
        s->fragment_height,
        s->u_fragment_start,
        s->v_fragment_start);

    s->all_fragments = av_malloc(s->fragment_count * sizeof(Vp3Fragment));
    s->coeffs = av_malloc(s->fragment_count * sizeof(Coeff) * 65);
    s->coded_fragment_list = av_malloc(s->fragment_count * sizeof(int));
    s->pixel_addresses_inited = 0;

    if (!s->theora_tables)
    {
        for (i = 0; i < 64; i++)
            s->coded_dc_scale_factor[i] = vp31_dc_scale_factor[i];
        for (i = 0; i < 64; i++)
            s->coded_ac_scale_factor[i] = vp31_ac_scale_factor[i];
        for (i = 0; i < 64; i++)
            s->coded_intra_y_dequant[i] = vp31_intra_y_dequant[i];
        for (i = 0; i < 64; i++)
            s->coded_intra_c_dequant[i] = vp31_intra_c_dequant[i];
        for (i = 0; i < 64; i++)
            s->coded_inter_dequant[i] = vp31_inter_dequant[i];
        for (i = 0; i < 64; i++)
            s->filter_limit_values[i] = vp31_filter_limit_values[i];

        /* init VLC tables */
        for (i = 0; i < 16; i++) {

            /* DC histograms */
            init_vlc(&s->dc_vlc[i], 5, 32,
                &dc_bias[i][0][1], 4, 2,
                &dc_bias[i][0][0], 4, 2, 0);

            /* group 1 AC histograms */
            init_vlc(&s->ac_vlc_1[i], 5, 32,
                &ac_bias_0[i][0][1], 4, 2,
                &ac_bias_0[i][0][0], 4, 2, 0);

            /* group 2 AC histograms */
            init_vlc(&s->ac_vlc_2[i], 5, 32,
                &ac_bias_1[i][0][1], 4, 2,
                &ac_bias_1[i][0][0], 4, 2, 0);

            /* group 3 AC histograms */
            init_vlc(&s->ac_vlc_3[i], 5, 32,
                &ac_bias_2[i][0][1], 4, 2,
                &ac_bias_2[i][0][0], 4, 2, 0);

            /* group 4 AC histograms */
            init_vlc(&s->ac_vlc_4[i], 5, 32,
                &ac_bias_3[i][0][1], 4, 2,
                &ac_bias_3[i][0][0], 4, 2, 0);
        }
    } else {
        for (i = 0; i < 16; i++) {

            /* DC histograms */
            init_vlc(&s->dc_vlc[i], 5, 32,
                &s->huffman_table[i][0][1], 4, 2,
                &s->huffman_table[i][0][0], 4, 2, 0);

            /* group 1 AC histograms */
            init_vlc(&s->ac_vlc_1[i], 5, 32,
                &s->huffman_table[i+16][0][1], 4, 2,
                &s->huffman_table[i+16][0][0], 4, 2, 0);

            /* group 2 AC histograms */
            init_vlc(&s->ac_vlc_2[i], 5, 32,
                &s->huffman_table[i+16*2][0][1], 4, 2,
                &s->huffman_table[i+16*2][0][0], 4, 2, 0);

            /* group 3 AC histograms */
            init_vlc(&s->ac_vlc_3[i], 5, 32,
                &s->huffman_table[i+16*3][0][1], 4, 2,
                &s->huffman_table[i+16*3][0][0], 4, 2, 0);

            /* group 4 AC histograms */
            init_vlc(&s->ac_vlc_4[i], 5, 32,
                &s->huffman_table[i+16*4][0][1], 4, 2,
                &s->huffman_table[i+16*4][0][0], 4, 2, 0);
        }
    }

    init_vlc(&s->superblock_run_length_vlc, 6, 34,
        &superblock_run_length_vlc_table[0][1], 4, 2,
        &superblock_run_length_vlc_table[0][0], 4, 2, 0);

    init_vlc(&s->fragment_run_length_vlc, 5, 30,
        &fragment_run_length_vlc_table[0][1], 4, 2,
        &fragment_run_length_vlc_table[0][0], 4, 2, 0);

    init_vlc(&s->mode_code_vlc, 3, 8,
        &mode_code_vlc_table[0][1], 2, 1,
        &mode_code_vlc_table[0][0], 2, 1, 0);

    init_vlc(&s->motion_vector_vlc, 6, 63,
        &motion_vector_vlc_table[0][1], 2, 1,
        &motion_vector_vlc_table[0][0], 2, 1, 0);

    /* work out the block mapping tables */
    s->superblock_fragments = av_malloc(s->superblock_count * 16 * sizeof(int));
    s->superblock_macroblocks = av_malloc(s->superblock_count * 4 * sizeof(int));
    s->macroblock_fragments = av_malloc(s->macroblock_count * 6 * sizeof(int));
    s->macroblock_coding = av_malloc(s->macroblock_count + 1);
    init_block_mapping(s);

    for (i = 0; i < 3; i++) {
        s->current_frame.data[i] = NULL;
        s->last_frame.data[i] = NULL;
        s->golden_frame.data[i] = NULL;
    }

    return 0;
}

/*
 * This is the ffmpeg/libavcodec API frame decode function.
 */
static int vp3_decode_frame(AVCodecContext *avctx,
                            void *data, int *data_size,
                            uint8_t *buf, int buf_size)
{
    Vp3DecodeContext *s = avctx->priv_data;
    GetBitContext gb;
    static int counter = 0;
    int i;

    init_get_bits(&gb, buf, buf_size * 8);

    if (s->theora && get_bits1(&gb))
    {
#if 1
        av_log(avctx, AV_LOG_ERROR, "Header packet passed to frame decoder, skipping\n");
        return -1;
#else
        int ptype = get_bits(&gb, 7);

        skip_bits(&gb, 6*8); /* "theora" */

        switch(ptype)
        {
            case 1:
                theora_decode_comments(avctx, gb);
                break;
            case 2:
                theora_decode_tables(avctx, gb);
                init_dequantizer(s);
                break;
            default:
                av_log(avctx, AV_LOG_ERROR, "Unknown Theora config packet: %d\n", ptype);
        }
        return buf_size;
#endif
    }

    s->keyframe = !get_bits1(&gb);
    if (!s->theora)
        skip_bits(&gb, 1);
    s->last_quality_index = s->quality_index;
    s->quality_index = get_bits(&gb, 6);
    if (s->theora >= 0x030200)
        skip_bits1(&gb);

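    /* the picture header parsed so far: the (inverted) keyframe flag, one
     * skipped bit for raw VP3 streams, the 6-bit quality index, and one more
     * skipped bit for Theora >= 3.2.0 streams */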
    if (s->avctx->debug & FF_DEBUG_PICT_INFO)
        av_log(s->avctx, AV_LOG_INFO, " VP3 %sframe #%d: Q index = %d\n",
            s->keyframe?"key":"", counter, s->quality_index);
    counter++;

    if (s->quality_index != s->last_quality_index) {
        init_dequantizer(s);
        init_loop_filter(s);
    }

    if (s->keyframe) {
        if (!s->theora)
        {
            skip_bits(&gb, 4); /* width code */
            skip_bits(&gb, 4); /* height code */
            if (s->version)
            {
                s->version = get_bits(&gb, 5);
                if (counter == 1)
                    av_log(s->avctx, AV_LOG_DEBUG, "VP version: %d\n", s->version);
            }
        }
        if (s->version || s->theora)
        {
            if (get_bits1(&gb))
                av_log(s->avctx, AV_LOG_ERROR, "Warning, unsupported keyframe coding type?!\n");
            skip_bits(&gb, 2); /* reserved? */
        }

        if (s->last_frame.data[0] == s->golden_frame.data[0]) {
            if (s->golden_frame.data[0])
                avctx->release_buffer(avctx, &s->golden_frame);
            s->last_frame= s->golden_frame; /* ensure that we catch any access to this released frame */
        } else {
            if (s->golden_frame.data[0])
                avctx->release_buffer(avctx, &s->golden_frame);
            if (s->last_frame.data[0])
                avctx->release_buffer(avctx, &s->last_frame);
        }

        s->golden_frame.reference = 3;
        if(avctx->get_buffer(avctx, &s->golden_frame) < 0) {
            av_log(s->avctx, AV_LOG_ERROR, "vp3: get_buffer() failed\n");
            return -1;
        }

        /* golden frame is also the current frame */
        memcpy(&s->current_frame, &s->golden_frame, sizeof(AVFrame));

        /* time to figure out pixel addresses? */
        if (!s->pixel_addresses_inited)
        {
            if (!s->flipped_image)
                vp3_calculate_pixel_addresses(s);
            else
                theora_calculate_pixel_addresses(s);
        }
    } else {
        /* allocate a new current frame */
        s->current_frame.reference = 3;
        if(avctx->get_buffer(avctx, &s->current_frame) < 0) {
            av_log(s->avctx, AV_LOG_ERROR, "vp3: get_buffer() failed\n");
            return -1;
        }
    }

    s->current_frame.qscale_table= s->qscale_table; //FIXME allocate individual tables per AVFrame
    s->current_frame.qstride= 0;

    {START_TIMER
    init_frame(s, &gb);
    STOP_TIMER("init_frame")}

#if KEYFRAMES_ONLY
    if (!s->keyframe) {

        memcpy(s->current_frame.data[0], s->golden_frame.data[0],
            s->current_frame.linesize[0] * s->height);
        memcpy(s->current_frame.data[1], s->golden_frame.data[1],
            s->current_frame.linesize[1] * s->height / 2);
        memcpy(s->current_frame.data[2], s->golden_frame.data[2],
            s->current_frame.linesize[2] * s->height / 2);

    } else {
#endif

    {START_TIMER
    if (unpack_superblocks(s, &gb)){
        av_log(s->avctx, AV_LOG_ERROR, "error in unpack_superblocks\n");
        return -1;
    }
    STOP_TIMER("unpack_superblocks")}
    {START_TIMER
    if (unpack_modes(s, &gb)){
        av_log(s->avctx, AV_LOG_ERROR, "error in unpack_modes\n");
        return -1;
    }
    STOP_TIMER("unpack_modes")}
    {START_TIMER
    if (unpack_vectors(s, &gb)){
        av_log(s->avctx, AV_LOG_ERROR, "error in unpack_vectors\n");
        return -1;
    }
    STOP_TIMER("unpack_vectors")}
    {START_TIMER
    if (unpack_dct_coeffs(s, &gb)){
        av_log(s->avctx, AV_LOG_ERROR, "error in unpack_dct_coeffs\n");
        return -1;
    }
    STOP_TIMER("unpack_dct_coeffs")}
    {START_TIMER

    reverse_dc_prediction(s, 0, s->fragment_width, s->fragment_height);
    if ((avctx->flags & CODEC_FLAG_GRAY) == 0) {
        reverse_dc_prediction(s, s->u_fragment_start,
            s->fragment_width / 2, s->fragment_height / 2);
        reverse_dc_prediction(s, s->v_fragment_start,
            s->fragment_width / 2, s->fragment_height / 2);
    }
    STOP_TIMER("reverse_dc_prediction")}
    {START_TIMER

    for (i = 0; i < s->macroblock_height; i++)
        render_slice(s, i);
    STOP_TIMER("render_fragments")}

    {START_TIMER
    apply_loop_filter(s);
    STOP_TIMER("apply_loop_filter")}
#if KEYFRAMES_ONLY
    }
#endif

    *data_size=sizeof(AVFrame);
    *(AVFrame*)data= s->current_frame;

    /* release the last frame, if it is allocated and if it is not the
     * golden frame */
    if ((s->last_frame.data[0]) &&
        (s->last_frame.data[0] != s->golden_frame.data[0]))
        avctx->release_buffer(avctx, &s->last_frame);

    /* shuffle frames (last = current) */
    memcpy(&s->last_frame, &s->current_frame, sizeof(AVFrame));
    s->current_frame.data[0]= NULL; /* ensure that we catch any access to this released frame */

    return buf_size;
}

/*
 * This is the ffmpeg/libavcodec API module cleanup function.
 */
static int vp3_decode_end(AVCodecContext *avctx)
{
    Vp3DecodeContext *s = avctx->priv_data;

    av_free(s->all_fragments);
    av_free(s->coeffs);
    av_free(s->coded_fragment_list);
    av_free(s->superblock_fragments);
    av_free(s->superblock_macroblocks);
    av_free(s->macroblock_fragments);
    av_free(s->macroblock_coding);

    /* release all frames */
    if (s->golden_frame.data[0] && s->golden_frame.data[0] != s->last_frame.data[0])
        avctx->release_buffer(avctx, &s->golden_frame);
    if (s->last_frame.data[0])
        avctx->release_buffer(avctx, &s->last_frame);
    /* no need to release the current_frame since it will always be pointing
     * to the same frame as either the golden or last frame */

    return 0;
}

static int read_huffman_tree(AVCodecContext *avctx, GetBitContext *gb)
{
    Vp3DecodeContext *s = avctx->priv_data;

    if (get_bits(gb, 1)) {
        int token;
        if (s->entries >= 32) { /* overflow */
            av_log(avctx, AV_LOG_ERROR, "huffman tree overflow\n");
            return -1;
        }
        token = get_bits(gb, 5);
        //av_log(avctx, AV_LOG_DEBUG, "hti %d hbits %x token %d entry : %d size %d\n", s->hti, s->hbits, token, s->entries, s->huff_code_size);
        s->huffman_table[s->hti][token][0] = s->hbits;
        s->huffman_table[s->hti][token][1] = s->huff_code_size;
        s->entries++;
    }
    else {
        if (s->huff_code_size >= 32) {/* overflow */
            av_log(avctx, AV_LOG_ERROR, "huffman tree overflow\n");
            return -1;
        }
        s->huff_code_size++;
        s->hbits <<= 1;
        read_huffman_tree(avctx, gb);
        s->hbits |= 1;
        read_huffman_tree(avctx, gb);
        s->hbits >>= 1;
        s->huff_code_size--;
    }
    return 0;
}
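/* Note on read_huffman_tree() above: a 1 bit in the stream denotes a leaf
 * carrying a 5-bit token, whose code word is the accumulated s->hbits with
 * length s->huff_code_size; a 0 bit denotes an internal node, so the function
 * descends into the 0-branch and then the 1-branch before backing the current
 * code word out again. */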

static int theora_decode_header(AVCodecContext *avctx, GetBitContext gb)
{
    Vp3DecodeContext *s = avctx->priv_data;
    int major, minor, micro;

    major = get_bits(&gb, 8); /* version major */
    minor = get_bits(&gb, 8); /* version minor */
    micro = get_bits(&gb, 8); /* version micro */
    av_log(avctx, AV_LOG_INFO, "Theora bitstream version %d.%d.%d\n",
        major, minor, micro);

    /* FIXME: endianness? */
    s->theora = (major << 16) | (minor << 8) | micro;
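    /* e.g. a 3.2.1 bitstream is stored as 0x030201, which is why the version
     * checks below compare against constants like 0x030200 */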

    /* 3.2.0 aka alpha3 has the same frame orientation as original vp3 */
    /* but previous versions have the image flipped relative to vp3 */
    if (s->theora < 0x030200)
    {
        s->flipped_image = 1;
        av_log(avctx, AV_LOG_DEBUG, "Old (<alpha3) Theora bitstream, flipped image\n");
    }

    s->width = get_bits(&gb, 16) << 4;
    s->height = get_bits(&gb, 16) << 4;

    if(avcodec_check_dimensions(avctx, s->width, s->height)){
        av_log(avctx, AV_LOG_ERROR, "Invalid dimensions (%dx%d)\n", s->width, s->height);
        s->width= s->height= 0;
        return -1;
    }

    if (s->theora >= 0x030400)
    {
        skip_bits(&gb, 32); /* total number of superblocks in a frame */
        // fixme, the next field is 36 bits long
        skip_bits(&gb, 32); /* total number of blocks in a frame */
        skip_bits(&gb, 4); /* total number of blocks in a frame */
        skip_bits(&gb, 32); /* total number of macroblocks in a frame */

        skip_bits(&gb, 24); /* frame width */
        skip_bits(&gb, 24); /* frame height */
    }
    else
    {
        skip_bits(&gb, 24); /* frame width */
        skip_bits(&gb, 24); /* frame height */
    }

    skip_bits(&gb, 8); /* offset x */
    skip_bits(&gb, 8); /* offset y */

    skip_bits(&gb, 32); /* fps numerator */
    skip_bits(&gb, 32); /* fps denominator */
    skip_bits(&gb, 24); /* aspect numerator */
    skip_bits(&gb, 24); /* aspect denominator */

    if (s->theora < 0x030200)
        skip_bits(&gb, 5); /* keyframe frequency force */
    skip_bits(&gb, 8); /* colorspace */
    if (s->theora >= 0x030400)
        skip_bits(&gb, 2); /* pixel format: 420,res,422,444 */
    skip_bits(&gb, 24); /* bitrate */

    skip_bits(&gb, 6); /* quality hint */

    if (s->theora >= 0x030200)
    {
        skip_bits(&gb, 5); /* keyframe frequency force */

        if (s->theora < 0x030400)
            skip_bits(&gb, 5); /* spare bits */
    }

    // align_get_bits(&gb);

    avctx->width = s->width;
    avctx->height = s->height;

    return 0;
}

static inline int theora_get_32bit(GetBitContext gb)
{
    int ret = get_bits(&gb, 8);
    ret += get_bits(&gb, 8) << 8;
    ret += get_bits(&gb, 8) << 16;
    ret += get_bits(&gb, 8) << 24;

    return ret;
}

static int theora_decode_comments(AVCodecContext *avctx, GetBitContext gb)
{
    Vp3DecodeContext *s = avctx->priv_data;
    int len;

    if (s->theora <= 0x030200)
    {
        int i, comments;

        // vendor string
        len = get_bits_long(&gb, 32);
        len = le2me_32(len);
        while(len--)
            skip_bits(&gb, 8);

        // user comments
        comments = get_bits_long(&gb, 32);
        comments = le2me_32(comments);
        for (i = 0; i < comments; i++)
        {
            len = get_bits_long(&gb, 32);
            len = be2me_32(len);
            while(len--)
                skip_bits(&gb, 8);
        }
    }
    else
    {
        do {
            len = get_bits_long(&gb, 32);
            len = le2me_32(len);
            if (len <= 0)
                break;
            while (len--)
                skip_bits(&gb, 8);
        } while (1);
    }
    return 0;
}

static int theora_decode_tables(AVCodecContext *avctx, GetBitContext gb)
{
    Vp3DecodeContext *s = avctx->priv_data;
    int i, n, matrices;

    if (s->theora >= 0x030200) {
        n = get_bits(&gb, 3);
        /* loop filter limit values table */
        for (i = 0; i < 64; i++)
            s->filter_limit_values[i] = get_bits(&gb, n);
    }

    if (s->theora >= 0x030200)
        n = get_bits(&gb, 4) + 1;
    else
        n = 16;
    /* quality threshold table */
    for (i = 0; i < 64; i++)
        s->coded_ac_scale_factor[i] = get_bits(&gb, n);

    if (s->theora >= 0x030200)
        n = get_bits(&gb, 4) + 1;
    else
        n = 16;
    /* dc scale factor table */
    for (i = 0; i < 64; i++)
        s->coded_dc_scale_factor[i] = get_bits(&gb, n);

    if (s->theora >= 0x030200)
        matrices = get_bits(&gb, 9) + 1;
    else
        matrices = 3;
    if (matrices != 3) {
        av_log(avctx,AV_LOG_ERROR, "unsupported matrices: %d\n", matrices);
        // return -1;
    }
    /* y coeffs */
    for (i = 0; i < 64; i++)
        s->coded_intra_y_dequant[i] = get_bits(&gb, 8);

    /* uv coeffs */
    for (i = 0; i < 64; i++)
        s->coded_intra_c_dequant[i] = get_bits(&gb, 8);

    /* inter coeffs */
    for (i = 0; i < 64; i++)
        s->coded_inter_dequant[i] = get_bits(&gb, 8);

    /* skip unknown matrices */
    n = matrices - 3;
    while(n--)
        for (i = 0; i < 64; i++)
            skip_bits(&gb, 8);

    for (i = 0; i <= 1; i++) {
        for (n = 0; n <= 2; n++) {
            int newqr;
            if (i > 0 || n > 0)
                newqr = get_bits(&gb, 1);
            else
                newqr = 1;
            if (!newqr) {
                if (i > 0)
                    get_bits(&gb, 1);
            }
            else {
                int qi = 0;
                skip_bits(&gb, av_log2(matrices-1)+1);
                while (qi < 63) {
                    qi += get_bits(&gb, av_log2(63-qi)+1) + 1;
                    skip_bits(&gb, av_log2(matrices-1)+1);
                }
                if (qi > 63) {
                    av_log(avctx, AV_LOG_ERROR, "invalid qi %d > 63\n", qi);
                    return -1;
                }
            }
        }
    }
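    /* the two loops above only walk (and bounds-check) the newer Theora
     * quant-range data that follows the base matrices; the values are read
     * and discarded rather than stored */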

    /* Huffman tables */
    for (s->hti = 0; s->hti < 80; s->hti++) {
        s->entries = 0;
        s->huff_code_size = 1;
        if (!get_bits(&gb, 1)) {
            s->hbits = 0;
            read_huffman_tree(avctx, &gb);
            s->hbits = 1;
            read_huffman_tree(avctx, &gb);
        }
    }

    s->theora_tables = 1;

    return 0;
}

static int theora_decode_init(AVCodecContext *avctx)
{
    Vp3DecodeContext *s = avctx->priv_data;
    GetBitContext gb;
    int ptype;
    uint8_t *p= avctx->extradata;
    int op_bytes, i;

    s->theora = 1;

    if (!avctx->extradata_size)
    {
        av_log(avctx, AV_LOG_ERROR, "Missing extradata!\n");
        return -1;
    }

    for(i=0;i<3;i++) {
        op_bytes = *(p++)<<8;
        op_bytes += *(p++);

        init_get_bits(&gb, p, op_bytes);
        p += op_bytes;

        ptype = get_bits(&gb, 8);
        debug_vp3("Theora headerpacket type: %x\n", ptype);

        if (!(ptype & 0x80))
        {
            av_log(avctx, AV_LOG_ERROR, "Invalid extradata!\n");
            return -1;
        }

        // FIXME: check for this as well
        skip_bits(&gb, 6*8); /* "theora" */

        switch(ptype)
        {
            case 0x80:
                theora_decode_header(avctx, gb);
                break;
            case 0x81:
                // FIXME: is this needed? it breaks sometimes
                // theora_decode_comments(avctx, gb);
                break;
            case 0x82:
                theora_decode_tables(avctx, gb);
                break;
            default:
                av_log(avctx, AV_LOG_ERROR, "Unknown Theora config packet: %d\n", ptype&~0x80);
                break;
        }
    }

    vp3_decode_init(avctx);
    return 0;
}

AVCodec vp3_decoder = {
    "vp3",
    CODEC_TYPE_VIDEO,
    CODEC_ID_VP3,
    sizeof(Vp3DecodeContext),
    vp3_decode_init,
    NULL,
    vp3_decode_end,
    vp3_decode_frame,
    0,
    NULL
};

#ifndef CONFIG_LIBTHEORA
AVCodec theora_decoder = {
    "theora",
    CODEC_TYPE_VIDEO,
    CODEC_ID_THEORA,
    sizeof(Vp3DecodeContext),
    theora_decode_init,
    NULL,
    vp3_decode_end,
    vp3_decode_frame,
    0,
    NULL
};
#endif