ffmpeg / libavcodec / vp3.c @ d23e3e5f
/*
 * Copyright (C) 2003-2004 the ffmpeg project
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/**
 * @file
 * On2 VP3 Video Decoder
 *
 * VP3 Video Decoder by Mike Melanson (mike at multimedia.cx)
 * For more information about the VP3 coding process, visit:
 *   http://wiki.multimedia.cx/index.php?title=On2_VP3
 *
 * Theora decoder by Alex Beregszaszi
 */

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#include "libavcore/imgutils.h"
#include "avcodec.h"
#include "dsputil.h"
#include "get_bits.h"

#include "vp3data.h"
#include "xiph.h"

#define FRAGMENT_PIXELS 8

static av_cold int vp3_decode_end(AVCodecContext *avctx);

//FIXME split things out into their own arrays
typedef struct Vp3Fragment {
    int16_t dc;
    uint8_t coding_method;
    uint8_t qpi;
} Vp3Fragment;

#define SB_NOT_CODED        0
#define SB_PARTIALLY_CODED  1
#define SB_FULLY_CODED      2

// This is the maximum length of a single long bit run that can be encoded
// for superblock coding or block qps. Theora special-cases this to read a
// bit instead of flipping the current bit to allow for runs longer than 4129.
#define MAXIMUM_LONG_BIT_RUN 4129

#define MODE_INTER_NO_MV      0
#define MODE_INTRA            1
#define MODE_INTER_PLUS_MV    2
#define MODE_INTER_LAST_MV    3
#define MODE_INTER_PRIOR_LAST 4
#define MODE_USING_GOLDEN     5
#define MODE_GOLDEN_MV        6
#define MODE_INTER_FOURMV     7
#define CODING_MODE_COUNT     8

/* special internal mode */
#define MODE_COPY             8

/* There are 6 preset schemes, plus a free-form scheme */
static const int ModeAlphabet[6][CODING_MODE_COUNT] =
{
    /* scheme 1: Last motion vector dominates */
    {    MODE_INTER_LAST_MV,    MODE_INTER_PRIOR_LAST,
         MODE_INTER_PLUS_MV,    MODE_INTER_NO_MV,
         MODE_INTRA,            MODE_USING_GOLDEN,
         MODE_GOLDEN_MV,        MODE_INTER_FOURMV },

    /* scheme 2 */
    {    MODE_INTER_LAST_MV,    MODE_INTER_PRIOR_LAST,
         MODE_INTER_NO_MV,      MODE_INTER_PLUS_MV,
         MODE_INTRA,            MODE_USING_GOLDEN,
         MODE_GOLDEN_MV,        MODE_INTER_FOURMV },

    /* scheme 3 */
    {    MODE_INTER_LAST_MV,    MODE_INTER_PLUS_MV,
         MODE_INTER_PRIOR_LAST, MODE_INTER_NO_MV,
         MODE_INTRA,            MODE_USING_GOLDEN,
         MODE_GOLDEN_MV,        MODE_INTER_FOURMV },

    /* scheme 4 */
    {    MODE_INTER_LAST_MV,    MODE_INTER_PLUS_MV,
         MODE_INTER_NO_MV,      MODE_INTER_PRIOR_LAST,
         MODE_INTRA,            MODE_USING_GOLDEN,
         MODE_GOLDEN_MV,        MODE_INTER_FOURMV },

    /* scheme 5: No motion vector dominates */
    {    MODE_INTER_NO_MV,      MODE_INTER_LAST_MV,
         MODE_INTER_PRIOR_LAST, MODE_INTER_PLUS_MV,
         MODE_INTRA,            MODE_USING_GOLDEN,
         MODE_GOLDEN_MV,        MODE_INTER_FOURMV },

    /* scheme 6 */
    {    MODE_INTER_NO_MV,      MODE_USING_GOLDEN,
         MODE_INTER_LAST_MV,    MODE_INTER_PRIOR_LAST,
         MODE_INTER_PLUS_MV,    MODE_INTRA,
         MODE_GOLDEN_MV,        MODE_INTER_FOURMV },

};

static const uint8_t hilbert_offset[16][2] = {
    {0,0}, {1,0}, {1,1}, {0,1},
    {0,2}, {0,3}, {1,3}, {1,2},
    {2,2}, {2,3}, {3,3}, {3,2},
    {3,1}, {2,1}, {2,0}, {3,0}
};

#define MIN_DEQUANT_VAL 2

typedef struct Vp3DecodeContext {
    AVCodecContext *avctx;
    int theora, theora_tables;
    int version;
    int width, height;
    int chroma_x_shift, chroma_y_shift;
    AVFrame golden_frame;
    AVFrame last_frame;
    AVFrame current_frame;
    int keyframe;
    DSPContext dsp;
    int flipped_image;
    int last_slice_end;
    int skip_loop_filter;

    int qps[3];
    int nqps;
    int last_qps[3];

    int superblock_count;
    int y_superblock_width;
    int y_superblock_height;
    int y_superblock_count;
    int c_superblock_width;
    int c_superblock_height;
    int c_superblock_count;
    int u_superblock_start;
    int v_superblock_start;
    unsigned char *superblock_coding;

    int macroblock_count;
    int macroblock_width;
    int macroblock_height;

    int fragment_count;
    int fragment_width[2];
    int fragment_height[2];

    Vp3Fragment *all_fragments;
    int fragment_start[3];
    int data_offset[3];

    int8_t (*motion_val[2])[2];

    ScanTable scantable;

    /* tables */
    uint16_t coded_dc_scale_factor[64];
    uint32_t coded_ac_scale_factor[64];
    uint8_t base_matrix[384][64];
    uint8_t qr_count[2][3];
    uint8_t qr_size [2][3][64];
    uint16_t qr_base[2][3][64];

    /**
     * This is a list of all tokens in bitstream order. Reordering takes place
     * by pulling from each level during IDCT. As a consequence, IDCT must be
     * in Hilbert order, making the minimum slice height 64 for 4:2:0 and 32
     * otherwise. The 32 different tokens with up to 12 bits of extradata are
     * collapsed into 3 types, packed as follows:
     *   (from the low to high bits)
     *
     * 2 bits: type (0,1,2)
     *   0: EOB run, 14 bits for run length (12 needed)
     *   1: zero run, 7 bits for run length
     *                7 bits for the next coefficient (3 needed)
     *   2: coefficient, 14 bits (11 needed)
     *
     * Coefficients are signed, so are packed in the highest bits for automatic
     * sign extension.
     */
    int16_t *dct_tokens[3][64];
    int16_t *dct_tokens_base;
#define TOKEN_EOB(eob_run)              ((eob_run) << 2)
#define TOKEN_ZERO_RUN(coeff, zero_run) (((coeff) << 9) + ((zero_run) << 2) + 1)
#define TOKEN_COEFF(coeff)              (((coeff) << 2) + 2)
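
/* Worked example of the token packing above (editorial illustration, not part
 * of the original source): TOKEN_ZERO_RUN(-3, 5) packs the signed coefficient
 * -3 into bits 9 and up and the run length 5 into bits 2..8, giving
 * (-3 << 9) + (5 << 2) + 1 = -1515.  A reader recovers the type from
 * (token & 3) == 1, the run from (token >> 2) & 0x7f == 5, and the coefficient
 * from the arithmetic shift token >> 9 == -3, which is exactly what
 * vp3_dequant() below relies on for automatic sign extension. */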

    /**
     * number of blocks that contain DCT coefficients at the given level or higher
     */
    int num_coded_frags[3][64];
    int total_num_coded_frags;

    /* this is a list of indexes into the all_fragments array indicating
     * which of the fragments are coded */
    int *coded_fragment_list[3];

    VLC dc_vlc[16];
    VLC ac_vlc_1[16];
    VLC ac_vlc_2[16];
    VLC ac_vlc_3[16];
    VLC ac_vlc_4[16];

    VLC superblock_run_length_vlc;
    VLC fragment_run_length_vlc;
    VLC mode_code_vlc;
    VLC motion_vector_vlc;

    /* these arrays need to be on 16-byte boundaries since SSE2 operations
     * index into them */
    DECLARE_ALIGNED(16, int16_t, qmat)[3][2][3][64];     //<qmat[qpi][is_inter][plane]

    /* This table contains superblock_count * 16 entries. Each set of 16
     * numbers corresponds to the fragment indexes 0..15 of the superblock.
     * An entry will be -1 to indicate that no entry corresponds to that
     * index. */
    int *superblock_fragments;

    /* This is an array that indicates how a particular macroblock
     * is coded. */
    unsigned char *macroblock_coding;

    uint8_t edge_emu_buffer[9*2048]; //FIXME dynamic alloc
    int8_t qscale_table[2048]; //FIXME dynamic alloc (width+15)/16

    /* Huffman decode */
    int hti;
    unsigned int hbits;
    int entries;
    int huff_code_size;
    uint32_t huffman_table[80][32][2];

    uint8_t filter_limit_values[64];
    DECLARE_ALIGNED(8, int, bounding_values_array)[256+2];
} Vp3DecodeContext;

/************************************************************************
 * VP3 specific functions
 ************************************************************************/

/*
 * This function sets up all of the various blocks mappings:
 * superblocks <-> fragments, macroblocks <-> fragments,
 * superblocks <-> macroblocks
 *
 * @return 0 if successful; returns 1 if *anything* went wrong.
 */
static int init_block_mapping(Vp3DecodeContext *s)
{
    int sb_x, sb_y, plane;
    int x, y, i, j = 0;

    for (plane = 0; plane < 3; plane++) {
        int sb_width    = plane ? s->c_superblock_width  : s->y_superblock_width;
        int sb_height   = plane ? s->c_superblock_height : s->y_superblock_height;
        int frag_width  = s->fragment_width[!!plane];
        int frag_height = s->fragment_height[!!plane];

        for (sb_y = 0; sb_y < sb_height; sb_y++)
            for (sb_x = 0; sb_x < sb_width; sb_x++)
                for (i = 0; i < 16; i++) {
                    x = 4*sb_x + hilbert_offset[i][0];
                    y = 4*sb_y + hilbert_offset[i][1];

                    if (x < frag_width && y < frag_height)
                        s->superblock_fragments[j++] = s->fragment_start[plane] + y*frag_width + x;
                    else
                        s->superblock_fragments[j++] = -1;
                }
    }

    return 0;  /* successful path out */
}

/*
 * This function sets up the dequantization tables used for a particular
 * frame.
 */
static void init_dequantizer(Vp3DecodeContext *s, int qpi)
{
    int ac_scale_factor = s->coded_ac_scale_factor[s->qps[qpi]];
    int dc_scale_factor = s->coded_dc_scale_factor[s->qps[qpi]];
    int i, plane, inter, qri, bmi, bmj, qistart;

    for(inter=0; inter<2; inter++){
        for(plane=0; plane<3; plane++){
            int sum=0;
            for(qri=0; qri<s->qr_count[inter][plane]; qri++){
                sum+= s->qr_size[inter][plane][qri];
                if(s->qps[qpi] <= sum)
                    break;
            }
            qistart= sum - s->qr_size[inter][plane][qri];
            bmi= s->qr_base[inter][plane][qri  ];
            bmj= s->qr_base[inter][plane][qri+1];
            for(i=0; i<64; i++){
                int coeff= (  2*(sum    -s->qps[qpi])*s->base_matrix[bmi][i]
                            - 2*(qistart-s->qps[qpi])*s->base_matrix[bmj][i]
                            + s->qr_size[inter][plane][qri])
                           / (2*s->qr_size[inter][plane][qri]);

                int qmin= 8<<(inter + !i);
                int qscale= i ? ac_scale_factor : dc_scale_factor;

                s->qmat[qpi][inter][plane][s->dsp.idct_permutation[i]]= av_clip((qscale * coeff)/100 * 4, qmin, 4096);
            }
            // all DC coefficients use the same quant so as not to interfere with DC prediction
            s->qmat[qpi][inter][plane][0] = s->qmat[0][inter][plane][0];
        }
    }

    memset(s->qscale_table, (FFMAX(s->qmat[0][0][0][1], s->qmat[0][0][1][1])+8)/16, 512); //FIXME finetune
}
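
/* Editorial illustration of the interpolation performed in init_dequantizer()
 * above (not part of the original source): with a single quant range of size
 * qr_size = 63 running from qistart = 0 to sum = 63 (the default VP3 setup in
 * vp3_decode_init()), a quality index s->qps[qpi] = 21 yields
 *   coeff = (2*(63-21)*base[bmi][i] - 2*(0-21)*base[bmj][i] + 63) / (2*63),
 * i.e. a rounded weighted average of 2/3 of the first base matrix and 1/3 of
 * the second.  The result is then scaled by the DC or AC scale factor and
 * clamped to [qmin, 4096]. */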

/*
 * This function initializes the loop filter boundary limits if the frame's
 * quality index is different from the previous frame's.
 *
 * The filter_limit_values may not be larger than 127.
 */
static void init_loop_filter(Vp3DecodeContext *s)
{
    int *bounding_values= s->bounding_values_array+127;
    int filter_limit;
    int x;
    int value;

    filter_limit = s->filter_limit_values[s->qps[0]];

    /* set up the bounding values */
    memset(s->bounding_values_array, 0, 256 * sizeof(int));
    for (x = 0; x < filter_limit; x++) {
        bounding_values[-x] = -x;
        bounding_values[x] = x;
    }
    for (x = value = filter_limit; x < 128 && value; x++, value--) {
        bounding_values[ x] =  value;
        bounding_values[-x] = -value;
    }
    if (value)
        bounding_values[128] = value;
    bounding_values[129] = bounding_values[130] = filter_limit * 0x02020202;
}
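
/* Shape of the bounding_values table built above (editorial illustration):
 * with filter_limit = 4 the positive side holds 0,1,2,3 for x = 0..3, ramps
 * back down 4,3,2,1 for x = 4..7 and is zero beyond that, with mirrored
 * negative values on the negative side.  The loop filter looks up its
 * (clamped) filter value in this table before adjusting the pixels next to a
 * block edge, so small differences across the edge get smoothed while large
 * differences, which are likely real image edges, pass through unchanged. */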

/*
 * This function unpacks all of the superblock/macroblock/fragment coding
 * information from the bitstream.
 */
static int unpack_superblocks(Vp3DecodeContext *s, GetBitContext *gb)
{
    int superblock_starts[3] = { 0, s->u_superblock_start, s->v_superblock_start };
    int bit = 0;
    int current_superblock = 0;
    int current_run = 0;
    int num_partial_superblocks = 0;

    int i, j;
    int current_fragment;
    int plane;

    if (s->keyframe) {
        memset(s->superblock_coding, SB_FULLY_CODED, s->superblock_count);

    } else {

        /* unpack the list of partially-coded superblocks */
        bit = get_bits1(gb) ^ 1;
        current_run = 0;

        while (current_superblock < s->superblock_count && get_bits_left(gb) > 0) {
            if (s->theora && current_run == MAXIMUM_LONG_BIT_RUN)
                bit = get_bits1(gb);
            else
                bit ^= 1;

            current_run = get_vlc2(gb,
                s->superblock_run_length_vlc.table, 6, 2) + 1;
            if (current_run == 34)
                current_run += get_bits(gb, 12);

            if (current_superblock + current_run > s->superblock_count) {
                av_log(s->avctx, AV_LOG_ERROR, "Invalid partially coded superblock run length\n");
                return -1;
            }

            memset(s->superblock_coding + current_superblock, bit, current_run);

            current_superblock += current_run;
            if (bit)
                num_partial_superblocks += current_run;
        }

        /* unpack the list of fully coded superblocks if any of the blocks were
         * not marked as partially coded in the previous step */
        if (num_partial_superblocks < s->superblock_count) {
            int superblocks_decoded = 0;

            current_superblock = 0;
            bit = get_bits1(gb) ^ 1;
            current_run = 0;

            while (superblocks_decoded < s->superblock_count - num_partial_superblocks
                    && get_bits_left(gb) > 0) {

                if (s->theora && current_run == MAXIMUM_LONG_BIT_RUN)
                    bit = get_bits1(gb);
                else
                    bit ^= 1;

                current_run = get_vlc2(gb,
                    s->superblock_run_length_vlc.table, 6, 2) + 1;
                if (current_run == 34)
                    current_run += get_bits(gb, 12);

                for (j = 0; j < current_run; current_superblock++) {
                    if (current_superblock >= s->superblock_count) {
                        av_log(s->avctx, AV_LOG_ERROR, "Invalid fully coded superblock run length\n");
                        return -1;
                    }

                    /* skip any superblocks already marked as partially coded */
                    if (s->superblock_coding[current_superblock] == SB_NOT_CODED) {
                        s->superblock_coding[current_superblock] = 2*bit;
                        j++;
                    }
                }
                superblocks_decoded += current_run;
            }
        }

        /* if there were partial blocks, initialize bitstream for
         * unpacking fragment codings */
        if (num_partial_superblocks) {

            current_run = 0;
            bit = get_bits1(gb);
            /* toggle the bit because as soon as the first run length is
             * fetched the bit will be toggled again */
            bit ^= 1;
        }
    }

    /* figure out which fragments are coded; iterate through each
     * superblock (all planes) */
    s->total_num_coded_frags = 0;
    memset(s->macroblock_coding, MODE_COPY, s->macroblock_count);

    for (plane = 0; plane < 3; plane++) {
        int sb_start = superblock_starts[plane];
        int sb_end = sb_start + (plane ? s->c_superblock_count : s->y_superblock_count);
        int num_coded_frags = 0;

        for (i = sb_start; i < sb_end && get_bits_left(gb) > 0; i++) {

            /* iterate through all 16 fragments in a superblock */
            for (j = 0; j < 16; j++) {

                /* if the fragment is in bounds, check its coding status */
                current_fragment = s->superblock_fragments[i * 16 + j];
                if (current_fragment != -1) {
                    int coded = s->superblock_coding[i];

                    if (s->superblock_coding[i] == SB_PARTIALLY_CODED) {

                        /* fragment may or may not be coded; this is the case
                         * that cares about the fragment coding runs */
                        if (current_run-- == 0) {
                            bit ^= 1;
                            current_run = get_vlc2(gb,
                                s->fragment_run_length_vlc.table, 5, 2);
                        }
                        coded = bit;
                    }

                    if (coded) {
                        /* default mode; actual mode will be decoded in
                         * the next phase */
                        s->all_fragments[current_fragment].coding_method =
                            MODE_INTER_NO_MV;
                        s->coded_fragment_list[plane][num_coded_frags++] =
                            current_fragment;
                    } else {
                        /* not coded; copy this fragment from the prior frame */
                        s->all_fragments[current_fragment].coding_method =
                            MODE_COPY;
                    }
                }
            }
        }
        s->total_num_coded_frags += num_coded_frags;
        for (i = 0; i < 64; i++)
            s->num_coded_frags[plane][i] = num_coded_frags;
        if (plane < 2)
            s->coded_fragment_list[plane+1] = s->coded_fragment_list[plane] + num_coded_frags;
    }
    return 0;
}
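
/* Example of the run-length scheme decoded above (editorial illustration):
 * the partially-coded list starts with one bit giving the initial state; each
 * run length applies to the current state and the state flips before the next
 * run.  So an initial bit of 1 followed by runs 3, 5, 2 marks superblocks 0-2
 * as partially coded, 3-7 as not coded, and 8-9 as partially coded again.
 * A run of exactly MAXIMUM_LONG_BIT_RUN (4129) does not flip the state in
 * Theora; the next state is read explicitly so longer runs are possible. */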

/*
 * This function unpacks all the coding mode data for individual macroblocks
 * from the bitstream.
 */
static int unpack_modes(Vp3DecodeContext *s, GetBitContext *gb)
{
    int i, j, k, sb_x, sb_y;
    int scheme;
    int current_macroblock;
    int current_fragment;
    int coding_mode;
    int custom_mode_alphabet[CODING_MODE_COUNT];
    const int *alphabet;
    Vp3Fragment *frag;

    if (s->keyframe) {
        for (i = 0; i < s->fragment_count; i++)
            s->all_fragments[i].coding_method = MODE_INTRA;

    } else {

        /* fetch the mode coding scheme for this frame */
        scheme = get_bits(gb, 3);

        /* is it a custom coding scheme? */
        if (scheme == 0) {
            for (i = 0; i < 8; i++)
                custom_mode_alphabet[i] = MODE_INTER_NO_MV;
            for (i = 0; i < 8; i++)
                custom_mode_alphabet[get_bits(gb, 3)] = i;
            alphabet = custom_mode_alphabet;
        } else
            alphabet = ModeAlphabet[scheme-1];

        /* iterate through all of the macroblocks that contain 1 or more
         * coded fragments */
        for (sb_y = 0; sb_y < s->y_superblock_height; sb_y++) {
            for (sb_x = 0; sb_x < s->y_superblock_width; sb_x++) {
                if (get_bits_left(gb) <= 0)
                    return -1;

                for (j = 0; j < 4; j++) {
                    int mb_x = 2*sb_x +   (j>>1);
                    int mb_y = 2*sb_y + (((j>>1)+j)&1);
                    current_macroblock = mb_y * s->macroblock_width + mb_x;

                    if (mb_x >= s->macroblock_width || mb_y >= s->macroblock_height)
                        continue;

#define BLOCK_X (2*mb_x + (k&1))
#define BLOCK_Y (2*mb_y + (k>>1))
                    /* coding modes are only stored if the macroblock has at least one
                     * luma block coded, otherwise it must be INTER_NO_MV */
                    for (k = 0; k < 4; k++) {
                        current_fragment = BLOCK_Y*s->fragment_width[0] + BLOCK_X;
                        if (s->all_fragments[current_fragment].coding_method != MODE_COPY)
                            break;
                    }
                    if (k == 4) {
                        s->macroblock_coding[current_macroblock] = MODE_INTER_NO_MV;
                        continue;
                    }

                    /* mode 7 means get 3 bits for each coding mode */
                    if (scheme == 7)
                        coding_mode = get_bits(gb, 3);
                    else
                        coding_mode = alphabet
                            [get_vlc2(gb, s->mode_code_vlc.table, 3, 3)];

                    s->macroblock_coding[current_macroblock] = coding_mode;
                    for (k = 0; k < 4; k++) {
                        frag = s->all_fragments + BLOCK_Y*s->fragment_width[0] + BLOCK_X;
                        if (frag->coding_method != MODE_COPY)
                            frag->coding_method = coding_mode;
                    }

#define SET_CHROMA_MODES \
    if (frag[s->fragment_start[1]].coding_method != MODE_COPY) \
        frag[s->fragment_start[1]].coding_method = coding_mode;\
    if (frag[s->fragment_start[2]].coding_method != MODE_COPY) \
        frag[s->fragment_start[2]].coding_method = coding_mode;

                    if (s->chroma_y_shift) {
                        frag = s->all_fragments + mb_y*s->fragment_width[1] + mb_x;
                        SET_CHROMA_MODES
                    } else if (s->chroma_x_shift) {
                        frag = s->all_fragments + 2*mb_y*s->fragment_width[1] + mb_x;
                        for (k = 0; k < 2; k++) {
                            SET_CHROMA_MODES
                            frag += s->fragment_width[1];
                        }
                    } else {
                        for (k = 0; k < 4; k++) {
                            frag = s->all_fragments + BLOCK_Y*s->fragment_width[1] + BLOCK_X;
                            SET_CHROMA_MODES
                        }
                    }
                }
            }
        }
    }

    return 0;
}

/*
 * This function unpacks all the motion vectors for the individual
 * macroblocks from the bitstream.
 */
static int unpack_vectors(Vp3DecodeContext *s, GetBitContext *gb)
{
    int j, k, sb_x, sb_y;
    int coding_mode;
    int motion_x[4];
    int motion_y[4];
    int last_motion_x = 0;
    int last_motion_y = 0;
    int prior_last_motion_x = 0;
    int prior_last_motion_y = 0;
    int current_macroblock;
    int current_fragment;
    int frag;

    if (s->keyframe)
        return 0;

    /* coding mode 0 is the VLC scheme; 1 is the fixed code scheme */
    coding_mode = get_bits1(gb);

    /* iterate through all of the macroblocks that contain 1 or more
     * coded fragments */
    for (sb_y = 0; sb_y < s->y_superblock_height; sb_y++) {
        for (sb_x = 0; sb_x < s->y_superblock_width; sb_x++) {
            if (get_bits_left(gb) <= 0)
                return -1;

            for (j = 0; j < 4; j++) {
                int mb_x = 2*sb_x +   (j>>1);
                int mb_y = 2*sb_y + (((j>>1)+j)&1);
                current_macroblock = mb_y * s->macroblock_width + mb_x;

                if (mb_x >= s->macroblock_width || mb_y >= s->macroblock_height ||
                    (s->macroblock_coding[current_macroblock] == MODE_COPY))
                    continue;

                switch (s->macroblock_coding[current_macroblock]) {

                case MODE_INTER_PLUS_MV:
                case MODE_GOLDEN_MV:
                    /* all 6 fragments use the same motion vector */
                    if (coding_mode == 0) {
                        motion_x[0] = motion_vector_table[get_vlc2(gb, s->motion_vector_vlc.table, 6, 2)];
                        motion_y[0] = motion_vector_table[get_vlc2(gb, s->motion_vector_vlc.table, 6, 2)];
                    } else {
                        motion_x[0] = fixed_motion_vector_table[get_bits(gb, 6)];
                        motion_y[0] = fixed_motion_vector_table[get_bits(gb, 6)];
                    }

                    /* vector maintenance, only on MODE_INTER_PLUS_MV */
                    if (s->macroblock_coding[current_macroblock] ==
                        MODE_INTER_PLUS_MV) {
                        prior_last_motion_x = last_motion_x;
                        prior_last_motion_y = last_motion_y;
                        last_motion_x = motion_x[0];
                        last_motion_y = motion_y[0];
                    }
                    break;

                case MODE_INTER_FOURMV:
                    /* vector maintenance */
                    prior_last_motion_x = last_motion_x;
                    prior_last_motion_y = last_motion_y;

                    /* fetch 4 vectors from the bitstream, one for each
                     * Y fragment, then average for the C fragment vectors */
                    for (k = 0; k < 4; k++) {
                        current_fragment = BLOCK_Y*s->fragment_width[0] + BLOCK_X;
                        if (s->all_fragments[current_fragment].coding_method != MODE_COPY) {
                            if (coding_mode == 0) {
                                motion_x[k] = motion_vector_table[get_vlc2(gb, s->motion_vector_vlc.table, 6, 2)];
                                motion_y[k] = motion_vector_table[get_vlc2(gb, s->motion_vector_vlc.table, 6, 2)];
                            } else {
                                motion_x[k] = fixed_motion_vector_table[get_bits(gb, 6)];
                                motion_y[k] = fixed_motion_vector_table[get_bits(gb, 6)];
                            }
                            last_motion_x = motion_x[k];
                            last_motion_y = motion_y[k];
                        } else {
                            motion_x[k] = 0;
                            motion_y[k] = 0;
                        }
                    }
                    break;

                case MODE_INTER_LAST_MV:
                    /* all 6 fragments use the last motion vector */
                    motion_x[0] = last_motion_x;
                    motion_y[0] = last_motion_y;

                    /* no vector maintenance (last vector remains the
                     * last vector) */
                    break;

                case MODE_INTER_PRIOR_LAST:
                    /* all 6 fragments use the motion vector prior to the
                     * last motion vector */
                    motion_x[0] = prior_last_motion_x;
                    motion_y[0] = prior_last_motion_y;

                    /* vector maintenance */
                    prior_last_motion_x = last_motion_x;
                    prior_last_motion_y = last_motion_y;
                    last_motion_x = motion_x[0];
                    last_motion_y = motion_y[0];
                    break;

                default:
                    /* covers intra, inter without MV, golden without MV */
                    motion_x[0] = 0;
                    motion_y[0] = 0;

                    /* no vector maintenance */
                    break;
                }

                /* assign the motion vectors to the correct fragments */
                for (k = 0; k < 4; k++) {
                    current_fragment =
                        BLOCK_Y*s->fragment_width[0] + BLOCK_X;
                    if (s->macroblock_coding[current_macroblock] == MODE_INTER_FOURMV) {
                        s->motion_val[0][current_fragment][0] = motion_x[k];
                        s->motion_val[0][current_fragment][1] = motion_y[k];
                    } else {
                        s->motion_val[0][current_fragment][0] = motion_x[0];
                        s->motion_val[0][current_fragment][1] = motion_y[0];
                    }
                }

                if (s->chroma_y_shift) {
                    if (s->macroblock_coding[current_macroblock] == MODE_INTER_FOURMV) {
                        motion_x[0] = RSHIFT(motion_x[0] + motion_x[1] + motion_x[2] + motion_x[3], 2);
                        motion_y[0] = RSHIFT(motion_y[0] + motion_y[1] + motion_y[2] + motion_y[3], 2);
                    }
                    motion_x[0] = (motion_x[0]>>1) | (motion_x[0]&1);
                    motion_y[0] = (motion_y[0]>>1) | (motion_y[0]&1);
                    frag = mb_y*s->fragment_width[1] + mb_x;
                    s->motion_val[1][frag][0] = motion_x[0];
                    s->motion_val[1][frag][1] = motion_y[0];
                } else if (s->chroma_x_shift) {
                    if (s->macroblock_coding[current_macroblock] == MODE_INTER_FOURMV) {
                        motion_x[0] = RSHIFT(motion_x[0] + motion_x[1], 1);
                        motion_y[0] = RSHIFT(motion_y[0] + motion_y[1], 1);
                        motion_x[1] = RSHIFT(motion_x[2] + motion_x[3], 1);
                        motion_y[1] = RSHIFT(motion_y[2] + motion_y[3], 1);
                    } else {
                        motion_x[1] = motion_x[0];
                        motion_y[1] = motion_y[0];
                    }
                    motion_x[0] = (motion_x[0]>>1) | (motion_x[0]&1);
                    motion_x[1] = (motion_x[1]>>1) | (motion_x[1]&1);

                    frag = 2*mb_y*s->fragment_width[1] + mb_x;
                    for (k = 0; k < 2; k++) {
                        s->motion_val[1][frag][0] = motion_x[k];
                        s->motion_val[1][frag][1] = motion_y[k];
                        frag += s->fragment_width[1];
                    }
                } else {
                    for (k = 0; k < 4; k++) {
                        frag = BLOCK_Y*s->fragment_width[1] + BLOCK_X;
                        if (s->macroblock_coding[current_macroblock] == MODE_INTER_FOURMV) {
                            s->motion_val[1][frag][0] = motion_x[k];
                            s->motion_val[1][frag][1] = motion_y[k];
                        } else {
                            s->motion_val[1][frag][0] = motion_x[0];
                            s->motion_val[1][frag][1] = motion_y[0];
                        }
                    }
                }
            }
        }
    }

    return 0;
}
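
/* Note on the chroma vectors derived in unpack_vectors() above (editorial
 * illustration): luma motion vectors are stored in half-pel units, so for
 * 4:2:0 the chroma vector is the (averaged, for MODE_INTER_FOURMV) luma
 * vector divided by two in chroma half-pel units, keeping a half-pel offset
 * when the division is inexact: (mv >> 1) | (mv & 1).  For example a luma
 * mv of 4 becomes 2, 5 becomes 3, and 6 becomes 3. */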

static int unpack_block_qpis(Vp3DecodeContext *s, GetBitContext *gb)
{
    int qpi, i, j, bit, run_length, blocks_decoded, num_blocks_at_qpi;
    int num_blocks = s->total_num_coded_frags;

    for (qpi = 0; qpi < s->nqps-1 && num_blocks > 0; qpi++) {
        i = blocks_decoded = num_blocks_at_qpi = 0;

        bit = get_bits1(gb) ^ 1;
        run_length = 0;

        do {
            if (run_length == MAXIMUM_LONG_BIT_RUN)
                bit = get_bits1(gb);
            else
                bit ^= 1;

            run_length = get_vlc2(gb, s->superblock_run_length_vlc.table, 6, 2) + 1;
            if (run_length == 34)
                run_length += get_bits(gb, 12);
            blocks_decoded += run_length;

            if (!bit)
                num_blocks_at_qpi += run_length;

            for (j = 0; j < run_length; i++) {
                if (i >= s->total_num_coded_frags)
                    return -1;

                if (s->all_fragments[s->coded_fragment_list[0][i]].qpi == qpi) {
                    s->all_fragments[s->coded_fragment_list[0][i]].qpi += bit;
                    j++;
                }
            }
        } while (blocks_decoded < num_blocks && get_bits_left(gb) > 0);

        num_blocks -= num_blocks_at_qpi;
    }

    return 0;
}

/*
 * This function is called by unpack_dct_coeffs() to extract the VLCs from
 * the bitstream. The VLCs encode tokens which are used to unpack DCT
 * data. This function unpacks all the VLCs for either the Y plane or both
 * C planes, and is called for DC coefficients or different AC coefficient
 * levels (since different coefficient types require different VLC tables).
 *
 * This function returns a residual eob run. E.g., if a particular token gave
 * instructions to EOB the next 5 fragments and there were only 2 fragments
 * left in the current fragment range, 3 would be returned so that it could
 * be passed into the next call to this same function.
 */
static int unpack_vlcs(Vp3DecodeContext *s, GetBitContext *gb,
                        VLC *table, int coeff_index,
                        int plane,
                        int eob_run)
{
    int i, j = 0;
    int token;
    int zero_run = 0;
    DCTELEM coeff = 0;
    int bits_to_get;
    int blocks_ended;
    int coeff_i = 0;
    int num_coeffs = s->num_coded_frags[plane][coeff_index];
    int16_t *dct_tokens = s->dct_tokens[plane][coeff_index];

    /* local references to structure members to avoid repeated dereferences */
    int *coded_fragment_list = s->coded_fragment_list[plane];
    Vp3Fragment *all_fragments = s->all_fragments;
    VLC_TYPE (*vlc_table)[2] = table->table;

    if (num_coeffs < 0)
        av_log(s->avctx, AV_LOG_ERROR, "Invalid number of coefficents at level %d\n", coeff_index);

    if (eob_run > num_coeffs) {
        coeff_i = blocks_ended = num_coeffs;
        eob_run -= num_coeffs;
    } else {
        coeff_i = blocks_ended = eob_run;
        eob_run = 0;
    }

    // insert fake EOB token to cover the split between planes or zzi
    if (blocks_ended)
        dct_tokens[j++] = blocks_ended << 2;

    while (coeff_i < num_coeffs && get_bits_left(gb) > 0) {
        /* decode a VLC into a token */
        token = get_vlc2(gb, vlc_table, 11, 3);
        /* use the token to get a zero run, a coefficient, and an eob run */
        if (token <= 6) {
            eob_run = eob_run_base[token];
            if (eob_run_get_bits[token])
                eob_run += get_bits(gb, eob_run_get_bits[token]);

            // record only the number of blocks ended in this plane,
            // any spill will be recorded in the next plane.
            if (eob_run > num_coeffs - coeff_i) {
                dct_tokens[j++] = TOKEN_EOB(num_coeffs - coeff_i);
                blocks_ended   += num_coeffs - coeff_i;
                eob_run        -= num_coeffs - coeff_i;
                coeff_i         = num_coeffs;
            } else {
                dct_tokens[j++] = TOKEN_EOB(eob_run);
                blocks_ended   += eob_run;
                coeff_i        += eob_run;
                eob_run = 0;
            }
        } else {
            bits_to_get = coeff_get_bits[token];
            if (bits_to_get)
                bits_to_get = get_bits(gb, bits_to_get);
            coeff = coeff_tables[token][bits_to_get];

            zero_run = zero_run_base[token];
            if (zero_run_get_bits[token])
                zero_run += get_bits(gb, zero_run_get_bits[token]);

            if (zero_run) {
                dct_tokens[j++] = TOKEN_ZERO_RUN(coeff, zero_run);
            } else {
                // Save DC into the fragment structure. DC prediction is
                // done in raster order, so the actual DC can't be in with
                // other tokens. We still need the token in dct_tokens[]
                // however, or else the structure collapses on itself.
                if (!coeff_index)
                    all_fragments[coded_fragment_list[coeff_i]].dc = coeff;

                dct_tokens[j++] = TOKEN_COEFF(coeff);
            }

            if (coeff_index + zero_run > 64) {
                av_log(s->avctx, AV_LOG_DEBUG, "Invalid zero run of %d with"
                       " %d coeffs left\n", zero_run, 64-coeff_index);
                zero_run = 64 - coeff_index;
            }

            // zero runs code multiple coefficients,
            // so don't try to decode coeffs for those higher levels
            for (i = coeff_index+1; i <= coeff_index+zero_run; i++)
                s->num_coded_frags[plane][i]--;
            coeff_i++;
        }
    }

    if (blocks_ended > s->num_coded_frags[plane][coeff_index])
        av_log(s->avctx, AV_LOG_ERROR, "More blocks ended than coded!\n");

    // decrement the number of blocks that have higher coefficients for each
    // EOB run at this level
    if (blocks_ended)
        for (i = coeff_index+1; i < 64; i++)
            s->num_coded_frags[plane][i] -= blocks_ended;

    // setup the next buffer
    if (plane < 2)
        s->dct_tokens[plane+1][coeff_index] = dct_tokens + j;
    else if (coeff_index < 63)
        s->dct_tokens[0][coeff_index+1] = dct_tokens + j;

    return eob_run;
}

static void reverse_dc_prediction(Vp3DecodeContext *s,
                                  int first_fragment,
                                  int fragment_width,
                                  int fragment_height);
/*
 * This function unpacks all of the DCT coefficient data from the
 * bitstream.
 */
static int unpack_dct_coeffs(Vp3DecodeContext *s, GetBitContext *gb)
{
    int i;
    int dc_y_table;
    int dc_c_table;
    int ac_y_table;
    int ac_c_table;
    int residual_eob_run = 0;
    VLC *y_tables[64];
    VLC *c_tables[64];

    s->dct_tokens[0][0] = s->dct_tokens_base;

    /* fetch the DC table indexes */
    dc_y_table = get_bits(gb, 4);
    dc_c_table = get_bits(gb, 4);

    /* unpack the Y plane DC coefficients */
    residual_eob_run = unpack_vlcs(s, gb, &s->dc_vlc[dc_y_table], 0,
        0, residual_eob_run);

    /* reverse prediction of the Y-plane DC coefficients */
    reverse_dc_prediction(s, 0, s->fragment_width[0], s->fragment_height[0]);

    /* unpack the C plane DC coefficients */
    residual_eob_run = unpack_vlcs(s, gb, &s->dc_vlc[dc_c_table], 0,
        1, residual_eob_run);
    residual_eob_run = unpack_vlcs(s, gb, &s->dc_vlc[dc_c_table], 0,
        2, residual_eob_run);

    /* reverse prediction of the C-plane DC coefficients */
    if (!(s->avctx->flags & CODEC_FLAG_GRAY))
    {
        reverse_dc_prediction(s, s->fragment_start[1],
            s->fragment_width[1], s->fragment_height[1]);
        reverse_dc_prediction(s, s->fragment_start[2],
            s->fragment_width[1], s->fragment_height[1]);
    }

    /* fetch the AC table indexes */
    ac_y_table = get_bits(gb, 4);
    ac_c_table = get_bits(gb, 4);

    /* build tables of AC VLC tables */
    for (i = 1; i <= 5; i++) {
        y_tables[i] = &s->ac_vlc_1[ac_y_table];
        c_tables[i] = &s->ac_vlc_1[ac_c_table];
    }
    for (i = 6; i <= 14; i++) {
        y_tables[i] = &s->ac_vlc_2[ac_y_table];
        c_tables[i] = &s->ac_vlc_2[ac_c_table];
    }
    for (i = 15; i <= 27; i++) {
        y_tables[i] = &s->ac_vlc_3[ac_y_table];
        c_tables[i] = &s->ac_vlc_3[ac_c_table];
    }
    for (i = 28; i <= 63; i++) {
        y_tables[i] = &s->ac_vlc_4[ac_y_table];
        c_tables[i] = &s->ac_vlc_4[ac_c_table];
    }

    /* decode all AC coefficients */
    for (i = 1; i <= 63; i++) {
        residual_eob_run = unpack_vlcs(s, gb, y_tables[i], i,
            0, residual_eob_run);

        residual_eob_run = unpack_vlcs(s, gb, c_tables[i], i,
            1, residual_eob_run);
        residual_eob_run = unpack_vlcs(s, gb, c_tables[i], i,
            2, residual_eob_run);
    }

    return 0;
}

/*
 * This function reverses the DC prediction for each coded fragment in
 * the frame. Much of this function is adapted directly from the original
 * VP3 source code.
 */
#define COMPATIBLE_FRAME(x) \
  (compatible_frame[s->all_fragments[x].coding_method] == current_frame_type)
#define DC_COEFF(u) s->all_fragments[u].dc

static void reverse_dc_prediction(Vp3DecodeContext *s,
                                  int first_fragment,
                                  int fragment_width,
                                  int fragment_height)
{

#define PUL 8
#define PU 4
#define PUR 2
#define PL 1

    int x, y;
    int i = first_fragment;

    int predicted_dc;

    /* DC values for the left, up-left, up, and up-right fragments */
    int vl, vul, vu, vur;

    /* indexes for the left, up-left, up, and up-right fragments */
    int l, ul, u, ur;

    /*
     * The 4 fields mean:
     *   0: up-left multiplier
     *   1: up multiplier
     *   2: up-right multiplier
     *   3: left multiplier
     */
    static const int predictor_transform[16][4] = {
        {    0,   0,   0,   0},
        {    0,   0,   0, 128},        // PL
        {    0,   0, 128,   0},        // PUR
        {    0,   0,  53,  75},        // PUR|PL
        {    0, 128,   0,   0},        // PU
        {    0,  64,   0,  64},        // PU|PL
        {    0, 128,   0,   0},        // PU|PUR
        {    0,   0,  53,  75},        // PU|PUR|PL
        {  128,   0,   0,   0},        // PUL
        {    0,   0,   0, 128},        // PUL|PL
        {   64,   0,  64,   0},        // PUL|PUR
        {    0,   0,  53,  75},        // PUL|PUR|PL
        {    0, 128,   0,   0},        // PUL|PU
        { -104, 116,   0, 116},        // PUL|PU|PL
        {   24,  80,  24,   0},        // PUL|PU|PUR
        { -104, 116,   0, 116}         // PUL|PU|PUR|PL
    };
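
    /* Worked example for the table above (editorial illustration): when the
     * left, up and up-left neighbours are all available and compatible
     * (transform == PUL|PU|PL == 13), the prediction below becomes
     * (-104*vul + 116*vu + 116*vl) / 128, roughly "up + left - up-left",
     * which extrapolates the local gradient.  When only the left neighbour
     * is available (transform == PL == 1) it is simply 128*vl / 128 = vl. */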

    /* This table shows which types of blocks can use other blocks for
     * prediction. For example, INTRA is the only mode in this table to
     * have a frame number of 0. That means INTRA blocks can only predict
     * from other INTRA blocks. There are 2 golden frame coding types;
     * blocks encoded in these modes can only predict from other blocks
     * that were encoded with one of these 2 modes. */
    static const unsigned char compatible_frame[9] = {
        1,    /* MODE_INTER_NO_MV */
        0,    /* MODE_INTRA */
        1,    /* MODE_INTER_PLUS_MV */
        1,    /* MODE_INTER_LAST_MV */
        1,    /* MODE_INTER_PRIOR_MV */
        2,    /* MODE_USING_GOLDEN */
        2,    /* MODE_GOLDEN_MV */
        1,    /* MODE_INTER_FOUR_MV */
        3     /* MODE_COPY */
    };
    int current_frame_type;

    /* there is a last DC predictor for each of the 3 frame types */
    short last_dc[3];

    int transform = 0;

    vul = vu = vur = vl = 0;
    last_dc[0] = last_dc[1] = last_dc[2] = 0;

    /* for each fragment row... */
    for (y = 0; y < fragment_height; y++) {

        /* for each fragment in a row... */
        for (x = 0; x < fragment_width; x++, i++) {

            /* reverse prediction if this block was coded */
            if (s->all_fragments[i].coding_method != MODE_COPY) {

                current_frame_type =
                    compatible_frame[s->all_fragments[i].coding_method];

                transform= 0;
                if(x){
                    l= i-1;
                    vl = DC_COEFF(l);
                    if(COMPATIBLE_FRAME(l))
                        transform |= PL;
                }
                if(y){
                    u= i-fragment_width;
                    vu = DC_COEFF(u);
                    if(COMPATIBLE_FRAME(u))
                        transform |= PU;
                    if(x){
                        ul= i-fragment_width-1;
                        vul = DC_COEFF(ul);
                        if(COMPATIBLE_FRAME(ul))
                            transform |= PUL;
                    }
                    if(x + 1 < fragment_width){
                        ur= i-fragment_width+1;
                        vur = DC_COEFF(ur);
                        if(COMPATIBLE_FRAME(ur))
                            transform |= PUR;
                    }
                }

                if (transform == 0) {

                    /* if there were no fragments to predict from, use last
                     * DC saved */
                    predicted_dc = last_dc[current_frame_type];
                } else {

                    /* apply the appropriate predictor transform */
                    predicted_dc =
                        (predictor_transform[transform][0] * vul) +
                        (predictor_transform[transform][1] * vu) +
                        (predictor_transform[transform][2] * vur) +
                        (predictor_transform[transform][3] * vl);

                    predicted_dc /= 128;

                    /* check for outranging on the [ul u l] and
                     * [ul u ur l] predictors */
                    if ((transform == 15) || (transform == 13)) {
                        if (FFABS(predicted_dc - vu) > 128)
                            predicted_dc = vu;
                        else if (FFABS(predicted_dc - vl) > 128)
                            predicted_dc = vl;
                        else if (FFABS(predicted_dc - vul) > 128)
                            predicted_dc = vul;
                    }
                }

                /* at long last, apply the predictor */
                DC_COEFF(i) += predicted_dc;
                /* save the DC */
                last_dc[current_frame_type] = DC_COEFF(i);
            }
        }
    }
}

static void apply_loop_filter(Vp3DecodeContext *s, int plane, int ystart, int yend)
{
    int x, y;
    int *bounding_values= s->bounding_values_array+127;

    int width           = s->fragment_width[!!plane];
    int height          = s->fragment_height[!!plane];
    int fragment        = s->fragment_start        [plane] + ystart * width;
    int stride          = s->current_frame.linesize[plane];
    uint8_t *plane_data = s->current_frame.data    [plane];
    if (!s->flipped_image) stride = -stride;
    plane_data += s->data_offset[plane] + 8*ystart*stride;

    for (y = ystart; y < yend; y++) {

        for (x = 0; x < width; x++) {
            /* This code basically just deblocks on the edges of coded blocks.
             * However, it has to be much more complicated because of the
             * braindamaged deblock ordering used in VP3/Theora. Order matters
             * because some pixels get filtered twice. */
            if( s->all_fragments[fragment].coding_method != MODE_COPY )
            {
                /* do not perform left edge filter for left columns frags */
                if (x > 0) {
                    s->dsp.vp3_h_loop_filter(
                        plane_data + 8*x,
                        stride, bounding_values);
                }

                /* do not perform top edge filter for top row fragments */
                if (y > 0) {
                    s->dsp.vp3_v_loop_filter(
                        plane_data + 8*x,
                        stride, bounding_values);
                }

                /* do not perform right edge filter for right column
                 * fragments or if right fragment neighbor is also coded
                 * in this frame (it will be filtered in next iteration) */
                if ((x < width - 1) &&
                    (s->all_fragments[fragment + 1].coding_method == MODE_COPY)) {
                    s->dsp.vp3_h_loop_filter(
                        plane_data + 8*x + 8,
                        stride, bounding_values);
                }

                /* do not perform bottom edge filter for bottom row
                 * fragments or if bottom fragment neighbor is also coded
                 * in this frame (it will be filtered in the next row) */
                if ((y < height - 1) &&
                    (s->all_fragments[fragment + width].coding_method == MODE_COPY)) {
                    s->dsp.vp3_v_loop_filter(
                        plane_data + 8*x + 8*stride,
                        stride, bounding_values);
                }
            }

            fragment++;
        }
        plane_data += 8*stride;
    }
}

/**
 * Pull DCT tokens from the 64 levels to decode and dequant the coefficients
 * for the next block in coding order
 */
static inline int vp3_dequant(Vp3DecodeContext *s, Vp3Fragment *frag,
                              int plane, int inter, DCTELEM block[64])
{
    int16_t *dequantizer = s->qmat[frag->qpi][inter][plane];
    uint8_t *perm = s->scantable.permutated;
    int i = 0;

    do {
        int token = *s->dct_tokens[plane][i];
        switch (token & 3) {
        case 0: // EOB
            if (--token < 4) // 0-3 are token types, so the EOB run must now be 0
                s->dct_tokens[plane][i]++;
            else
                *s->dct_tokens[plane][i] = token & ~3;
            goto end;
        case 1: // zero run
            s->dct_tokens[plane][i]++;
            i += (token >> 2) & 0x7f;
            block[perm[i]] = (token >> 9) * dequantizer[perm[i]];
            i++;
            break;
        case 2: // coeff
            block[perm[i]] = (token >> 2) * dequantizer[perm[i]];
            s->dct_tokens[plane][i++]++;
            break;
        default: // shouldn't happen
            return i;
        }
    } while (i < 64);
end:
    // the actual DC+prediction is in the fragment structure
    block[0] = frag->dc * s->qmat[0][inter][plane][0];
    return i;
}
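
/* Example of how vp3_dequant() above walks the per-level token lists
 * (editorial illustration): if the token found at level 1 is
 * TOKEN_ZERO_RUN(7, 2), levels 1 and 2 stay zero, block[perm[3]] is set to
 * 7 * dequantizer[perm[3]] and decoding resumes with the level-4 list.  A
 * TOKEN_COEFF(-1) there fills level 4, and a TOKEN_EOB(n) at level 5 ends
 * the block; its run count is decremented and written back so the next n-1
 * blocks reaching level 5 also end there.  The DC value itself always comes
 * from frag->dc, where reverse_dc_prediction() left it. */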
1312 |
|
1313 |
/**
|
1314 |
* called when all pixels up to row y are complete
|
1315 |
*/
|
1316 |
static void vp3_draw_horiz_band(Vp3DecodeContext *s, int y) |
1317 |
{ |
1318 |
int h, cy;
|
1319 |
int offset[4]; |
1320 |
|
1321 |
if(s->avctx->draw_horiz_band==NULL) |
1322 |
return;
|
1323 |
|
1324 |
h= y - s->last_slice_end; |
1325 |
s->last_slice_end= y; |
1326 |
y -= h; |
1327 |
|
1328 |
if (!s->flipped_image) {
|
1329 |
y = s->avctx->height - y - h; |
1330 |
} |
1331 |
|
1332 |
cy = y >> s->chroma_y_shift; |
1333 |
offset[0] = s->current_frame.linesize[0]*y; |
1334 |
offset[1] = s->current_frame.linesize[1]*cy; |
1335 |
offset[2] = s->current_frame.linesize[2]*cy; |
1336 |
offset[3] = 0; |
1337 |
|
1338 |
emms_c(); |
1339 |
s->avctx->draw_horiz_band(s->avctx, &s->current_frame, offset, y, 3, h);
|
1340 |
} |
1341 |
|
1342 |
/*
|
1343 |
* Perform the final rendering for a particular slice of data.
|
1344 |
* The slice number ranges from 0..(c_superblock_height - 1).
|
1345 |
*/
|
1346 |
static void render_slice(Vp3DecodeContext *s, int slice) |
1347 |
{ |
1348 |
int x, y, i, j;
|
1349 |
LOCAL_ALIGNED_16(DCTELEM, block, [64]);
|
1350 |
int motion_x = 0xdeadbeef, motion_y = 0xdeadbeef; |
1351 |
int motion_halfpel_index;
|
1352 |
uint8_t *motion_source; |
1353 |
int plane, first_pixel;
|
1354 |
|
1355 |
if (slice >= s->c_superblock_height)
|
1356 |
return;
|
1357 |
|
1358 |
for (plane = 0; plane < 3; plane++) { |
1359 |
uint8_t *output_plane = s->current_frame.data [plane] + s->data_offset[plane]; |
1360 |
uint8_t * last_plane = s-> last_frame.data [plane] + s->data_offset[plane]; |
1361 |
uint8_t *golden_plane = s-> golden_frame.data [plane] + s->data_offset[plane]; |
1362 |
int stride = s->current_frame.linesize[plane];
|
1363 |
int plane_width = s->width >> (plane && s->chroma_x_shift);
|
1364 |
int plane_height = s->height >> (plane && s->chroma_y_shift);
|
1365 |
int8_t (*motion_val)[2] = s->motion_val[!!plane];
|
1366 |
|
1367 |
int sb_x, sb_y = slice << (!plane && s->chroma_y_shift);
|
1368 |
int slice_height = sb_y + 1 + (!plane && s->chroma_y_shift); |
1369 |
int slice_width = plane ? s->c_superblock_width : s->y_superblock_width;
|
1370 |
|
1371 |
int fragment_width = s->fragment_width[!!plane];
|
1372 |
int fragment_height = s->fragment_height[!!plane];
|
1373 |
int fragment_start = s->fragment_start[plane];
|
1374 |
|
1375 |
if (!s->flipped_image) stride = -stride;
|
1376 |
if (CONFIG_GRAY && plane && (s->avctx->flags & CODEC_FLAG_GRAY))
|
1377 |
continue;
|
1378 |
|
1379 |
|
1380 |
if(FFABS(stride) > 2048) |
1381 |
return; //various tables are fixed size |
1382 |
|
1383 |
/* for each superblock row in the slice (both of them)... */
|
1384 |
for (; sb_y < slice_height; sb_y++) {
|
1385 |
|
1386 |
/* for each superblock in a row... */
|
1387 |
for (sb_x = 0; sb_x < slice_width; sb_x++) { |
1388 |
|
1389 |
/* for each block in a superblock... */
|
1390 |
for (j = 0; j < 16; j++) { |
1391 |
x = 4*sb_x + hilbert_offset[j][0]; |
1392 |
y = 4*sb_y + hilbert_offset[j][1]; |
1393 |
|
1394 |
i = fragment_start + y*fragment_width + x; |
1395 |
|
1396 |
// bounds check
|
1397 |
if (x >= fragment_width || y >= fragment_height)
|
1398 |
continue;
|
1399 |
|
1400 |
first_pixel = 8*y*stride + 8*x; |
1401 |
|
1402 |
/* transform if this block was coded */
|
1403 |
if (s->all_fragments[i].coding_method != MODE_COPY) {
|
1404 |
if ((s->all_fragments[i].coding_method == MODE_USING_GOLDEN) ||
|
1405 |
(s->all_fragments[i].coding_method == MODE_GOLDEN_MV)) |
1406 |
motion_source= golden_plane; |
1407 |
else
|
1408 |
motion_source= last_plane; |
1409 |
|
1410 |
motion_source += first_pixel; |
1411 |
motion_halfpel_index = 0;
|
1412 |
|
1413 |
/* sort out the motion vector if this fragment is coded
|
1414 |
* using a motion vector method */
|
1415 |
if ((s->all_fragments[i].coding_method > MODE_INTRA) &&
|
1416 |
(s->all_fragments[i].coding_method != MODE_USING_GOLDEN)) { |
1417 |
int src_x, src_y;
|
1418 |
motion_x = motion_val[y*fragment_width + x][0];
|
1419 |
motion_y = motion_val[y*fragment_width + x][1];
|
1420 |
|
1421 |
src_x= (motion_x>>1) + 8*x; |
1422 |
src_y= (motion_y>>1) + 8*y; |
1423 |
|
1424 |
motion_halfpel_index = motion_x & 0x01;
|
1425 |
motion_source += (motion_x >> 1);
|
1426 |
|
1427 |
motion_halfpel_index |= (motion_y & 0x01) << 1; |
1428 |
motion_source += ((motion_y >> 1) * stride);
|
1429 |
|
1430 |
if(src_x<0 || src_y<0 || src_x + 9 >= plane_width || src_y + 9 >= plane_height){ |
1431 |
uint8_t *temp= s->edge_emu_buffer; |
1432 |
if(stride<0) temp -= 9*stride; |
1433 |
else temp += 9*stride; |
1434 |
|
1435 |
s->dsp.emulated_edge_mc(temp, motion_source, stride, 9, 9, src_x, src_y, plane_width, plane_height); |
1436 |
motion_source= temp; |
1437 |
} |
1438 |
} |
1439 |
|
1440 |
|
1441 |
/* first, take care of copying a block from either the
|
1442 |
* previous or the golden frame */
|
1443 |
if (s->all_fragments[i].coding_method != MODE_INTRA) {
|
1444 |
/* Note, it is possible to implement all MC cases with
|
1445 |
put_no_rnd_pixels_l2 which would look more like the
|
1446 |
VP3 source but this would be slower as
|
1447 |
put_no_rnd_pixels_tab is better optimzed */
|
1448 |
if(motion_halfpel_index != 3){ |
1449 |
s->dsp.put_no_rnd_pixels_tab[1][motion_halfpel_index](
|
1450 |
output_plane + first_pixel, |
1451 |
motion_source, stride, 8);
|
1452 |
}else{
|
1453 |
int d= (motion_x ^ motion_y)>>31; // d is 0 if motion_x and _y have the same sign, else -1 |
1454 |
s->dsp.put_no_rnd_pixels_l2[1](
|
1455 |
output_plane + first_pixel, |
1456 |
motion_source - d, |
1457 |
motion_source + stride + 1 + d,
|
1458 |
stride, 8);
|
1459 |
} |
1460 |
} |
1461 |
|
1462 |
s->dsp.clear_block(block); |
1463 |
|
1464 |
/* invert DCT and place (or add) in final output */
|
1465 |
|
1466 |
if (s->all_fragments[i].coding_method == MODE_INTRA) {
|
1467 |
vp3_dequant(s, s->all_fragments + i, plane, 0, block);
|
1468 |
if(s->avctx->idct_algo!=FF_IDCT_VP3)
|
1469 |
block[0] += 128<<3; |
1470 |
s->dsp.idct_put( |
1471 |
output_plane + first_pixel, |
1472 |
stride, |
1473 |
block); |
1474 |
} else {
|
1475 |
if (vp3_dequant(s, s->all_fragments + i, plane, 1, block)) { |
1476 |
s->dsp.idct_add( |
1477 |
output_plane + first_pixel, |
1478 |
stride, |
1479 |
block); |
1480 |
} else {
|
1481 |
s->dsp.vp3_idct_dc_add(output_plane + first_pixel, stride, block); |
1482 |
} |
1483 |
} |
1484 |
} else {
|
1485 |
|
1486 |
/* copy directly from the previous frame */
|
1487 |
s->dsp.put_pixels_tab[1][0]( |
1488 |
output_plane + first_pixel, |
1489 |
last_plane + first_pixel, |
1490 |
stride, 8);
|
1491 |
|
1492 |
} |
1493 |
} |
1494 |
} |
1495 |
|
1496 |
// Filter up to the last row in the superblock row
|
1497 |
if (!s->skip_loop_filter)
|
1498 |
apply_loop_filter(s, plane, 4*sb_y - !!sb_y, FFMIN(4*sb_y+3, fragment_height-1)); |
1499 |
} |
1500 |
} |
1501 |
|
1502 |
/* this looks like a good place for slice dispatch... */
|
1503 |
/* algorithm:
|
1504 |
* if (slice == s->macroblock_height - 1)
|
1505 |
* dispatch (both last slice & 2nd-to-last slice);
|
1506 |
* else if (slice > 0)
|
1507 |
* dispatch (slice - 1);
|
1508 |
*/
|
1509 |
|
1510 |
vp3_draw_horiz_band(s, FFMIN((32 << s->chroma_y_shift) * (slice + 1) -16, s->height-16)); |
1511 |
} |
1512 |
|
1513 |
/*
|
1514 |
* This is the ffmpeg/libavcodec API init function.
|
1515 |
*/
|
1516 |
static av_cold int vp3_decode_init(AVCodecContext *avctx) |
1517 |
{ |
1518 |
Vp3DecodeContext *s = avctx->priv_data; |
1519 |
int i, inter, plane;
|
1520 |
int c_width;
|
1521 |
int c_height;
|
1522 |
int y_fragment_count, c_fragment_count;
|
1523 |
|
1524 |
if (avctx->codec_tag == MKTAG('V','P','3','0')) |
1525 |
s->version = 0;
|
1526 |
else
|
1527 |
s->version = 1;
|
1528 |
|
1529 |
s->avctx = avctx; |
1530 |
s->width = FFALIGN(avctx->width, 16);
|
1531 |
s->height = FFALIGN(avctx->height, 16);
|
1532 |
if (avctx->pix_fmt == PIX_FMT_NONE)
|
1533 |
avctx->pix_fmt = PIX_FMT_YUV420P; |
1534 |
avctx->chroma_sample_location = AVCHROMA_LOC_CENTER; |
1535 |
if(avctx->idct_algo==FF_IDCT_AUTO)
|
1536 |
avctx->idct_algo=FF_IDCT_VP3; |
1537 |
dsputil_init(&s->dsp, avctx); |
1538 |
|
1539 |
ff_init_scantable(s->dsp.idct_permutation, &s->scantable, ff_zigzag_direct); |
1540 |
|
1541 |
/* initialize to an impossible value which will force a recalculation
|
1542 |
* in the first frame decode */
|
1543 |
for (i = 0; i < 3; i++) |
1544 |
s->qps[i] = -1;
|
1545 |
|
1546 |
avcodec_get_chroma_sub_sample(avctx->pix_fmt, &s->chroma_x_shift, &s->chroma_y_shift); |
1547 |
|
1548 |
s->y_superblock_width = (s->width + 31) / 32; |
1549 |
s->y_superblock_height = (s->height + 31) / 32; |
1550 |
s->y_superblock_count = s->y_superblock_width * s->y_superblock_height; |
1551 |
|
1552 |
/* work out the dimensions for the C planes */
|
1553 |
c_width = s->width >> s->chroma_x_shift; |
1554 |
c_height = s->height >> s->chroma_y_shift; |
1555 |
s->c_superblock_width = (c_width + 31) / 32; |
1556 |
s->c_superblock_height = (c_height + 31) / 32; |
1557 |
s->c_superblock_count = s->c_superblock_width * s->c_superblock_height; |
1558 |
|
1559 |
s->superblock_count = s->y_superblock_count + (s->c_superblock_count * 2);
|
1560 |
s->u_superblock_start = s->y_superblock_count; |
1561 |
s->v_superblock_start = s->u_superblock_start + s->c_superblock_count; |
1562 |
s->superblock_coding = av_malloc(s->superblock_count); |
1563 |
|
1564 |
s->macroblock_width = (s->width + 15) / 16; |
1565 |
s->macroblock_height = (s->height + 15) / 16; |
1566 |
s->macroblock_count = s->macroblock_width * s->macroblock_height; |
1567 |
|
1568 |
s->fragment_width[0] = s->width / FRAGMENT_PIXELS;
|
1569 |
s->fragment_height[0] = s->height / FRAGMENT_PIXELS;
|
1570 |
s->fragment_width[1] = s->fragment_width[0] >> s->chroma_x_shift; |
1571 |
s->fragment_height[1] = s->fragment_height[0] >> s->chroma_y_shift; |
1572 |
|
1573 |
/* fragment count covers all 8x8 blocks for all 3 planes */
|
1574 |
y_fragment_count = s->fragment_width[0] * s->fragment_height[0]; |
1575 |
c_fragment_count = s->fragment_width[1] * s->fragment_height[1]; |
1576 |
s->fragment_count = y_fragment_count + 2*c_fragment_count;
|
1577 |
s->fragment_start[1] = y_fragment_count;
|
1578 |
s->fragment_start[2] = y_fragment_count + c_fragment_count;
|
1579 |
|
    s->all_fragments = av_malloc(s->fragment_count * sizeof(Vp3Fragment));
    s->coded_fragment_list[0] = av_malloc(s->fragment_count * sizeof(int));
    s->dct_tokens_base = av_malloc(64*s->fragment_count * sizeof(*s->dct_tokens_base));
    s->motion_val[0] = av_malloc(y_fragment_count * sizeof(*s->motion_val[0]));
    s->motion_val[1] = av_malloc(c_fragment_count * sizeof(*s->motion_val[1]));

    if (!s->superblock_coding || !s->all_fragments || !s->dct_tokens_base ||
        !s->coded_fragment_list[0] || !s->motion_val[0] || !s->motion_val[1]) {
        vp3_decode_end(avctx);
        return -1;
    }

    if (!s->theora_tables)
    {
        for (i = 0; i < 64; i++) {
            s->coded_dc_scale_factor[i] = vp31_dc_scale_factor[i];
            s->coded_ac_scale_factor[i] = vp31_ac_scale_factor[i];
            s->base_matrix[0][i] = vp31_intra_y_dequant[i];
            s->base_matrix[1][i] = vp31_intra_c_dequant[i];
            s->base_matrix[2][i] = vp31_inter_dequant[i];
            s->filter_limit_values[i] = vp31_filter_limit_values[i];
        }

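        /* Raw VP3.1 streams have no setup header, so install the fixed
         * mapping: every quality index 0..63 uses base matrix 0 for
         * intra luma, 1 for intra chroma and 2 for inter blocks
         * (2*inter + (!!plane)*!inter evaluates to exactly those three
         * values). */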
        for(inter=0; inter<2; inter++){
            for(plane=0; plane<3; plane++){
                s->qr_count[inter][plane]= 1;
                s->qr_size [inter][plane][0]= 63;
                s->qr_base [inter][plane][0]=
                s->qr_base [inter][plane][1]= 2*inter + (!!plane)*!inter;
            }
        }

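        /* Token Huffman tables: VP3 fixes 16 selectable code sets for the
         * DC coefficients and for each of four groups of AC coefficients
         * (grouped by zigzag position), taken from vp3data.h; a Theora
         * setup header supplies its own 80 tables instead (see the else
         * branch below). */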
        /* init VLC tables */
        for (i = 0; i < 16; i++) {

            /* DC histograms */
            init_vlc(&s->dc_vlc[i], 11, 32,
                &dc_bias[i][0][1], 4, 2,
                &dc_bias[i][0][0], 4, 2, 0);

            /* group 1 AC histograms */
            init_vlc(&s->ac_vlc_1[i], 11, 32,
                &ac_bias_0[i][0][1], 4, 2,
                &ac_bias_0[i][0][0], 4, 2, 0);

            /* group 2 AC histograms */
            init_vlc(&s->ac_vlc_2[i], 11, 32,
                &ac_bias_1[i][0][1], 4, 2,
                &ac_bias_1[i][0][0], 4, 2, 0);

            /* group 3 AC histograms */
            init_vlc(&s->ac_vlc_3[i], 11, 32,
                &ac_bias_2[i][0][1], 4, 2,
                &ac_bias_2[i][0][0], 4, 2, 0);

            /* group 4 AC histograms */
            init_vlc(&s->ac_vlc_4[i], 11, 32,
                &ac_bias_3[i][0][1], 4, 2,
                &ac_bias_3[i][0][0], 4, 2, 0);
        }
    } else {

        for (i = 0; i < 16; i++) {
            /* DC histograms */
            if (init_vlc(&s->dc_vlc[i], 11, 32,
                &s->huffman_table[i][0][1], 8, 4,
                &s->huffman_table[i][0][0], 8, 4, 0) < 0)
                goto vlc_fail;

            /* group 1 AC histograms */
            if (init_vlc(&s->ac_vlc_1[i], 11, 32,
                &s->huffman_table[i+16][0][1], 8, 4,
                &s->huffman_table[i+16][0][0], 8, 4, 0) < 0)
                goto vlc_fail;

            /* group 2 AC histograms */
            if (init_vlc(&s->ac_vlc_2[i], 11, 32,
                &s->huffman_table[i+16*2][0][1], 8, 4,
                &s->huffman_table[i+16*2][0][0], 8, 4, 0) < 0)
                goto vlc_fail;

            /* group 3 AC histograms */
            if (init_vlc(&s->ac_vlc_3[i], 11, 32,
                &s->huffman_table[i+16*3][0][1], 8, 4,
                &s->huffman_table[i+16*3][0][0], 8, 4, 0) < 0)
                goto vlc_fail;

            /* group 4 AC histograms */
            if (init_vlc(&s->ac_vlc_4[i], 11, 32,
                &s->huffman_table[i+16*4][0][1], 8, 4,
                &s->huffman_table[i+16*4][0][0], 8, 4, 0) < 0)
                goto vlc_fail;
        }
    }

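    /* Fixed VLCs from vp3data.h used by the bitstream parser: run lengths
     * for superblock and fragment coded/not-coded flags, the coding mode
     * alphabet codes, and motion vector component values. */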
    init_vlc(&s->superblock_run_length_vlc, 6, 34,
        &superblock_run_length_vlc_table[0][1], 4, 2,
        &superblock_run_length_vlc_table[0][0], 4, 2, 0);

    init_vlc(&s->fragment_run_length_vlc, 5, 30,
        &fragment_run_length_vlc_table[0][1], 4, 2,
        &fragment_run_length_vlc_table[0][0], 4, 2, 0);

    init_vlc(&s->mode_code_vlc, 3, 8,
        &mode_code_vlc_table[0][1], 2, 1,
        &mode_code_vlc_table[0][0], 2, 1, 0);

    init_vlc(&s->motion_vector_vlc, 6, 63,
        &motion_vector_vlc_table[0][1], 2, 1,
        &motion_vector_vlc_table[0][0], 2, 1, 0);

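    /* superblock_fragments maps each 32x32 superblock to the (up to) 16
     * 8x8 fragments it covers; init_block_mapping() fills it following
     * VP3's Hilbert-pattern traversal, marking positions that fall
     * outside the plane as unused. */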
    /* work out the block mapping tables */
    s->superblock_fragments = av_malloc(s->superblock_count * 16 * sizeof(int));
    s->macroblock_coding = av_malloc(s->macroblock_count + 1);
    if (!s->superblock_fragments || !s->macroblock_coding) {
        vp3_decode_end(avctx);
        return -1;
    }
    init_block_mapping(s);

    for (i = 0; i < 3; i++) {
        s->current_frame.data[i] = NULL;
        s->last_frame.data[i] = NULL;
        s->golden_frame.data[i] = NULL;
    }

    return 0;

vlc_fail:
    av_log(avctx, AV_LOG_FATAL, "Invalid huffman table\n");
    return -1;
}

/*
 * This is the ffmpeg/libavcodec API frame decode function.
 */
static int vp3_decode_frame(AVCodecContext *avctx,
                            void *data, int *data_size,
                            AVPacket *avpkt)
{
    const uint8_t *buf = avpkt->data;
    int buf_size = avpkt->size;
    Vp3DecodeContext *s = avctx->priv_data;
    GetBitContext gb;
    int i;

    init_get_bits(&gb, buf, buf_size * 8);

    if (s->theora && get_bits1(&gb))
    {
        av_log(avctx, AV_LOG_ERROR, "Header packet passed to frame decoder, skipping\n");
        return -1;
    }

    s->keyframe = !get_bits1(&gb);
    if (!s->theora)
        skip_bits(&gb, 1);
    for (i = 0; i < 3; i++)
        s->last_qps[i] = s->qps[i];

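    /* Read the frame's quality indices: VP3 and old Theora carry a single
     * 6-bit qi, while Theora >= 3.2 may chain up to three of them; qps[0]
     * selects the loop filter strength and the DC quantizer shared by all
     * matrices. */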
    s->nqps=0;
    do{
        s->qps[s->nqps++]= get_bits(&gb, 6);
    } while(s->theora >= 0x030200 && s->nqps<3 && get_bits1(&gb));
    for (i = s->nqps; i < 3; i++)
        s->qps[i] = -1;

    if (s->avctx->debug & FF_DEBUG_PICT_INFO)
        av_log(s->avctx, AV_LOG_INFO, " VP3 %sframe #%d: Q index = %d\n",
            s->keyframe?"key":"", avctx->frame_number+1, s->qps[0]);

    s->skip_loop_filter = !s->filter_limit_values[s->qps[0]] ||
        avctx->skip_loop_filter >= (s->keyframe ? AVDISCARD_ALL : AVDISCARD_NONKEY);

    if (s->qps[0] != s->last_qps[0])
        init_loop_filter(s);

    for (i = 0; i < s->nqps; i++)
        // reinit all dequantizers if the first one changed, because
        // the DC of the first quantizer must be used for all matrices
        if (s->qps[i] != s->last_qps[i] || s->qps[0] != s->last_qps[0])
            init_dequantizer(s, i);

    if (avctx->skip_frame >= AVDISCARD_NONKEY && !s->keyframe)
        return buf_size;

    s->current_frame.reference = 3;
    s->current_frame.pict_type = s->keyframe ? FF_I_TYPE : FF_P_TYPE;
    if (avctx->get_buffer(avctx, &s->current_frame) < 0) {
        av_log(s->avctx, AV_LOG_ERROR, "get_buffer() failed\n");
        goto error;
    }

    if (s->keyframe) {
        if (!s->theora)
        {
            skip_bits(&gb, 4); /* width code */
            skip_bits(&gb, 4); /* height code */
            if (s->version)
            {
                s->version = get_bits(&gb, 5);
                if (avctx->frame_number == 0)
                    av_log(s->avctx, AV_LOG_DEBUG, "VP version: %d\n", s->version);
            }
        }
        if (s->version || s->theora)
        {
            if (get_bits1(&gb))
                av_log(s->avctx, AV_LOG_ERROR, "Warning, unsupported keyframe coding type?!\n");
            skip_bits(&gb, 2); /* reserved? */
        }
    } else {
        if (!s->golden_frame.data[0]) {
            av_log(s->avctx, AV_LOG_WARNING, "vp3: first frame not a keyframe\n");

            s->golden_frame.reference = 3;
            s->golden_frame.pict_type = FF_I_TYPE;
            if (avctx->get_buffer(avctx, &s->golden_frame) < 0) {
                av_log(s->avctx, AV_LOG_ERROR, "get_buffer() failed\n");
                goto error;
            }
            s->last_frame = s->golden_frame;
            s->last_frame.type = FF_BUFFER_TYPE_COPY;
        }
    }

    s->current_frame.qscale_table= s->qscale_table; //FIXME allocate individual tables per AVFrame
    s->current_frame.qstride= 0;

    memset(s->all_fragments, 0, s->fragment_count * sizeof(Vp3Fragment));

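    /* Parse the frame payload in bitstream order: superblock coding flags,
     * macroblock coding modes, motion vectors, per-block qi selections and
     * finally the DCT coefficient tokens. */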
    if (unpack_superblocks(s, &gb)){
        av_log(s->avctx, AV_LOG_ERROR, "error in unpack_superblocks\n");
        goto error;
    }
    if (unpack_modes(s, &gb)){
        av_log(s->avctx, AV_LOG_ERROR, "error in unpack_modes\n");
        goto error;
    }
    if (unpack_vectors(s, &gb)){
        av_log(s->avctx, AV_LOG_ERROR, "error in unpack_vectors\n");
        goto error;
    }
    if (unpack_block_qpis(s, &gb)){
        av_log(s->avctx, AV_LOG_ERROR, "error in unpack_block_qpis\n");
        goto error;
    }
    if (unpack_dct_coeffs(s, &gb)){
        av_log(s->avctx, AV_LOG_ERROR, "error in unpack_dct_coeffs\n");
        goto error;
    }

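    /* data_offset[] points at the first row each plane will be written to:
     * row 0 for flipped (old Theora) output, otherwise the bottom row of
     * the picture, matching VP3's bottom-up frame orientation. */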
    for (i = 0; i < 3; i++) {
        int height = s->height >> (i && s->chroma_y_shift);
        if (s->flipped_image)
            s->data_offset[i] = 0;
        else
            s->data_offset[i] = (height-1) * s->current_frame.linesize[i];
    }

    s->last_slice_end = 0;
    for (i = 0; i < s->c_superblock_height; i++)
        render_slice(s, i);

    // filter the last row
    for (i = 0; i < 3; i++) {
        int row = (s->height >> (3+(i && s->chroma_y_shift))) - 1;
        apply_loop_filter(s, i, row, row+1);
    }
    vp3_draw_horiz_band(s, s->avctx->height);

    *data_size=sizeof(AVFrame);
    *(AVFrame*)data= s->current_frame;

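    /* Reference handling: VP3 predicts from two references, the previous
     * frame and the golden frame.  The frame just decoded becomes the new
     * last frame, and on keyframes it also becomes the new golden frame;
     * FF_BUFFER_TYPE_COPY marks a last frame that merely aliases the golden
     * frame so the same buffer is not released twice. */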
    /* release the last frame, if it is allocated and if it is not the
     * golden frame */
    if (s->last_frame.data[0] && s->last_frame.type != FF_BUFFER_TYPE_COPY)
        avctx->release_buffer(avctx, &s->last_frame);

    /* shuffle frames (last = current) */
    s->last_frame= s->current_frame;

    if (s->keyframe) {
        if (s->golden_frame.data[0])
            avctx->release_buffer(avctx, &s->golden_frame);
        s->golden_frame = s->current_frame;
        s->last_frame.type = FF_BUFFER_TYPE_COPY;
    }

    s->current_frame.data[0]= NULL; /* ensure that we catch any access to this released frame */

    return buf_size;

error:
    if (s->current_frame.data[0])
        avctx->release_buffer(avctx, &s->current_frame);
    return -1;
}

/*
 * This is the ffmpeg/libavcodec API module cleanup function.
 */
static av_cold int vp3_decode_end(AVCodecContext *avctx)
{
    Vp3DecodeContext *s = avctx->priv_data;
    int i;

    av_free(s->superblock_coding);
    av_free(s->all_fragments);
    av_free(s->coded_fragment_list[0]);
    av_free(s->dct_tokens_base);
    av_free(s->superblock_fragments);
    av_free(s->macroblock_coding);
    av_free(s->motion_val[0]);
    av_free(s->motion_val[1]);

    for (i = 0; i < 16; i++) {
        free_vlc(&s->dc_vlc[i]);
        free_vlc(&s->ac_vlc_1[i]);
        free_vlc(&s->ac_vlc_2[i]);
        free_vlc(&s->ac_vlc_3[i]);
        free_vlc(&s->ac_vlc_4[i]);
    }

    free_vlc(&s->superblock_run_length_vlc);
    free_vlc(&s->fragment_run_length_vlc);
    free_vlc(&s->mode_code_vlc);
    free_vlc(&s->motion_vector_vlc);

    /* release all frames */
    if (s->golden_frame.data[0])
        avctx->release_buffer(avctx, &s->golden_frame);
    if (s->last_frame.data[0] && s->last_frame.type != FF_BUFFER_TYPE_COPY)
        avctx->release_buffer(avctx, &s->last_frame);
    /* no need to release the current_frame since it will always be pointing
     * to the same frame as either the golden or last frame */

    return 0;
}

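/*
 * Recursively parse one Huffman tree from a Theora setup header: a 1 bit
 * denotes a leaf carrying a 5-bit token, a 0 bit descends into the two
 * subtrees (appending a 0 and then a 1 to the code accumulated in s->hbits).
 * Each table is limited to 32 entries and a code depth of 32 bits.
 */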
static int read_huffman_tree(AVCodecContext *avctx, GetBitContext *gb)
{
    Vp3DecodeContext *s = avctx->priv_data;

    if (get_bits1(gb)) {
        int token;
        if (s->entries >= 32) { /* overflow */
            av_log(avctx, AV_LOG_ERROR, "huffman tree overflow\n");
            return -1;
        }
        token = get_bits(gb, 5);
        //av_log(avctx, AV_LOG_DEBUG, "hti %d hbits %x token %d entry : %d size %d\n", s->hti, s->hbits, token, s->entries, s->huff_code_size);
        s->huffman_table[s->hti][token][0] = s->hbits;
        s->huffman_table[s->hti][token][1] = s->huff_code_size;
        s->entries++;
    }
    else {
        if (s->huff_code_size >= 32) {/* overflow */
            av_log(avctx, AV_LOG_ERROR, "huffman tree overflow\n");
            return -1;
        }
        s->huff_code_size++;
        s->hbits <<= 1;
        if (read_huffman_tree(avctx, gb))
            return -1;
        s->hbits |= 1;
        if (read_huffman_tree(avctx, gb))
            return -1;
        s->hbits >>= 1;
        s->huff_code_size--;
    }
    return 0;
}

#if CONFIG_THEORA_DECODER
static const enum PixelFormat theora_pix_fmts[4] = {
    PIX_FMT_YUV420P, PIX_FMT_NONE, PIX_FMT_YUV422P, PIX_FMT_YUV444P
};

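/*
 * Parse the Theora identification header: bitstream version, coded and
 * visible frame geometry, frame rate, pixel aspect ratio, colorspace and
 * pixel format.
 */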
static int theora_decode_header(AVCodecContext *avctx, GetBitContext *gb)
{
    Vp3DecodeContext *s = avctx->priv_data;
    int visible_width, visible_height, colorspace;
    int offset_x = 0, offset_y = 0;
    AVRational fps, aspect;

    s->theora = get_bits_long(gb, 24);
    av_log(avctx, AV_LOG_DEBUG, "Theora bitstream version %X\n", s->theora);

    /* 3.2.0 aka alpha3 has the same frame orientation as original vp3 */
    /* but previous versions have the image flipped relative to vp3 */
    if (s->theora < 0x030200)
    {
        s->flipped_image = 1;
        av_log(avctx, AV_LOG_DEBUG, "Old (<alpha3) Theora bitstream, flipped image\n");
    }

    visible_width  = s->width  = get_bits(gb, 16) << 4;
    visible_height = s->height = get_bits(gb, 16) << 4;

    if(av_image_check_size(s->width, s->height, 0, avctx)){
        av_log(avctx, AV_LOG_ERROR, "Invalid dimensions (%dx%d)\n", s->width, s->height);
        s->width= s->height= 0;
        return -1;
    }

    if (s->theora >= 0x030200) {
        visible_width  = get_bits_long(gb, 24);
        visible_height = get_bits_long(gb, 24);

        offset_x = get_bits(gb, 8); /* offset x */
        offset_y = get_bits(gb, 8); /* offset y, from bottom */
    }

    fps.num = get_bits_long(gb, 32);
    fps.den = get_bits_long(gb, 32);
    if (fps.num && fps.den) {
        av_reduce(&avctx->time_base.num, &avctx->time_base.den,
                  fps.den, fps.num, 1<<30);
    }

    aspect.num = get_bits_long(gb, 24);
    aspect.den = get_bits_long(gb, 24);
    if (aspect.num && aspect.den) {
        av_reduce(&avctx->sample_aspect_ratio.num,
                  &avctx->sample_aspect_ratio.den,
                  aspect.num, aspect.den, 1<<30);
    }

    if (s->theora < 0x030200)
        skip_bits(gb, 5); /* keyframe frequency force */
    colorspace = get_bits(gb, 8);
    skip_bits(gb, 24); /* bitrate */

    skip_bits(gb, 6); /* quality hint */

    if (s->theora >= 0x030200)
    {
        skip_bits(gb, 5); /* keyframe frequency force */
        avctx->pix_fmt = theora_pix_fmts[get_bits(gb, 2)];
        skip_bits(gb, 3); /* reserved */
    }

//    align_get_bits(gb);

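    /* Only report the (smaller) visible dimensions when they differ from
     * the macroblock-aligned coded size by less than one full macroblock
     * and the crop region sits at the canonical position (offset_x == 0,
     * offset_y measured from the bottom placing the picture at the top);
     * anything else falls back to the coded size. */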
    if (   visible_width  <= s->width  && visible_width  > s->width-16
        && visible_height <= s->height && visible_height > s->height-16
        && !offset_x && (offset_y == s->height - visible_height))
        avcodec_set_dimensions(avctx, visible_width, visible_height);
    else
        avcodec_set_dimensions(avctx, s->width, s->height);

    if (colorspace == 1) {
        avctx->color_primaries = AVCOL_PRI_BT470M;
    } else if (colorspace == 2) {
        avctx->color_primaries = AVCOL_PRI_BT470BG;
    }
    if (colorspace == 1 || colorspace == 2) {
        avctx->colorspace = AVCOL_SPC_BT470BG;
        avctx->color_trc  = AVCOL_TRC_BT709;
    }

    return 0;
}

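/*
 * Parse the Theora setup header: loop filter limits, AC/DC scale factors,
 * the base dequantization matrices, the per-plane quant-range mappings and
 * the 80 token Huffman tables.
 */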
static int theora_decode_tables(AVCodecContext *avctx, GetBitContext *gb)
{
    Vp3DecodeContext *s = avctx->priv_data;
    int i, n, matrices, inter, plane;

    if (s->theora >= 0x030200) {
        n = get_bits(gb, 3);
        /* loop filter limit values table */
        if (n)
            for (i = 0; i < 64; i++)
                s->filter_limit_values[i] = get_bits(gb, n);
    }

    if (s->theora >= 0x030200)
        n = get_bits(gb, 4) + 1;
    else
        n = 16;
    /* quality threshold table */
    for (i = 0; i < 64; i++)
        s->coded_ac_scale_factor[i] = get_bits(gb, n);

    if (s->theora >= 0x030200)
        n = get_bits(gb, 4) + 1;
    else
        n = 16;
    /* dc scale factor table */
    for (i = 0; i < 64; i++)
        s->coded_dc_scale_factor[i] = get_bits(gb, n);

    if (s->theora >= 0x030200)
        matrices = get_bits(gb, 9) + 1;
    else
        matrices = 3;

    if(matrices > 384){
        av_log(avctx, AV_LOG_ERROR, "invalid number of base matrices\n");
        return -1;
    }

    for(n=0; n<matrices; n++){
        for (i = 0; i < 64; i++)
            s->base_matrix[n][i]= get_bits(gb, 8);
    }

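    /* Quant ranges: for each (inter, plane) pair either copy a previously
     * read set (newqr == 0) or read pairs of base-matrix index and qi run
     * length; the run lengths must partition the quality-index range 0..63,
     * over which Theora interpolates between the base matrices. */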
    for (inter = 0; inter <= 1; inter++) {
        for (plane = 0; plane <= 2; plane++) {
            int newqr= 1;
            if (inter || plane > 0)
                newqr = get_bits1(gb);
            if (!newqr) {
                int qtj, plj;
                if(inter && get_bits1(gb)){
                    qtj = 0;
                    plj = plane;
                }else{
                    qtj= (3*inter + plane - 1) / 3;
                    plj= (plane + 2) % 3;
                }
                s->qr_count[inter][plane]= s->qr_count[qtj][plj];
                memcpy(s->qr_size[inter][plane], s->qr_size[qtj][plj], sizeof(s->qr_size[0][0]));
                memcpy(s->qr_base[inter][plane], s->qr_base[qtj][plj], sizeof(s->qr_base[0][0]));
            } else {
                int qri= 0;
                int qi = 0;

                for(;;){
                    i= get_bits(gb, av_log2(matrices-1)+1);
                    if(i>= matrices){
                        av_log(avctx, AV_LOG_ERROR, "invalid base matrix index\n");
                        return -1;
                    }
                    s->qr_base[inter][plane][qri]= i;
                    if(qi >= 63)
                        break;
                    i = get_bits(gb, av_log2(63-qi)+1) + 1;
                    s->qr_size[inter][plane][qri++]= i;
                    qi += i;
                }

                if (qi > 63) {
                    av_log(avctx, AV_LOG_ERROR, "invalid qi %d > 63\n", qi);
                    return -1;
                }
                s->qr_count[inter][plane]= qri;
            }
        }
    }

    /* Huffman tables */
    for (s->hti = 0; s->hti < 80; s->hti++) {
        s->entries = 0;
        s->huff_code_size = 1;
        if (!get_bits1(gb)) {
            s->hbits = 0;
            if(read_huffman_tree(avctx, gb))
                return -1;
            s->hbits = 1;
            if(read_huffman_tree(avctx, gb))
                return -1;
        }
    }

    s->theora_tables = 1;

    return 0;
}

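/*
 * Theora init: split the three Xiph-style header packets out of extradata,
 * parse the identification and setup headers (the comment header is
 * skipped), then fall through to the common VP3 initialization.
 */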
static av_cold int theora_decode_init(AVCodecContext *avctx)
{
    Vp3DecodeContext *s = avctx->priv_data;
    GetBitContext gb;
    int ptype;
    uint8_t *header_start[3];
    int header_len[3];
    int i;

    s->theora = 1;

    if (!avctx->extradata_size)
    {
        av_log(avctx, AV_LOG_ERROR, "Missing extradata!\n");
        return -1;
    }

    if (ff_split_xiph_headers(avctx->extradata, avctx->extradata_size,
                              42, header_start, header_len) < 0) {
        av_log(avctx, AV_LOG_ERROR, "Corrupt extradata\n");
        return -1;
    }

    for(i=0;i<3;i++) {
        init_get_bits(&gb, header_start[i], header_len[i] * 8);

        ptype = get_bits(&gb, 8);

        if (!(ptype & 0x80))
        {
            av_log(avctx, AV_LOG_ERROR, "Invalid extradata!\n");
//            return -1;
        }

        // FIXME: Check for this as well.
        skip_bits_long(&gb, 6*8); /* "theora" */

        switch(ptype)
        {
            case 0x80:
                theora_decode_header(avctx, &gb);
                break;
            case 0x81:
// FIXME: is this needed? it breaks sometimes
//                theora_decode_comments(avctx, gb);
                break;
            case 0x82:
                if (theora_decode_tables(avctx, &gb))
                    return -1;
                break;
            default:
                av_log(avctx, AV_LOG_ERROR, "Unknown Theora config packet: %d\n", ptype&~0x80);
                break;
        }
        if(ptype != 0x81 && 8*header_len[i] != get_bits_count(&gb))
            av_log(avctx, AV_LOG_WARNING, "%d bits left in packet %X\n", 8*header_len[i] - get_bits_count(&gb), ptype);
        if (s->theora < 0x030200)
            break;
    }

    return vp3_decode_init(avctx);
}

AVCodec ff_theora_decoder = {
    "theora",
    AVMEDIA_TYPE_VIDEO,
    CODEC_ID_THEORA,
    sizeof(Vp3DecodeContext),
    theora_decode_init,
    NULL,
    vp3_decode_end,
    vp3_decode_frame,
    CODEC_CAP_DR1 | CODEC_CAP_DRAW_HORIZ_BAND,
    NULL,
    .long_name = NULL_IF_CONFIG_SMALL("Theora"),
};
#endif

AVCodec ff_vp3_decoder = {
    "vp3",
    AVMEDIA_TYPE_VIDEO,
    CODEC_ID_VP3,
    sizeof(Vp3DecodeContext),
    vp3_decode_init,
    NULL,
    vp3_decode_end,
    vp3_decode_frame,
    CODEC_CAP_DR1 | CODEC_CAP_DRAW_HORIZ_BAND,
    NULL,
    .long_name = NULL_IF_CONFIG_SMALL("On2 VP3"),
};