ffmpeg / libavcodec / vc1.c @ f66e4f5f
/*
 * VC-1 and WMV3 decoder
 * Copyright (c) 2006 Konstantin Shishkov
 * Partly based on vc9.c (c) 2005 Anonymous, Alex Beregszaszi, Michael Niedermayer
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 *
 */

/**
 * @file vc1.c
 * VC-1 and WMV3 decoder
 *
 */
#include "common.h"
#include "dsputil.h"
#include "avcodec.h"
#include "mpegvideo.h"
#include "vc1data.h"
#include "vc1acdata.h"

#undef NDEBUG
#include <assert.h>

extern const uint32_t ff_table0_dc_lum[120][2], ff_table1_dc_lum[120][2];
extern const uint32_t ff_table0_dc_chroma[120][2], ff_table1_dc_chroma[120][2];
extern VLC ff_msmp4_dc_luma_vlc[2], ff_msmp4_dc_chroma_vlc[2];
#define MB_INTRA_VLC_BITS 9
extern VLC ff_msmp4_mb_i_vlc;
extern const uint16_t ff_msmp4_mb_i_table[64][2];
#define DC_VLC_BITS 9
#define AC_VLC_BITS 9
static const uint16_t table_mb_intra[64][2];

/** Markers used in VC-1 AP frame data */
//@{
enum VC1Code{
    VC1_CODE_RES0       = 0x00000100,
    VC1_CODE_ENDOFSEQ   = 0x0000010A,
    VC1_CODE_SLICE,
    VC1_CODE_FIELD,
    VC1_CODE_FRAME,
    VC1_CODE_ENTRYPOINT,
    VC1_CODE_SEQHDR,
};
//@}

/** Available Profiles */
//@{
enum Profile {
    PROFILE_SIMPLE,
    PROFILE_MAIN,
    PROFILE_COMPLEX, ///< TODO: WMV9 specific
    PROFILE_ADVANCED
};
//@}

/** Sequence quantizer mode */
//@{
enum QuantMode {
    QUANT_FRAME_IMPLICIT,    ///< Implicitly specified at frame level
    QUANT_FRAME_EXPLICIT,    ///< Explicitly specified at frame level
    QUANT_NON_UNIFORM,       ///< Non-uniform quant used for all frames
    QUANT_UNIFORM            ///< Uniform quant used for all frames
};
//@}

/** Where quant can be changed */
//@{
enum DQProfile {
    DQPROFILE_FOUR_EDGES,
    DQPROFILE_DOUBLE_EDGES,
    DQPROFILE_SINGLE_EDGE,
    DQPROFILE_ALL_MBS
};
//@}

/** @name Where quant can be changed
 */
//@{
enum DQSingleEdge {
    DQSINGLE_BEDGE_LEFT,
    DQSINGLE_BEDGE_TOP,
    DQSINGLE_BEDGE_RIGHT,
    DQSINGLE_BEDGE_BOTTOM
};
//@}

/** Which pair of edges is quantized with ALTPQUANT */
//@{
enum DQDoubleEdge {
    DQDOUBLE_BEDGE_TOPLEFT,
    DQDOUBLE_BEDGE_TOPRIGHT,
    DQDOUBLE_BEDGE_BOTTOMRIGHT,
    DQDOUBLE_BEDGE_BOTTOMLEFT
};
//@}

/** MV modes for P frames */
//@{
enum MVModes {
    MV_PMODE_1MV_HPEL_BILIN,
    MV_PMODE_1MV,
    MV_PMODE_1MV_HPEL,
    MV_PMODE_MIXED_MV,
    MV_PMODE_INTENSITY_COMP
};
//@}

/** @name MV types for B frames */
//@{
enum BMVTypes {
    BMV_TYPE_BACKWARD,
    BMV_TYPE_FORWARD,
    BMV_TYPE_INTERPOLATED
};
//@}

/** @name Block types for P/B frames */
//@{
enum TransformTypes {
    TT_8X8,
    TT_8X4_BOTTOM,
    TT_8X4_TOP,
    TT_8X4, //Both halves
    TT_4X8_RIGHT,
    TT_4X8_LEFT,
    TT_4X8, //Both halves
    TT_4X4
};
//@}

/** Table for conversion between TTBLK and TTMB */
static const int ttblk_to_tt[3][8] = {
  { TT_8X4, TT_4X8, TT_8X8, TT_4X4, TT_8X4_TOP, TT_8X4_BOTTOM, TT_4X8_RIGHT, TT_4X8_LEFT },
  { TT_8X8, TT_4X8_RIGHT, TT_4X8_LEFT, TT_4X4, TT_8X4, TT_4X8, TT_8X4_BOTTOM, TT_8X4_TOP },
  { TT_8X8, TT_4X8, TT_4X4, TT_8X4_BOTTOM, TT_4X8_RIGHT, TT_4X8_LEFT, TT_8X4, TT_8X4_TOP }
};

static const int ttfrm_to_tt[4] = { TT_8X8, TT_8X4, TT_4X8, TT_4X4 };

/** MV P mode - the 5th element is only used for mode 1 */
static const uint8_t mv_pmode_table[2][5] = {
  { MV_PMODE_1MV_HPEL_BILIN, MV_PMODE_1MV, MV_PMODE_1MV_HPEL, MV_PMODE_INTENSITY_COMP, MV_PMODE_MIXED_MV },
  { MV_PMODE_1MV, MV_PMODE_MIXED_MV, MV_PMODE_1MV_HPEL, MV_PMODE_INTENSITY_COMP, MV_PMODE_1MV_HPEL_BILIN }
};
static const uint8_t mv_pmode_table2[2][4] = {
  { MV_PMODE_1MV_HPEL_BILIN, MV_PMODE_1MV, MV_PMODE_1MV_HPEL, MV_PMODE_MIXED_MV },
  { MV_PMODE_1MV, MV_PMODE_MIXED_MV, MV_PMODE_1MV_HPEL, MV_PMODE_1MV_HPEL_BILIN }
};

/** One more frame type */
#define BI_TYPE 7

static const int fps_nr[5] = { 24, 25, 30, 50, 60 },
                 fps_dr[2] = { 1000, 1001 };
static const uint8_t pquant_table[3][32] = {
  {  /* Implicit quantizer */
     0,  1,  2,  3,  4,  5,  6,  7,  8,  6,  7,  8,  9, 10, 11, 12,
    13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 27, 29, 31
  },
  {  /* Explicit quantizer, pquantizer uniform */
     0,  1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15,
    16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31
  },
  {  /* Explicit quantizer, pquantizer non-uniform */
     0,  1,  1,  1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13,
    14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 29, 31
  }
};

/** @name VC-1 VLC tables and defines
 * @todo TODO move this into the context
 */
//@{
#define VC1_BFRACTION_VLC_BITS 7
static VLC vc1_bfraction_vlc;
#define VC1_IMODE_VLC_BITS 4
static VLC vc1_imode_vlc;
#define VC1_NORM2_VLC_BITS 3
static VLC vc1_norm2_vlc;
#define VC1_NORM6_VLC_BITS 9
static VLC vc1_norm6_vlc;
/* Could be optimized, one table only needs 8 bits */
#define VC1_TTMB_VLC_BITS 9 //12
static VLC vc1_ttmb_vlc[3];
#define VC1_MV_DIFF_VLC_BITS 9 //15
static VLC vc1_mv_diff_vlc[4];
#define VC1_CBPCY_P_VLC_BITS 9 //14
static VLC vc1_cbpcy_p_vlc[4];
#define VC1_4MV_BLOCK_PATTERN_VLC_BITS 6
static VLC vc1_4mv_block_pattern_vlc[4];
#define VC1_TTBLK_VLC_BITS 5
static VLC vc1_ttblk_vlc[3];
#define VC1_SUBBLKPAT_VLC_BITS 6
static VLC vc1_subblkpat_vlc[3];

static VLC vc1_ac_coeff_table[8];
//@}

enum CodingSet {
    CS_HIGH_MOT_INTRA = 0,
    CS_HIGH_MOT_INTER,
    CS_LOW_MOT_INTRA,
    CS_LOW_MOT_INTER,
    CS_MID_RATE_INTRA,
    CS_MID_RATE_INTER,
    CS_HIGH_RATE_INTRA,
    CS_HIGH_RATE_INTER
};

/** @name Overlap conditions for Advanced Profile */
//@{
enum COTypes {
    CONDOVER_NONE = 0,
    CONDOVER_ALL,
    CONDOVER_SELECT
};
//@}


/** The VC1 Context
 * @fixme Change size wherever another size is more efficient
 * Many members are only used for Advanced Profile
 */
typedef struct VC1Context{
    MpegEncContext s;

    int bits;

    /** Simple/Main Profile sequence header */
    //@{
    int res_sm;           ///< reserved, 2b
    int res_x8;           ///< reserved
    int multires;         ///< frame-level RESPIC syntax element present
    int res_fasttx;       ///< reserved, always 1
    int res_transtab;     ///< reserved, always 0
    int rangered;         ///< RANGEREDFRM (range reduction) syntax element present
                          ///< at frame level
    int res_rtm_flag;     ///< reserved, set to 1
    int reserved;         ///< reserved
    //@}

    /** Advanced Profile */
    //@{
    int level;            ///< 3bits, for Advanced/Simple Profile, provided by TS layer
    int chromaformat;     ///< 2bits, 2=4:2:0, only defined
    int postprocflag;     ///< Per-frame processing suggestion flag present
    int broadcast;        ///< TFF/RFF present
    int interlace;        ///< Progressive/interlaced (RPTFTM syntax element)
    int tfcntrflag;       ///< TFCNTR present
    int panscanflag;      ///< NUMPANSCANWIN, TOPLEFT{X,Y}, BOTRIGHT{X,Y} present
    int extended_dmv;     ///< Additional extended dmv range at P/B frame-level
    int color_prim;       ///< 8bits, chroma coordinates of the color primaries
    int transfer_char;    ///< 8bits, Opto-electronic transfer characteristics
    int matrix_coef;      ///< 8bits, Color primaries->YCbCr transform matrix
    int hrd_param_flag;   ///< Presence of Hypothetical Reference
                          ///< Decoder parameters
    int psf;              ///< Progressive Segmented Frame
    //@}

    /** Sequence header data for all Profiles
     * TODO: choose between ints, uint8_ts and monobit flags
     */
    //@{
    int profile;          ///< 2bits, Profile
    int frmrtq_postproc;  ///< 3bits,
    int bitrtq_postproc;  ///< 5bits, quantized framerate-based postprocessing strength
    int fastuvmc;         ///< Rounding of qpel vector to hpel ? (not in Simple)
    int extended_mv;      ///< Ext MV in P/B (not in Simple)
    int dquant;           ///< How qscale varies with MBs, 2bits (not in Simple)
    int vstransform;      ///< variable-size [48]x[48] transform type + info
    int overlap;          ///< overlapped transforms in use
    int quantizer_mode;   ///< 2bits, quantizer mode used for sequence, see QUANT_*
    int finterpflag;      ///< INTERPFRM present
    //@}

    /** Frame decoding info for all profiles */
    //@{
    uint8_t mv_mode;      ///< MV coding mode
    uint8_t mv_mode2;     ///< Secondary MV coding mode (B frames)
    int k_x;              ///< Number of bits for MVs (depends on MV range)
    int k_y;              ///< Number of bits for MVs (depends on MV range)
    int range_x, range_y; ///< MV range
    uint8_t pq, altpq;    ///< Current/alternate frame quantizer scale
    /** pquant parameters */
    //@{
    uint8_t dquantfrm;
    uint8_t dqprofile;
    uint8_t dqsbedge;
    uint8_t dqbilevel;
    //@}
    /** AC coding set indexes
     * @see 8.1.1.10, p(1)10
     */
    //@{
    int c_ac_table_index; ///< Chroma index from ACFRM element
    int y_ac_table_index; ///< Luma index from AC2FRM element
    //@}
    int ttfrm;            ///< Transform type info present at frame level
    uint8_t ttmbf;        ///< Transform type flag
    uint8_t ttblk4x4;     ///< Value of ttblk which indicates a 4x4 transform
    int codingset;        ///< index of current table set from 11.8 to use for luma block decoding
    int codingset2;       ///< index of current table set from 11.8 to use for chroma block decoding
    int pqindex;          ///< raw pqindex used in coding set selection
    int a_avail, c_avail;
    uint8_t *mb_type_base, *mb_type[3];


    /** Luma compensation parameters */
    //@{
    uint8_t lumscale;
    uint8_t lumshift;
    //@}
    int16_t bfraction;    ///< Relative position % anchors => how to scale MVs
    uint8_t halfpq;       ///< Uniform quant over image and qp+.5
    uint8_t respic;       ///< Frame-level flag for resized images
    int buffer_fullness;  ///< HRD info
    /** Ranges:
     * -# 0 -> [-64, 63.f] x [-32, 31.f]
     * -# 1 -> [-128, 127.f] x [-64, 63.f]
     * -# 2 -> [-512, 511.f] x [-128, 127.f]
     * -# 3 -> [-1024, 1023.f] x [-256, 255.f]
     */
    uint8_t mvrange;
    uint8_t pquantizer;   ///< Uniform (over sequence) quantizer in use
    VLC *cbpcy_vlc;       ///< CBPCY VLC table
    int tt_index;         ///< Index for Transform Type tables
    uint8_t* mv_type_mb_plane; ///< bitplane for mv_type == (4MV)
    uint8_t* direct_mb_plane;  ///< bitplane for "direct" MBs
    int mv_type_is_raw;   ///< mv type mb plane is not coded
    int dmb_is_raw;       ///< direct mb plane is raw
    int skip_is_raw;      ///< skip mb plane is not coded
    uint8_t luty[256], lutuv[256]; // lookup tables used for intensity compensation
    int use_ic;           ///< use intensity compensation in B-frames
    int rnd;              ///< rounding control

    /** Frame decoding info for S/M profiles only */
    //@{
    uint8_t rangeredfrm;  ///< out_sample = CLIP((in_sample-128)*2+128)
    uint8_t interpfrm;
    //@}

    /** Frame decoding info for Advanced profile */
    //@{
    uint8_t fcm;          ///< 0->Progressive, 2->Frame-Interlace, 3->Field-Interlace
    uint8_t numpanscanwin;
    uint8_t tfcntr;
    uint8_t rptfrm, tff, rff;
    uint16_t topleftx;
    uint16_t toplefty;
    uint16_t bottomrightx;
    uint16_t bottomrighty;
    uint8_t uvsamp;
    uint8_t postproc;
    int hrd_num_leaky_buckets;
    uint8_t bit_rate_exponent;
    uint8_t buffer_size_exponent;
    uint8_t* acpred_plane;     ///< AC prediction flags bitplane
    int acpred_is_raw;
    uint8_t* over_flags_plane; ///< Overflags bitplane
    int overflg_is_raw;
    uint8_t condover;
    uint16_t *hrd_rate, *hrd_buffer;
    uint8_t *hrd_fullness;
    uint8_t range_mapy_flag;
    uint8_t range_mapuv_flag;
    uint8_t range_mapy;
    uint8_t range_mapuv;
    //@}

    int p_frame_skipped;
    int bi_type;
} VC1Context;

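/* Note: get_prefix() below counts leading bits that differ from 'stop', reading at most
 * 'len' of them.  For example, with stop = 0 and input bits 1 1 0 ... it consumes three
 * bits and returns 2; MVRANGE is read this way via get_prefix(gb, 0, 3). */
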
/**
 * Get unary code of limited length
 * @fixme FIXME Slow and ugly
 * @param gb GetBitContext
 * @param[in] stop The bitstop value (unary code of 1's or 0's)
 * @param[in] len Maximum length
 * @return Unary length/index
 */
static int get_prefix(GetBitContext *gb, int stop, int len)
{
#if 1
    int i;

    for(i = 0; i < len && get_bits1(gb) != stop; i++);
    return i;
/*  int i = 0, tmp = !stop;

    while (i != len && tmp != stop)
    {
        tmp = get_bits(gb, 1);
        i++;
    }
    if (i == len && tmp != stop) return len+1;
    return i;*/
#else
    unsigned int buf;
    int log;

    OPEN_READER(re, gb);
    UPDATE_CACHE(re, gb);
    buf=GET_CACHE(re, gb); //Still not sure
    if (stop) buf = ~buf;

    log= av_log2(-buf); //FIXME: -?
    if (log < limit){
        LAST_SKIP_BITS(re, gb, log+1);
        CLOSE_READER(re, gb);
        return log;
    }

    LAST_SKIP_BITS(re, gb, limit);
    CLOSE_READER(re, gb);
    return limit;
#endif
}

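/* decode210() reads a ternary symbol coded as "1" -> 0, "01" -> 1, "00" -> 2. */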
static inline int decode210(GetBitContext *gb){
    int n;
    n = get_bits1(gb);
    if (n == 1)
        return 0;
    else
        return 2 - get_bits1(gb);
}

/**
 * Init VC-1 specific tables and VC1Context members
 * @param v The VC1Context to initialize
 * @return Status
 */
static int vc1_init_common(VC1Context *v)
{
    static int done = 0;
    int i = 0;

    v->hrd_rate = v->hrd_buffer = NULL;

    /* VLC tables */
    if(!done)
    {
        done = 1;
        init_vlc(&vc1_bfraction_vlc, VC1_BFRACTION_VLC_BITS, 23,
                 vc1_bfraction_bits, 1, 1,
                 vc1_bfraction_codes, 1, 1, 1);
        init_vlc(&vc1_norm2_vlc, VC1_NORM2_VLC_BITS, 4,
                 vc1_norm2_bits, 1, 1,
                 vc1_norm2_codes, 1, 1, 1);
        init_vlc(&vc1_norm6_vlc, VC1_NORM6_VLC_BITS, 64,
                 vc1_norm6_bits, 1, 1,
                 vc1_norm6_codes, 2, 2, 1);
        init_vlc(&vc1_imode_vlc, VC1_IMODE_VLC_BITS, 7,
                 vc1_imode_bits, 1, 1,
                 vc1_imode_codes, 1, 1, 1);
        for (i=0; i<3; i++)
        {
            init_vlc(&vc1_ttmb_vlc[i], VC1_TTMB_VLC_BITS, 16,
                     vc1_ttmb_bits[i], 1, 1,
                     vc1_ttmb_codes[i], 2, 2, 1);
            init_vlc(&vc1_ttblk_vlc[i], VC1_TTBLK_VLC_BITS, 8,
                     vc1_ttblk_bits[i], 1, 1,
                     vc1_ttblk_codes[i], 1, 1, 1);
            init_vlc(&vc1_subblkpat_vlc[i], VC1_SUBBLKPAT_VLC_BITS, 15,
                     vc1_subblkpat_bits[i], 1, 1,
                     vc1_subblkpat_codes[i], 1, 1, 1);
        }
        for(i=0; i<4; i++)
        {
            init_vlc(&vc1_4mv_block_pattern_vlc[i], VC1_4MV_BLOCK_PATTERN_VLC_BITS, 16,
                     vc1_4mv_block_pattern_bits[i], 1, 1,
                     vc1_4mv_block_pattern_codes[i], 1, 1, 1);
            init_vlc(&vc1_cbpcy_p_vlc[i], VC1_CBPCY_P_VLC_BITS, 64,
                     vc1_cbpcy_p_bits[i], 1, 1,
                     vc1_cbpcy_p_codes[i], 2, 2, 1);
            init_vlc(&vc1_mv_diff_vlc[i], VC1_MV_DIFF_VLC_BITS, 73,
                     vc1_mv_diff_bits[i], 1, 1,
                     vc1_mv_diff_codes[i], 2, 2, 1);
        }
        for(i=0; i<8; i++)
            init_vlc(&vc1_ac_coeff_table[i], AC_VLC_BITS, vc1_ac_sizes[i],
                     &vc1_ac_tables[i][0][1], 8, 4,
                     &vc1_ac_tables[i][0][0], 8, 4, 1);
        init_vlc(&ff_msmp4_mb_i_vlc, MB_INTRA_VLC_BITS, 64,
                 &ff_msmp4_mb_i_table[0][1], 4, 2,
                 &ff_msmp4_mb_i_table[0][0], 4, 2, 1);
    }

    /* Other defaults */
    v->pq = -1;
    v->mvrange = 0; /* 7.1.1.18, p80 */

    return 0;
}

/***********************************************************************/
/**
 * @defgroup bitplane VC9 Bitplane decoding
 * @see 8.7, p56
 * @{
 */

/** @addtogroup bitplane
 * Imode types
 * @{
 */
enum Imode {
    IMODE_RAW,
    IMODE_NORM2,
    IMODE_DIFF2,
    IMODE_NORM6,
    IMODE_DIFF6,
    IMODE_ROWSKIP,
    IMODE_COLSKIP
};
/** @} */ //imode defines

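/* How the invertible modes pack bits (see bitplane_decoding() below):
 * NORM-2/DIFF-2 emit two bitplane bits per VLC code, treating the plane as one
 * long line; NORM-6/DIFF-6 emit six bits per code over 2x3 (or 3x2) tiles, with
 * leftover rows/columns handled by rowskip/colskip; the DIFF variants then run
 * a differential XOR pass over the whole plane.  ROWSKIP/COLSKIP send one
 * "skipped" flag per row/column, and RAW defers the bits to the MB layer. */
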
/** Decode rows by checking if they are skipped
 * @param plane Buffer to store decoded bits
 * @param[in] width Width of this buffer
 * @param[in] height Height of this buffer
 * @param[in] stride Stride of this buffer
 */
static void decode_rowskip(uint8_t* plane, int width, int height, int stride, GetBitContext *gb){
    int x, y;

    for (y=0; y<height; y++){
        if (!get_bits(gb, 1)) //rowskip
            memset(plane, 0, width);
        else
            for (x=0; x<width; x++)
                plane[x] = get_bits(gb, 1);
        plane += stride;
    }
}

/** Decode columns by checking if they are skipped
 * @param plane Buffer to store decoded bits
 * @param[in] width Width of this buffer
 * @param[in] height Height of this buffer
 * @param[in] stride Stride of this buffer
 * @fixme FIXME: Optimize
 */
static void decode_colskip(uint8_t* plane, int width, int height, int stride, GetBitContext *gb){
    int x, y;

    for (x=0; x<width; x++){
        if (!get_bits(gb, 1)) //colskip
            for (y=0; y<height; y++)
                plane[y*stride] = 0;
        else
            for (y=0; y<height; y++)
                plane[y*stride] = get_bits(gb, 1);
        plane ++;
    }
}

/** Decode a bitplane's bits
 * @param data Bitplane where to store the decoded bits
 * @param v VC-1 context for bit reading and logging
 * @return Status
 * @fixme FIXME: Optimize
 */
static int bitplane_decoding(uint8_t* data, int *raw_flag, VC1Context *v)
{
    GetBitContext *gb = &v->s.gb;

    int imode, x, y, code, offset;
    uint8_t invert, *planep = data;
    int width, height, stride;

    width = v->s.mb_width;
    height = v->s.mb_height;
    stride = v->s.mb_stride;
    invert = get_bits(gb, 1);
    imode = get_vlc2(gb, vc1_imode_vlc.table, VC1_IMODE_VLC_BITS, 1);

    *raw_flag = 0;
    switch (imode)
    {
    case IMODE_RAW:
        //Data is actually read in the MB layer (same for all tests == "raw")
        *raw_flag = 1; //invert ignored
        return invert;
    case IMODE_DIFF2:
    case IMODE_NORM2:
        if ((height * width) & 1)
        {
            *planep++ = get_bits(gb, 1);
            offset = 1;
        }
        else offset = 0;
        // decode bitplane as one long line
        for (y = offset; y < height * width; y += 2) {
            code = get_vlc2(gb, vc1_norm2_vlc.table, VC1_NORM2_VLC_BITS, 1);
            *planep++ = code & 1;
            offset++;
            if(offset == width) {
                offset = 0;
                planep += stride - width;
            }
            *planep++ = code >> 1;
            offset++;
            if(offset == width) {
                offset = 0;
                planep += stride - width;
            }
        }
        break;
    case IMODE_DIFF6:
    case IMODE_NORM6:
        if(!(height % 3) && (width % 3)) { // use 2x3 decoding
            for(y = 0; y < height; y+= 3) {
                for(x = width & 1; x < width; x += 2) {
                    code = get_vlc2(gb, vc1_norm6_vlc.table, VC1_NORM6_VLC_BITS, 2);
                    if(code < 0){
                        av_log(v->s.avctx, AV_LOG_DEBUG, "invalid NORM-6 VLC\n");
                        return -1;
                    }
                    planep[x + 0] = (code >> 0) & 1;
                    planep[x + 1] = (code >> 1) & 1;
                    planep[x + 0 + stride] = (code >> 2) & 1;
                    planep[x + 1 + stride] = (code >> 3) & 1;
                    planep[x + 0 + stride * 2] = (code >> 4) & 1;
                    planep[x + 1 + stride * 2] = (code >> 5) & 1;
                }
                planep += stride * 3;
            }
            if(width & 1) decode_colskip(data, 1, height, stride, &v->s.gb);
        } else { // 3x2
            planep += (height & 1) * stride;
            for(y = height & 1; y < height; y += 2) {
                for(x = width % 3; x < width; x += 3) {
                    code = get_vlc2(gb, vc1_norm6_vlc.table, VC1_NORM6_VLC_BITS, 2);
                    if(code < 0){
                        av_log(v->s.avctx, AV_LOG_DEBUG, "invalid NORM-6 VLC\n");
                        return -1;
                    }
                    planep[x + 0] = (code >> 0) & 1;
                    planep[x + 1] = (code >> 1) & 1;
                    planep[x + 2] = (code >> 2) & 1;
                    planep[x + 0 + stride] = (code >> 3) & 1;
                    planep[x + 1 + stride] = (code >> 4) & 1;
                    planep[x + 2 + stride] = (code >> 5) & 1;
                }
                planep += stride * 2;
            }
            x = width % 3;
            if(x) decode_colskip(data, x, height, stride, &v->s.gb);
            if(height & 1) decode_rowskip(data+x, width - x, 1, stride, &v->s.gb);
        }
        break;
    case IMODE_ROWSKIP:
        decode_rowskip(data, width, height, stride, &v->s.gb);
        break;
    case IMODE_COLSKIP:
        decode_colskip(data, width, height, stride, &v->s.gb);
        break;
    default: break;
    }

    /* Applying diff operator */
    if (imode == IMODE_DIFF2 || imode == IMODE_DIFF6)
    {
        planep = data;
        planep[0] ^= invert;
        for (x=1; x<width; x++)
            planep[x] ^= planep[x-1];
        for (y=1; y<height; y++)
        {
            planep += stride;
            planep[0] ^= planep[-stride];
            for (x=1; x<width; x++)
            {
                if (planep[x-1] != planep[x-stride]) planep[x] ^= invert;
                else                                 planep[x] ^= planep[x-1];
            }
        }
    }
    else if (invert)
    {
        planep = data;
        for (x=0; x<stride*height; x++) planep[x] = !planep[x]; //FIXME stride
    }
    return (imode<<1) + invert;
}

/** @} */ //Bitplane group

/***********************************************************************/
/** VOP Dquant decoding
 * @param v VC-1 Context
 */
static int vop_dquant_decoding(VC1Context *v)
{
    GetBitContext *gb = &v->s.gb;
    int pqdiff;

    //variable size
    if (v->dquant == 2)
    {
        pqdiff = get_bits(gb, 3);
        if (pqdiff == 7) v->altpq = get_bits(gb, 5);
        else v->altpq = v->pq + pqdiff + 1;
    }
    else
    {
        v->dquantfrm = get_bits(gb, 1);
        if ( v->dquantfrm )
        {
            v->dqprofile = get_bits(gb, 2);
            switch (v->dqprofile)
            {
            case DQPROFILE_SINGLE_EDGE:
            case DQPROFILE_DOUBLE_EDGES:
                v->dqsbedge = get_bits(gb, 2);
                break;
            case DQPROFILE_ALL_MBS:
                v->dqbilevel = get_bits(gb, 1);
            default: break; //Forbidden ?
            }
            if (v->dqbilevel || v->dqprofile != DQPROFILE_ALL_MBS)
            {
                pqdiff = get_bits(gb, 3);
                if (pqdiff == 7) v->altpq = get_bits(gb, 5);
                else v->altpq = v->pq + pqdiff + 1;
            }
        }
    }
    return 0;
}

/** Put block onto picture
 */
static void vc1_put_block(VC1Context *v, DCTELEM block[6][64])
{
    uint8_t *Y;
    int ys, us, vs;
    DSPContext *dsp = &v->s.dsp;

    if(v->rangeredfrm) {
        int i, j, k;
        for(k = 0; k < 6; k++)
            for(j = 0; j < 8; j++)
                for(i = 0; i < 8; i++)
                    block[k][i + j*8] = ((block[k][i + j*8] - 128) << 1) + 128;

    }
    ys = v->s.current_picture.linesize[0];
    us = v->s.current_picture.linesize[1];
    vs = v->s.current_picture.linesize[2];
    Y = v->s.dest[0];

    dsp->put_pixels_clamped(block[0], Y, ys);
    dsp->put_pixels_clamped(block[1], Y + 8, ys);
    Y += ys * 8;
    dsp->put_pixels_clamped(block[2], Y, ys);
    dsp->put_pixels_clamped(block[3], Y + 8, ys);

    if(!(v->s.flags & CODEC_FLAG_GRAY)) {
        dsp->put_pixels_clamped(block[4], v->s.dest[1], us);
        dsp->put_pixels_clamped(block[5], v->s.dest[2], vs);
    }
}

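/* In the MC routines below, the chroma vector is derived from the quarter-pel
 * luma vector as uvmx = (mx + ((mx & 3) == 3)) >> 1 (and likewise for uvmy),
 * i.e. halved with rounding onto the chroma grid; when FASTUVMC is set the
 * result is additionally rounded towards zero to a half-pel position. */
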
/** Do motion compensation over 1 macroblock
 * Mostly adapted hpel_motion and qpel_motion from mpegvideo.c
 */
static void vc1_mc_1mv(VC1Context *v, int dir)
{
    MpegEncContext *s = &v->s;
    DSPContext *dsp = &v->s.dsp;
    uint8_t *srcY, *srcU, *srcV;
    int dxy, uvdxy, mx, my, uvmx, uvmy, src_x, src_y, uvsrc_x, uvsrc_y;

    if(!v->s.last_picture.data[0])return;

    mx = s->mv[dir][0][0];
    my = s->mv[dir][0][1];

    // store motion vectors for further use in B frames
    if(s->pict_type == P_TYPE) {
        s->current_picture.motion_val[1][s->block_index[0]][0] = mx;
        s->current_picture.motion_val[1][s->block_index[0]][1] = my;
    }
    uvmx = (mx + ((mx & 3) == 3)) >> 1;
    uvmy = (my + ((my & 3) == 3)) >> 1;
    if(v->fastuvmc) {
        uvmx = uvmx + ((uvmx<0)?(uvmx&1):-(uvmx&1));
        uvmy = uvmy + ((uvmy<0)?(uvmy&1):-(uvmy&1));
    }
    if(!dir) {
        srcY = s->last_picture.data[0];
        srcU = s->last_picture.data[1];
        srcV = s->last_picture.data[2];
    } else {
        srcY = s->next_picture.data[0];
        srcU = s->next_picture.data[1];
        srcV = s->next_picture.data[2];
    }

    src_x = s->mb_x * 16 + (mx >> 2);
    src_y = s->mb_y * 16 + (my >> 2);
    uvsrc_x = s->mb_x * 8 + (uvmx >> 2);
    uvsrc_y = s->mb_y * 8 + (uvmy >> 2);

    src_x   = av_clip(  src_x, -16, s->mb_width  * 16);
    src_y   = av_clip(  src_y, -16, s->mb_height * 16);
    uvsrc_x = av_clip(uvsrc_x,  -8, s->mb_width  *  8);
    uvsrc_y = av_clip(uvsrc_y,  -8, s->mb_height *  8);

    srcY += src_y * s->linesize + src_x;
    srcU += uvsrc_y * s->uvlinesize + uvsrc_x;
    srcV += uvsrc_y * s->uvlinesize + uvsrc_x;

    /* for grayscale we should not try to read from unknown area */
    if(s->flags & CODEC_FLAG_GRAY) {
        srcU = s->edge_emu_buffer + 18 * s->linesize;
        srcV = s->edge_emu_buffer + 18 * s->linesize;
    }

    if(v->rangeredfrm || (v->mv_mode == MV_PMODE_INTENSITY_COMP)
       || (unsigned)(src_x - s->mspel) > s->h_edge_pos - (mx&3) - 16 - s->mspel*3
       || (unsigned)(src_y - s->mspel) > s->v_edge_pos - (my&3) - 16 - s->mspel*3){
        uint8_t *uvbuf= s->edge_emu_buffer + 19 * s->linesize;

        srcY -= s->mspel * (1 + s->linesize);
        ff_emulated_edge_mc(s->edge_emu_buffer, srcY, s->linesize, 17+s->mspel*2, 17+s->mspel*2,
                            src_x - s->mspel, src_y - s->mspel, s->h_edge_pos, s->v_edge_pos);
        srcY = s->edge_emu_buffer;
        ff_emulated_edge_mc(uvbuf     , srcU, s->uvlinesize, 8+1, 8+1,
                            uvsrc_x, uvsrc_y, s->h_edge_pos >> 1, s->v_edge_pos >> 1);
        ff_emulated_edge_mc(uvbuf + 16, srcV, s->uvlinesize, 8+1, 8+1,
                            uvsrc_x, uvsrc_y, s->h_edge_pos >> 1, s->v_edge_pos >> 1);
        srcU = uvbuf;
        srcV = uvbuf + 16;
        /* if we deal with range reduction we need to scale source blocks */
        if(v->rangeredfrm) {
            int i, j;
            uint8_t *src, *src2;

            src = srcY;
            for(j = 0; j < 17 + s->mspel*2; j++) {
                for(i = 0; i < 17 + s->mspel*2; i++) src[i] = ((src[i] - 128) >> 1) + 128;
                src += s->linesize;
            }
            src = srcU; src2 = srcV;
            for(j = 0; j < 9; j++) {
                for(i = 0; i < 9; i++) {
                    src[i] = ((src[i] - 128) >> 1) + 128;
                    src2[i] = ((src2[i] - 128) >> 1) + 128;
                }
                src += s->uvlinesize;
                src2 += s->uvlinesize;
            }
        }
        /* if we deal with intensity compensation we need to scale source blocks */
        if(v->mv_mode == MV_PMODE_INTENSITY_COMP) {
            int i, j;
            uint8_t *src, *src2;

            src = srcY;
            for(j = 0; j < 17 + s->mspel*2; j++) {
                for(i = 0; i < 17 + s->mspel*2; i++) src[i] = v->luty[src[i]];
                src += s->linesize;
            }
            src = srcU; src2 = srcV;
            for(j = 0; j < 9; j++) {
                for(i = 0; i < 9; i++) {
                    src[i] = v->lutuv[src[i]];
                    src2[i] = v->lutuv[src2[i]];
                }
                src += s->uvlinesize;
                src2 += s->uvlinesize;
            }
        }
        srcY += s->mspel * (1 + s->linesize);
    }

    if(s->mspel) {
        dxy = ((my & 3) << 2) | (mx & 3);
        dsp->put_vc1_mspel_pixels_tab[dxy](s->dest[0]    , srcY    , s->linesize, v->rnd);
        dsp->put_vc1_mspel_pixels_tab[dxy](s->dest[0] + 8, srcY + 8, s->linesize, v->rnd);
        srcY += s->linesize * 8;
        dsp->put_vc1_mspel_pixels_tab[dxy](s->dest[0] + 8 * s->linesize    , srcY    , s->linesize, v->rnd);
        dsp->put_vc1_mspel_pixels_tab[dxy](s->dest[0] + 8 * s->linesize + 8, srcY + 8, s->linesize, v->rnd);
    } else { // hpel mc - always used for luma
        dxy = (my & 2) | ((mx & 2) >> 1);

        if(!v->rnd)
            dsp->put_pixels_tab[0][dxy](s->dest[0], srcY, s->linesize, 16);
        else
            dsp->put_no_rnd_pixels_tab[0][dxy](s->dest[0], srcY, s->linesize, 16);
    }

    if(s->flags & CODEC_FLAG_GRAY) return;
    /* Chroma MC always uses qpel bilinear */
    uvdxy = ((uvmy & 3) << 2) | (uvmx & 3);
    uvmx = (uvmx&3)<<1;
    uvmy = (uvmy&3)<<1;
    if(!v->rnd){
        dsp->put_h264_chroma_pixels_tab[0](s->dest[1], srcU, s->uvlinesize, 8, uvmx, uvmy);
        dsp->put_h264_chroma_pixels_tab[0](s->dest[2], srcV, s->uvlinesize, 8, uvmx, uvmy);
    }else{
        dsp->put_no_rnd_h264_chroma_pixels_tab[0](s->dest[1], srcU, s->uvlinesize, 8, uvmx, uvmy);
        dsp->put_no_rnd_h264_chroma_pixels_tab[0](s->dest[2], srcV, s->uvlinesize, 8, uvmx, uvmy);
    }
}

/** Do motion compensation for 4-MV macroblock - luminance block
 */
static void vc1_mc_4mv_luma(VC1Context *v, int n)
{
    MpegEncContext *s = &v->s;
    DSPContext *dsp = &v->s.dsp;
    uint8_t *srcY;
    int dxy, mx, my, src_x, src_y;
    int off;

    if(!v->s.last_picture.data[0])return;
    mx = s->mv[0][n][0];
    my = s->mv[0][n][1];
    srcY = s->last_picture.data[0];

    off = s->linesize * 4 * (n&2) + (n&1) * 8;

    src_x = s->mb_x * 16 + (n&1) * 8 + (mx >> 2);
    src_y = s->mb_y * 16 + (n&2) * 4 + (my >> 2);

    src_x = av_clip(src_x, -16, s->mb_width  * 16);
    src_y = av_clip(src_y, -16, s->mb_height * 16);

    srcY += src_y * s->linesize + src_x;

    if(v->rangeredfrm || (v->mv_mode == MV_PMODE_INTENSITY_COMP)
       || (unsigned)(src_x - s->mspel) > s->h_edge_pos - (mx&3) - 8 - s->mspel*2
       || (unsigned)(src_y - s->mspel) > s->v_edge_pos - (my&3) - 8 - s->mspel*2){
        srcY -= s->mspel * (1 + s->linesize);
        ff_emulated_edge_mc(s->edge_emu_buffer, srcY, s->linesize, 9+s->mspel*2, 9+s->mspel*2,
                            src_x - s->mspel, src_y - s->mspel, s->h_edge_pos, s->v_edge_pos);
        srcY = s->edge_emu_buffer;
        /* if we deal with range reduction we need to scale source blocks */
        if(v->rangeredfrm) {
            int i, j;
            uint8_t *src;

            src = srcY;
            for(j = 0; j < 9 + s->mspel*2; j++) {
                for(i = 0; i < 9 + s->mspel*2; i++) src[i] = ((src[i] - 128) >> 1) + 128;
                src += s->linesize;
            }
        }
        /* if we deal with intensity compensation we need to scale source blocks */
        if(v->mv_mode == MV_PMODE_INTENSITY_COMP) {
            int i, j;
            uint8_t *src;

            src = srcY;
            for(j = 0; j < 9 + s->mspel*2; j++) {
                for(i = 0; i < 9 + s->mspel*2; i++) src[i] = v->luty[src[i]];
                src += s->linesize;
            }
        }
        srcY += s->mspel * (1 + s->linesize);
    }

    if(s->mspel) {
        dxy = ((my & 3) << 2) | (mx & 3);
        dsp->put_vc1_mspel_pixels_tab[dxy](s->dest[0] + off, srcY, s->linesize, v->rnd);
    } else { // hpel mc - always used for luma
        dxy = (my & 2) | ((mx & 2) >> 1);
        if(!v->rnd)
            dsp->put_pixels_tab[1][dxy](s->dest[0] + off, srcY, s->linesize, 8);
        else
            dsp->put_no_rnd_pixels_tab[1][dxy](s->dest[0] + off, srcY, s->linesize, 8);
    }
}

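/* median4() returns the average of the two middle values of its four arguments;
 * vc1_mc_4mv_chroma() uses it to derive the chroma MV from the four luma MVs
 * when all four blocks are inter-coded (with three inter blocks it falls back
 * to mid_pred(), with two to a simple average, and otherwise skips chroma MC). */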
static inline int median4(int a, int b, int c, int d)
{
    if(a < b) {
        if(c < d) return (FFMIN(b, d) + FFMAX(a, c)) / 2;
        else      return (FFMIN(b, c) + FFMAX(a, d)) / 2;
    } else {
        if(c < d) return (FFMIN(a, d) + FFMAX(b, c)) / 2;
        else      return (FFMIN(a, c) + FFMAX(b, d)) / 2;
    }
}


/** Do motion compensation for 4-MV macroblock - both chroma blocks
|
1009 |
*/
|
1010 |
static void vc1_mc_4mv_chroma(VC1Context *v) |
1011 |
{ |
1012 |
MpegEncContext *s = &v->s; |
1013 |
DSPContext *dsp = &v->s.dsp; |
1014 |
uint8_t *srcU, *srcV; |
1015 |
int uvdxy, uvmx, uvmy, uvsrc_x, uvsrc_y;
|
1016 |
int i, idx, tx = 0, ty = 0; |
1017 |
int mvx[4], mvy[4], intra[4]; |
1018 |
static const int count[16] = { 0, 1, 1, 2, 1, 2, 2, 3, 1, 2, 2, 3, 2, 3, 3, 4}; |
1019 |
|
1020 |
if(!v->s.last_picture.data[0])return; |
1021 |
if(s->flags & CODEC_FLAG_GRAY) return; |
1022 |
|
1023 |
for(i = 0; i < 4; i++) { |
1024 |
mvx[i] = s->mv[0][i][0]; |
1025 |
mvy[i] = s->mv[0][i][1]; |
1026 |
intra[i] = v->mb_type[0][s->block_index[i]];
|
1027 |
} |
1028 |
|
1029 |
/* calculate chroma MV vector from four luma MVs */
|
1030 |
idx = (intra[3] << 3) | (intra[2] << 2) | (intra[1] << 1) | intra[0]; |
1031 |
if(!idx) { // all blocks are inter |
1032 |
tx = median4(mvx[0], mvx[1], mvx[2], mvx[3]); |
1033 |
ty = median4(mvy[0], mvy[1], mvy[2], mvy[3]); |
1034 |
} else if(count[idx] == 1) { // 3 inter blocks |
1035 |
switch(idx) {
|
1036 |
case 0x1: |
1037 |
tx = mid_pred(mvx[1], mvx[2], mvx[3]); |
1038 |
ty = mid_pred(mvy[1], mvy[2], mvy[3]); |
1039 |
break;
|
1040 |
case 0x2: |
1041 |
tx = mid_pred(mvx[0], mvx[2], mvx[3]); |
1042 |
ty = mid_pred(mvy[0], mvy[2], mvy[3]); |
1043 |
break;
|
1044 |
case 0x4: |
1045 |
tx = mid_pred(mvx[0], mvx[1], mvx[3]); |
1046 |
ty = mid_pred(mvy[0], mvy[1], mvy[3]); |
1047 |
break;
|
1048 |
case 0x8: |
1049 |
tx = mid_pred(mvx[0], mvx[1], mvx[2]); |
1050 |
ty = mid_pred(mvy[0], mvy[1], mvy[2]); |
1051 |
break;
|
1052 |
} |
1053 |
} else if(count[idx] == 2) { |
1054 |
int t1 = 0, t2 = 0; |
1055 |
for(i=0; i<3;i++) if(!intra[i]) {t1 = i; break;} |
1056 |
for(i= t1+1; i<4; i++)if(!intra[i]) {t2 = i; break;} |
1057 |
tx = (mvx[t1] + mvx[t2]) / 2;
|
1058 |
ty = (mvy[t1] + mvy[t2]) / 2;
|
1059 |
} else
|
1060 |
return; //no need to do MC for inter blocks |
1061 |
|
1062 |
s->current_picture.motion_val[1][s->block_index[0]][0] = tx; |
1063 |
s->current_picture.motion_val[1][s->block_index[0]][1] = ty; |
1064 |
uvmx = (tx + ((tx&3) == 3)) >> 1; |
1065 |
uvmy = (ty + ((ty&3) == 3)) >> 1; |
1066 |
if(v->fastuvmc) {
|
1067 |
uvmx = uvmx + ((uvmx<0)?(uvmx&1):-(uvmx&1)); |
1068 |
uvmy = uvmy + ((uvmy<0)?(uvmy&1):-(uvmy&1)); |
1069 |
} |
1070 |
|
1071 |
uvsrc_x = s->mb_x * 8 + (uvmx >> 2); |
1072 |
uvsrc_y = s->mb_y * 8 + (uvmy >> 2); |
1073 |
|
1074 |
uvsrc_x = av_clip(uvsrc_x, -8, s->mb_width * 8); |
1075 |
uvsrc_y = av_clip(uvsrc_y, -8, s->mb_height * 8); |
1076 |
srcU = s->last_picture.data[1] + uvsrc_y * s->uvlinesize + uvsrc_x;
|
1077 |
srcV = s->last_picture.data[2] + uvsrc_y * s->uvlinesize + uvsrc_x;
|
1078 |
if(v->rangeredfrm || (v->mv_mode == MV_PMODE_INTENSITY_COMP)
|
1079 |
|| (unsigned)uvsrc_x > (s->h_edge_pos >> 1) - 9 |
1080 |
|| (unsigned)uvsrc_y > (s->v_edge_pos >> 1) - 9){ |
1081 |
ff_emulated_edge_mc(s->edge_emu_buffer , srcU, s->uvlinesize, 8+1, 8+1, |
1082 |
uvsrc_x, uvsrc_y, s->h_edge_pos >> 1, s->v_edge_pos >> 1); |
1083 |
ff_emulated_edge_mc(s->edge_emu_buffer + 16, srcV, s->uvlinesize, 8+1, 8+1, |
1084 |
uvsrc_x, uvsrc_y, s->h_edge_pos >> 1, s->v_edge_pos >> 1); |
1085 |
srcU = s->edge_emu_buffer; |
1086 |
srcV = s->edge_emu_buffer + 16;
|
1087 |
|
1088 |
/* if we deal with range reduction we need to scale source blocks */
|
1089 |
if(v->rangeredfrm) {
|
1090 |
int i, j;
|
1091 |
uint8_t *src, *src2; |
1092 |
|
1093 |
src = srcU; src2 = srcV; |
1094 |
for(j = 0; j < 9; j++) { |
1095 |
for(i = 0; i < 9; i++) { |
1096 |
src[i] = ((src[i] - 128) >> 1) + 128; |
1097 |
src2[i] = ((src2[i] - 128) >> 1) + 128; |
1098 |
} |
1099 |
src += s->uvlinesize; |
1100 |
src2 += s->uvlinesize; |
1101 |
} |
1102 |
} |
1103 |
/* if we deal with intensity compensation we need to scale source blocks */
|
1104 |
if(v->mv_mode == MV_PMODE_INTENSITY_COMP) {
|
1105 |
int i, j;
|
1106 |
uint8_t *src, *src2; |
1107 |
|
1108 |
src = srcU; src2 = srcV; |
1109 |
for(j = 0; j < 9; j++) { |
1110 |
for(i = 0; i < 9; i++) { |
1111 |
src[i] = v->lutuv[src[i]]; |
1112 |
src2[i] = v->lutuv[src2[i]]; |
1113 |
} |
1114 |
src += s->uvlinesize; |
1115 |
src2 += s->uvlinesize; |
1116 |
} |
1117 |
} |
1118 |
} |
1119 |
|
1120 |
/* Chroma MC always uses qpel bilinear */
|
1121 |
uvdxy = ((uvmy & 3) << 2) | (uvmx & 3); |
1122 |
uvmx = (uvmx&3)<<1; |
1123 |
uvmy = (uvmy&3)<<1; |
1124 |
if(!v->rnd){
|
1125 |
dsp->put_h264_chroma_pixels_tab[0](s->dest[1], srcU, s->uvlinesize, 8, uvmx, uvmy); |
1126 |
dsp->put_h264_chroma_pixels_tab[0](s->dest[2], srcV, s->uvlinesize, 8, uvmx, uvmy); |
1127 |
}else{
|
1128 |
dsp->put_no_rnd_h264_chroma_pixels_tab[0](s->dest[1], srcU, s->uvlinesize, 8, uvmx, uvmy); |
1129 |
dsp->put_no_rnd_h264_chroma_pixels_tab[0](s->dest[2], srcV, s->uvlinesize, 8, uvmx, uvmy); |
1130 |
} |
1131 |
} |
1132 |
|
1133 |
static int decode_sequence_header_adv(VC1Context *v, GetBitContext *gb); |
1134 |
|
1135 |
/**
|
1136 |
* Decode Simple/Main Profiles sequence header
|
1137 |
* @see Figure 7-8, p16-17
|
1138 |
* @param avctx Codec context
|
1139 |
* @param gb GetBit context initialized from Codec context extra_data
|
1140 |
* @return Status
|
1141 |
*/
|
1142 |
static int decode_sequence_header(AVCodecContext *avctx, GetBitContext *gb) |
1143 |
{ |
1144 |
VC1Context *v = avctx->priv_data; |
1145 |
|
1146 |
av_log(avctx, AV_LOG_DEBUG, "Header: %0X\n", show_bits(gb, 32)); |
1147 |
v->profile = get_bits(gb, 2);
|
1148 |
if (v->profile == 2) |
1149 |
{ |
1150 |
av_log(avctx, AV_LOG_ERROR, "Profile value 2 is forbidden (and WMV3 Complex Profile is unsupported)\n");
|
1151 |
return -1; |
1152 |
} |
1153 |
|
1154 |
if (v->profile == PROFILE_ADVANCED)
|
1155 |
{ |
1156 |
return decode_sequence_header_adv(v, gb);
|
1157 |
} |
1158 |
else
|
1159 |
{ |
1160 |
v->res_sm = get_bits(gb, 2); //reserved |
1161 |
if (v->res_sm)
|
1162 |
{ |
1163 |
av_log(avctx, AV_LOG_ERROR, |
1164 |
"Reserved RES_SM=%i is forbidden\n", v->res_sm);
|
1165 |
return -1; |
1166 |
} |
1167 |
} |
1168 |
|
1169 |
// (fps-2)/4 (->30)
|
1170 |
v->frmrtq_postproc = get_bits(gb, 3); //common |
1171 |
// (bitrate-32kbps)/64kbps
|
1172 |
v->bitrtq_postproc = get_bits(gb, 5); //common |
1173 |
v->s.loop_filter = get_bits(gb, 1); //common |
1174 |
if(v->s.loop_filter == 1 && v->profile == PROFILE_SIMPLE) |
1175 |
{ |
1176 |
av_log(avctx, AV_LOG_ERROR, |
1177 |
"LOOPFILTER shell not be enabled in simple profile\n");
|
1178 |
} |
1179 |
|
1180 |
v->res_x8 = get_bits(gb, 1); //reserved |
1181 |
if (v->res_x8)
|
1182 |
{ |
1183 |
av_log(avctx, AV_LOG_ERROR, |
1184 |
"1 for reserved RES_X8 is forbidden\n");
|
1185 |
//return -1;
|
1186 |
} |
1187 |
v->multires = get_bits(gb, 1);
|
1188 |
v->res_fasttx = get_bits(gb, 1);
|
1189 |
if (!v->res_fasttx)
|
1190 |
{ |
1191 |
av_log(avctx, AV_LOG_ERROR, |
1192 |
"0 for reserved RES_FASTTX is forbidden\n");
|
1193 |
//return -1;
|
1194 |
} |
1195 |
|
1196 |
v->fastuvmc = get_bits(gb, 1); //common |
1197 |
if (!v->profile && !v->fastuvmc)
|
1198 |
{ |
1199 |
av_log(avctx, AV_LOG_ERROR, |
1200 |
"FASTUVMC unavailable in Simple Profile\n");
|
1201 |
return -1; |
1202 |
} |
1203 |
v->extended_mv = get_bits(gb, 1); //common |
1204 |
if (!v->profile && v->extended_mv)
|
1205 |
{ |
1206 |
av_log(avctx, AV_LOG_ERROR, |
1207 |
"Extended MVs unavailable in Simple Profile\n");
|
1208 |
return -1; |
1209 |
} |
1210 |
v->dquant = get_bits(gb, 2); //common |
1211 |
v->vstransform = get_bits(gb, 1); //common |
1212 |
|
1213 |
v->res_transtab = get_bits(gb, 1);
|
1214 |
if (v->res_transtab)
|
1215 |
{ |
1216 |
av_log(avctx, AV_LOG_ERROR, |
1217 |
"1 for reserved RES_TRANSTAB is forbidden\n");
|
1218 |
return -1; |
1219 |
} |
1220 |
|
1221 |
v->overlap = get_bits(gb, 1); //common |
1222 |
|
1223 |
v->s.resync_marker = get_bits(gb, 1);
|
1224 |
v->rangered = get_bits(gb, 1);
|
1225 |
if (v->rangered && v->profile == PROFILE_SIMPLE)
|
1226 |
{ |
1227 |
av_log(avctx, AV_LOG_INFO, |
1228 |
"RANGERED should be set to 0 in simple profile\n");
|
1229 |
} |
1230 |
|
1231 |
v->s.max_b_frames = avctx->max_b_frames = get_bits(gb, 3); //common |
1232 |
v->quantizer_mode = get_bits(gb, 2); //common |
1233 |
|
1234 |
v->finterpflag = get_bits(gb, 1); //common |
1235 |
v->res_rtm_flag = get_bits(gb, 1); //reserved |
1236 |
if (!v->res_rtm_flag)
|
1237 |
{ |
1238 |
// av_log(avctx, AV_LOG_ERROR,
|
1239 |
// "0 for reserved RES_RTM_FLAG is forbidden\n");
|
1240 |
av_log(avctx, AV_LOG_ERROR, |
1241 |
"Old WMV3 version detected, only I-frames will be decoded\n");
|
1242 |
//return -1;
|
1243 |
} |
1244 |
av_log(avctx, AV_LOG_DEBUG, |
1245 |
"Profile %i:\nfrmrtq_postproc=%i, bitrtq_postproc=%i\n"
|
1246 |
"LoopFilter=%i, MultiRes=%i, FastUVMC=%i, Extended MV=%i\n"
|
1247 |
"Rangered=%i, VSTransform=%i, Overlap=%i, SyncMarker=%i\n"
|
1248 |
"DQuant=%i, Quantizer mode=%i, Max B frames=%i\n",
|
1249 |
v->profile, v->frmrtq_postproc, v->bitrtq_postproc, |
1250 |
v->s.loop_filter, v->multires, v->fastuvmc, v->extended_mv, |
1251 |
v->rangered, v->vstransform, v->overlap, v->s.resync_marker, |
1252 |
v->dquant, v->quantizer_mode, avctx->max_b_frames |
1253 |
); |
1254 |
return 0; |
1255 |
} |
1256 |
|
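/**
 * Decode Advanced Profile sequence header
 * (LEVEL, chroma format, coded size, interlace/pulldown flags, optional
 * display/aspect/framerate/color info and HRD parameters)
 * @param v VC-1 context
 * @param gb GetBit context positioned at the sequence header data
 * @return Status
 */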
static int decode_sequence_header_adv(VC1Context *v, GetBitContext *gb)
{
    v->res_rtm_flag = 1;
    v->level = get_bits(gb, 3);
    if(v->level >= 5)
    {
        av_log(v->s.avctx, AV_LOG_ERROR, "Reserved LEVEL %i\n",v->level);
    }
    v->chromaformat = get_bits(gb, 2);
    if (v->chromaformat != 1)
    {
        av_log(v->s.avctx, AV_LOG_ERROR,
               "Only 4:2:0 chroma format supported\n");
        return -1;
    }

    // (fps-2)/4 (->30)
    v->frmrtq_postproc = get_bits(gb, 3); //common
    // (bitrate-32kbps)/64kbps
    v->bitrtq_postproc = get_bits(gb, 5); //common
    v->postprocflag = get_bits(gb, 1); //common

    v->s.avctx->coded_width = (get_bits(gb, 12) + 1) << 1;
    v->s.avctx->coded_height = (get_bits(gb, 12) + 1) << 1;
    v->s.avctx->width = v->s.avctx->coded_width;
    v->s.avctx->height = v->s.avctx->coded_height;
    v->broadcast = get_bits1(gb);
    v->interlace = get_bits1(gb);
    v->tfcntrflag = get_bits1(gb);
    v->finterpflag = get_bits1(gb);
    get_bits1(gb); // reserved

    av_log(v->s.avctx, AV_LOG_DEBUG,
           "Advanced Profile level %i:\nfrmrtq_postproc=%i, bitrtq_postproc=%i\n"
           "LoopFilter=%i, ChromaFormat=%i, Pulldown=%i, Interlace: %i\n"
           "TFCTRflag=%i, FINTERPflag=%i\n",
           v->level, v->frmrtq_postproc, v->bitrtq_postproc,
           v->s.loop_filter, v->chromaformat, v->broadcast, v->interlace,
           v->tfcntrflag, v->finterpflag
           );

    v->psf = get_bits1(gb);
    if(v->psf) { //PsF, 6.1.13
        av_log(v->s.avctx, AV_LOG_ERROR, "Progressive Segmented Frame mode: not supported (yet)\n");
        return -1;
    }
    v->s.max_b_frames = v->s.avctx->max_b_frames = 7;
    if(get_bits1(gb)) { //Display Info - decoding is not affected by it
        int w, h, ar = 0;
        av_log(v->s.avctx, AV_LOG_DEBUG, "Display extended info:\n");
        v->s.avctx->width  = v->s.width  = w = get_bits(gb, 14) + 1;
        v->s.avctx->height = v->s.height = h = get_bits(gb, 14) + 1;
        av_log(v->s.avctx, AV_LOG_DEBUG, "Display dimensions: %ix%i\n", w, h);
        if(get_bits1(gb))
            ar = get_bits(gb, 4);
        if(ar && ar < 14){
            v->s.avctx->sample_aspect_ratio = vc1_pixel_aspect[ar];
        }else if(ar == 15){
            w = get_bits(gb, 8);
            h = get_bits(gb, 8);
            v->s.avctx->sample_aspect_ratio = (AVRational){w, h};
        }

        if(get_bits1(gb)){ //framerate stuff
            if(get_bits1(gb)) {
                v->s.avctx->time_base.num = 32;
                v->s.avctx->time_base.den = get_bits(gb, 16) + 1;
            } else {
                int nr, dr;
                nr = get_bits(gb, 8);
                dr = get_bits(gb, 4);
                if(nr && nr < 8 && dr && dr < 3){
                    v->s.avctx->time_base.num = fps_dr[dr - 1];
                    v->s.avctx->time_base.den = fps_nr[nr - 1] * 1000;
                }
            }
        }

        if(get_bits1(gb)){
            v->color_prim = get_bits(gb, 8);
            v->transfer_char = get_bits(gb, 8);
            v->matrix_coef = get_bits(gb, 8);
        }
    }

    v->hrd_param_flag = get_bits1(gb);
    if(v->hrd_param_flag) {
        int i;
        v->hrd_num_leaky_buckets = get_bits(gb, 5);
        get_bits(gb, 4); //bitrate exponent
        get_bits(gb, 4); //buffer size exponent
        for(i = 0; i < v->hrd_num_leaky_buckets; i++) {
            get_bits(gb, 16); //hrd_rate[n]
            get_bits(gb, 16); //hrd_buffer[n]
        }
    }
    return 0;
}

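/**
 * Decode Advanced Profile entry-point header
 * (broken link/closed entry flags, coding tool flags, optional coded size,
 * luma/chroma range mapping and HRD buffer fullness if present)
 * @param avctx Codec context
 * @param gb GetBit context positioned at the entry-point data
 * @return Status
 */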
static int decode_entry_point(AVCodecContext *avctx, GetBitContext *gb)
{
    VC1Context *v = avctx->priv_data;
    int i, blink, clentry, refdist;

    av_log(avctx, AV_LOG_DEBUG, "Entry point: %08X\n", show_bits_long(gb, 32));
    blink = get_bits1(gb); // broken link
    clentry = get_bits1(gb); // closed entry
    v->panscanflag = get_bits1(gb);
    refdist = get_bits1(gb); // refdist flag
    v->s.loop_filter = get_bits1(gb);
    v->fastuvmc = get_bits1(gb);
    v->extended_mv = get_bits1(gb);
    v->dquant = get_bits(gb, 2);
    v->vstransform = get_bits1(gb);
    v->overlap = get_bits1(gb);
    v->quantizer_mode = get_bits(gb, 2);

    if(v->hrd_param_flag){
        for(i = 0; i < v->hrd_num_leaky_buckets; i++) {
            get_bits(gb, 8); //hrd_full[n]
        }
    }

    if(get_bits1(gb)){
        avctx->coded_width = (get_bits(gb, 12)+1)<<1;
        avctx->coded_height = (get_bits(gb, 12)+1)<<1;
    }
    if(v->extended_mv)
        v->extended_dmv = get_bits1(gb);
    if(get_bits1(gb)) {
        av_log(avctx, AV_LOG_ERROR, "Luma scaling is not supported, expect wrong picture\n");
        skip_bits(gb, 3); // Y range, ignored for now
    }
    if(get_bits1(gb)) {
        av_log(avctx, AV_LOG_ERROR, "Chroma scaling is not supported, expect wrong picture\n");
        skip_bits(gb, 3); // UV range, ignored for now
    }

    av_log(avctx, AV_LOG_DEBUG, "Entry point info:\n"
        "BrokenLink=%i, ClosedEntry=%i, PanscanFlag=%i\n"
        "RefDist=%i, Postproc=%i, FastUVMC=%i, ExtMV=%i\n"
        "DQuant=%i, VSTransform=%i, Overlap=%i, Qmode=%i\n",
        blink, clentry, v->panscanflag, refdist, v->s.loop_filter,
        v->fastuvmc, v->extended_mv, v->dquant, v->vstransform, v->overlap, v->quantizer_mode);

    return 0;
}

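/**
 * Decode Simple/Main Profile frame (picture) header
 * Reads picture type, quantizer parameters (pqindex/halfpq/pquantizer),
 * MV range and mode, the MV-type or direct bitplane plus the skip bitplane,
 * VOP dquant info, transform type and the AC/DC table indices.  For
 * MV_PMODE_INTENSITY_COMP it also fills the luty/lutuv lookup tables as 6-bit
 * fixed-point linear maps, luty[i] = av_clip_uint8((scale*i + shift + 32) >> 6).
 * @return Status
 */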
static int vc1_parse_frame_header(VC1Context *v, GetBitContext* gb)
{
    int pqindex, lowquant, status;

    if(v->finterpflag) v->interpfrm = get_bits(gb, 1);
    skip_bits(gb, 2); //framecnt unused
    v->rangeredfrm = 0;
    if (v->rangered) v->rangeredfrm = get_bits(gb, 1);
    v->s.pict_type = get_bits(gb, 1);
    if (v->s.avctx->max_b_frames) {
        if (!v->s.pict_type) {
            if (get_bits(gb, 1)) v->s.pict_type = I_TYPE;
            else v->s.pict_type = B_TYPE;
        } else v->s.pict_type = P_TYPE;
    } else v->s.pict_type = v->s.pict_type ? P_TYPE : I_TYPE;

    v->bi_type = 0;
    if(v->s.pict_type == B_TYPE) {
        v->bfraction = get_vlc2(gb, vc1_bfraction_vlc.table, VC1_BFRACTION_VLC_BITS, 1);
        v->bfraction = vc1_bfraction_lut[v->bfraction];
        if(v->bfraction == 0) {
            v->s.pict_type = BI_TYPE;
        }
    }
    if(v->s.pict_type == I_TYPE || v->s.pict_type == BI_TYPE)
        get_bits(gb, 7); // skip buffer fullness

    /* calculate RND */
    if(v->s.pict_type == I_TYPE || v->s.pict_type == BI_TYPE)
        v->rnd = 1;
    if(v->s.pict_type == P_TYPE)
        v->rnd ^= 1;

    /* Quantizer stuff */
    pqindex = get_bits(gb, 5);
    if (v->quantizer_mode == QUANT_FRAME_IMPLICIT)
        v->pq = pquant_table[0][pqindex];
    else
        v->pq = pquant_table[1][pqindex];

    v->pquantizer = 1;
    if (v->quantizer_mode == QUANT_FRAME_IMPLICIT)
        v->pquantizer = pqindex < 9;
    if (v->quantizer_mode == QUANT_NON_UNIFORM)
        v->pquantizer = 0;
    v->pqindex = pqindex;
    if (pqindex < 9) v->halfpq = get_bits(gb, 1);
    else v->halfpq = 0;
    if (v->quantizer_mode == QUANT_FRAME_EXPLICIT)
        v->pquantizer = get_bits(gb, 1);
    v->dquantfrm = 0;
    if (v->extended_mv == 1) v->mvrange = get_prefix(gb, 0, 3);
    v->k_x = v->mvrange + 9 + (v->mvrange >> 1); //k_x can be 9 10 12 13
    v->k_y = v->mvrange + 8; //k_y can be 8 9 10 11
    v->range_x = 1 << (v->k_x - 1);
    v->range_y = 1 << (v->k_y - 1);
    if (v->profile == PROFILE_ADVANCED)
    {
        if (v->postprocflag) v->postproc = get_bits(gb, 1);
    }
    else
        if (v->multires && v->s.pict_type != B_TYPE) v->respic = get_bits(gb, 2);

    //av_log(v->s.avctx, AV_LOG_INFO, "%c Frame: QP=[%i]%i (+%i/2) %i\n",
    //        (v->s.pict_type == P_TYPE) ? 'P' : ((v->s.pict_type == I_TYPE) ? 'I' : 'B'), pqindex, v->pq, v->halfpq, v->rangeredfrm);

    if(v->s.pict_type == I_TYPE || v->s.pict_type == P_TYPE) v->use_ic = 0;

    switch(v->s.pict_type) {
    case P_TYPE:
        if (v->pq < 5) v->tt_index = 0;
        else if(v->pq < 13) v->tt_index = 1;
        else v->tt_index = 2;

        lowquant = (v->pq > 12) ? 0 : 1;
        v->mv_mode = mv_pmode_table[lowquant][get_prefix(gb, 1, 4)];
        if (v->mv_mode == MV_PMODE_INTENSITY_COMP)
        {
            int scale, shift, i;
            v->mv_mode2 = mv_pmode_table2[lowquant][get_prefix(gb, 1, 3)];
            v->lumscale = get_bits(gb, 6);
            v->lumshift = get_bits(gb, 6);
            v->use_ic = 1;
            /* fill lookup tables for intensity compensation */
            if(!v->lumscale) {
                scale = -64;
                shift = (255 - v->lumshift * 2) << 6;
                if(v->lumshift > 31)
                    shift += 128 << 6;
            } else {
                scale = v->lumscale + 32;
                if(v->lumshift > 31)
                    shift = (v->lumshift - 64) << 6;
                else
                    shift = v->lumshift << 6;
            }
            for(i = 0; i < 256; i++) {
                v->luty[i] = av_clip_uint8((scale * i + shift + 32) >> 6);
                v->lutuv[i] = av_clip_uint8((scale * (i - 128) + 128*64 + 32) >> 6);
            }
        }
        if(v->mv_mode == MV_PMODE_1MV_HPEL || v->mv_mode == MV_PMODE_1MV_HPEL_BILIN)
            v->s.quarter_sample = 0;
        else if(v->mv_mode == MV_PMODE_INTENSITY_COMP) {
            if(v->mv_mode2 == MV_PMODE_1MV_HPEL || v->mv_mode2 == MV_PMODE_1MV_HPEL_BILIN)
                v->s.quarter_sample = 0;
            else
                v->s.quarter_sample = 1;
        } else
            v->s.quarter_sample = 1;
        v->s.mspel = !(v->mv_mode == MV_PMODE_1MV_HPEL_BILIN || (v->mv_mode == MV_PMODE_INTENSITY_COMP && v->mv_mode2 == MV_PMODE_1MV_HPEL_BILIN));

        if ((v->mv_mode == MV_PMODE_INTENSITY_COMP &&
                 v->mv_mode2 == MV_PMODE_MIXED_MV)
                || v->mv_mode == MV_PMODE_MIXED_MV)
        {
            status = bitplane_decoding(v->mv_type_mb_plane, &v->mv_type_is_raw, v);
            if (status < 0) return -1;
            av_log(v->s.avctx, AV_LOG_DEBUG, "MB MV Type plane encoding: "
                   "Imode: %i, Invert: %i\n", status>>1, status&1);
        } else {
            v->mv_type_is_raw = 0;
            memset(v->mv_type_mb_plane, 0, v->s.mb_stride * v->s.mb_height);
        }
        status = bitplane_decoding(v->s.mbskip_table, &v->skip_is_raw, v);
        if (status < 0) return -1;
        av_log(v->s.avctx, AV_LOG_DEBUG, "MB Skip plane encoding: "
               "Imode: %i, Invert: %i\n", status>>1, status&1);

        /* Hopefully this is correct for P frames */
        v->s.mv_table_index = get_bits(gb, 2); //but using vc1_ tables
        v->cbpcy_vlc = &vc1_cbpcy_p_vlc[get_bits(gb, 2)];

        if (v->dquant)
        {
            av_log(v->s.avctx, AV_LOG_DEBUG, "VOP DQuant info\n");
            vop_dquant_decoding(v);
        }

        v->ttfrm = 0; //FIXME Is that so ?
        if (v->vstransform)
        {
            v->ttmbf = get_bits(gb, 1);
            if (v->ttmbf)
            {
                v->ttfrm = ttfrm_to_tt[get_bits(gb, 2)];
            }
        } else {
            v->ttmbf = 1;
            v->ttfrm = TT_8X8;
        }
        break;
    case B_TYPE:
        if (v->pq < 5) v->tt_index = 0;
        else if(v->pq < 13) v->tt_index = 1;
        else v->tt_index = 2;

        lowquant = (v->pq > 12) ? 0 : 1;
        v->mv_mode = get_bits1(gb) ? MV_PMODE_1MV : MV_PMODE_1MV_HPEL_BILIN;
        v->s.quarter_sample = (v->mv_mode == MV_PMODE_1MV);
        v->s.mspel = v->s.quarter_sample;

        status = bitplane_decoding(v->direct_mb_plane, &v->dmb_is_raw, v);
        if (status < 0) return -1;
        av_log(v->s.avctx, AV_LOG_DEBUG, "MB Direct Type plane encoding: "
               "Imode: %i, Invert: %i\n", status>>1, status&1);
        status = bitplane_decoding(v->s.mbskip_table, &v->skip_is_raw, v);
        if (status < 0) return -1;
        av_log(v->s.avctx, AV_LOG_DEBUG, "MB Skip plane encoding: "
               "Imode: %i, Invert: %i\n", status>>1, status&1);

        v->s.mv_table_index = get_bits(gb, 2);
        v->cbpcy_vlc = &vc1_cbpcy_p_vlc[get_bits(gb, 2)];

        if (v->dquant)
        {
            av_log(v->s.avctx, AV_LOG_DEBUG, "VOP DQuant info\n");
            vop_dquant_decoding(v);
        }

        v->ttfrm = 0;
        if (v->vstransform)
        {
            v->ttmbf = get_bits(gb, 1);
            if (v->ttmbf)
            {
                v->ttfrm = ttfrm_to_tt[get_bits(gb, 2)];
            }
        } else {
            v->ttmbf = 1;
            v->ttfrm = TT_8X8;
        }
        break;
    }

    /* AC Syntax */
    v->c_ac_table_index = decode012(gb);
    if (v->s.pict_type == I_TYPE || v->s.pict_type == BI_TYPE)
    {
        v->y_ac_table_index = decode012(gb);
    }
    /* DC Syntax */
    v->s.dc_table_index = get_bits(gb, 1);

    if(v->s.pict_type == BI_TYPE) {
        v->s.pict_type = B_TYPE;
        v->bi_type = 1;
    }
    return 0;
}

1616 |
static int vc1_parse_frame_header_adv(VC1Context *v, GetBitContext* gb)
{
    int pqindex, lowquant;
    int status;

    v->p_frame_skipped = 0;

    if(v->interlace){
        v->fcm = decode012(gb);
        if(v->fcm) return -1; // interlaced frames/fields are not implemented
    }
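    /* FPTYPE is a truncated unary code: 0 = P, 10 = B, 110 = I, 1110 = BI and
     * 1111 marks a skipped P frame, hence the 0..4 values returned here. */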
    switch(get_prefix(gb, 0, 4)) {
    case 0:
        v->s.pict_type = P_TYPE;
        break;
    case 1:
        v->s.pict_type = B_TYPE;
        break;
    case 2:
        v->s.pict_type = I_TYPE;
        break;
    case 3:
        v->s.pict_type = BI_TYPE;
        break;
    case 4:
        v->s.pict_type = P_TYPE; // skipped pic
        v->p_frame_skipped = 1;
        return 0;
    }
    if(v->tfcntrflag)
        get_bits(gb, 8);
    if(v->broadcast) {
        if(!v->interlace || v->psf) {
            v->rptfrm = get_bits(gb, 2);
        } else {
            v->tff = get_bits1(gb);
            v->rptfrm = get_bits1(gb);
        }
    }
    if(v->panscanflag) {
        //...
    }
    v->rnd = get_bits1(gb);
    if(v->interlace)
        v->uvsamp = get_bits1(gb);
    if(v->finterpflag) v->interpfrm = get_bits(gb, 1);
    if(v->s.pict_type == B_TYPE) {
        v->bfraction = get_vlc2(gb, vc1_bfraction_vlc.table, VC1_BFRACTION_VLC_BITS, 1);
        v->bfraction = vc1_bfraction_lut[v->bfraction];
        if(v->bfraction == 0) {
            v->s.pict_type = BI_TYPE; /* XXX: should not happen here */
        }
    }
    pqindex = get_bits(gb, 5);
    v->pqindex = pqindex;
    if (v->quantizer_mode == QUANT_FRAME_IMPLICIT)
        v->pq = pquant_table[0][pqindex];
    else
        v->pq = pquant_table[1][pqindex];

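    /* PQUANTIZER selection: implicit mode infers the uniform quantizer from
     * PQINDEX < 9, the non-uniform/uniform modes force it, and explicit mode
     * reads it from the bitstream below. */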
    v->pquantizer = 1;
    if (v->quantizer_mode == QUANT_FRAME_IMPLICIT)
        v->pquantizer = pqindex < 9;
    if (v->quantizer_mode == QUANT_NON_UNIFORM)
        v->pquantizer = 0;
    v->pqindex = pqindex;
    if (pqindex < 9) v->halfpq = get_bits(gb, 1);
    else v->halfpq = 0;
    if (v->quantizer_mode == QUANT_FRAME_EXPLICIT)
        v->pquantizer = get_bits(gb, 1);

    if(v->s.pict_type == I_TYPE || v->s.pict_type == P_TYPE) v->use_ic = 0;

    switch(v->s.pict_type) {
    case I_TYPE:
    case BI_TYPE:
        status = bitplane_decoding(v->acpred_plane, &v->acpred_is_raw, v);
        if (status < 0) return -1;
        av_log(v->s.avctx, AV_LOG_DEBUG, "ACPRED plane encoding: "
               "Imode: %i, Invert: %i\n", status>>1, status&1);
        v->condover = CONDOVER_NONE;
        if(v->overlap && v->pq <= 8) {
            v->condover = decode012(gb);
            if(v->condover == CONDOVER_SELECT) {
                status = bitplane_decoding(v->over_flags_plane, &v->overflg_is_raw, v);
                if (status < 0) return -1;
                av_log(v->s.avctx, AV_LOG_DEBUG, "CONDOVER plane encoding: "
                       "Imode: %i, Invert: %i\n", status>>1, status&1);
            }
        }
        break;
    case P_TYPE:
        if(v->postprocflag)
            v->postproc = get_bits1(gb);
        if (v->extended_mv) v->mvrange = get_prefix(gb, 0, 3);
        else v->mvrange = 0;
        v->k_x = v->mvrange + 9 + (v->mvrange >> 1); //k_x can be 9 10 12 13
        v->k_y = v->mvrange + 8; //k_y can be 8 9 10 11
        v->range_x = 1 << (v->k_x - 1);
        v->range_y = 1 << (v->k_y - 1);

        if (v->pq < 5) v->tt_index = 0;
        else if(v->pq < 13) v->tt_index = 1;
        else v->tt_index = 2;

        lowquant = (v->pq > 12) ? 0 : 1;
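        /* MVMODE is a variable-length code whose interpretation depends on
         * whether PQUANT is low (<= 12) or high, hence the two mv_pmode tables. */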
        v->mv_mode = mv_pmode_table[lowquant][get_prefix(gb, 1, 4)];
        if (v->mv_mode == MV_PMODE_INTENSITY_COMP)
        {
            int scale, shift, i;
            v->mv_mode2 = mv_pmode_table2[lowquant][get_prefix(gb, 1, 3)];
            v->lumscale = get_bits(gb, 6);
            v->lumshift = get_bits(gb, 6);
            /* fill lookup tables for intensity compensation */
            if(!v->lumscale) {
                scale = -64;
                shift = (255 - v->lumshift * 2) << 6;
                if(v->lumshift > 31)
                    shift += 128 << 6;
            } else {
                scale = v->lumscale + 32;
                if(v->lumshift > 31)
                    shift = (v->lumshift - 64) << 6;
                else
                    shift = v->lumshift << 6;
            }
            for(i = 0; i < 256; i++) {
                v->luty[i] = av_clip_uint8((scale * i + shift + 32) >> 6);
                v->lutuv[i] = av_clip_uint8((scale * (i - 128) + 128*64 + 32) >> 6);
            }
            v->use_ic = 1;
        }
        if(v->mv_mode == MV_PMODE_1MV_HPEL || v->mv_mode == MV_PMODE_1MV_HPEL_BILIN)
            v->s.quarter_sample = 0;
        else if(v->mv_mode == MV_PMODE_INTENSITY_COMP) {
            if(v->mv_mode2 == MV_PMODE_1MV_HPEL || v->mv_mode2 == MV_PMODE_1MV_HPEL_BILIN)
                v->s.quarter_sample = 0;
            else
                v->s.quarter_sample = 1;
        } else
            v->s.quarter_sample = 1;
        v->s.mspel = !(v->mv_mode == MV_PMODE_1MV_HPEL_BILIN || (v->mv_mode == MV_PMODE_INTENSITY_COMP && v->mv_mode2 == MV_PMODE_1MV_HPEL_BILIN));

        if ((v->mv_mode == MV_PMODE_INTENSITY_COMP &&
                 v->mv_mode2 == MV_PMODE_MIXED_MV)
                || v->mv_mode == MV_PMODE_MIXED_MV)
        {
            status = bitplane_decoding(v->mv_type_mb_plane, &v->mv_type_is_raw, v);
            if (status < 0) return -1;
            av_log(v->s.avctx, AV_LOG_DEBUG, "MB MV Type plane encoding: "
                   "Imode: %i, Invert: %i\n", status>>1, status&1);
        } else {
            v->mv_type_is_raw = 0;
            memset(v->mv_type_mb_plane, 0, v->s.mb_stride * v->s.mb_height);
        }
        status = bitplane_decoding(v->s.mbskip_table, &v->skip_is_raw, v);
        if (status < 0) return -1;
        av_log(v->s.avctx, AV_LOG_DEBUG, "MB Skip plane encoding: "
               "Imode: %i, Invert: %i\n", status>>1, status&1);

        /* Hopefully this is correct for P frames */
        v->s.mv_table_index = get_bits(gb, 2); //but using vc1_ tables
        v->cbpcy_vlc = &vc1_cbpcy_p_vlc[get_bits(gb, 2)];
        if (v->dquant)
        {
            av_log(v->s.avctx, AV_LOG_DEBUG, "VOP DQuant info\n");
            vop_dquant_decoding(v);
        }

        v->ttfrm = 0; //FIXME Is that so ?
        if (v->vstransform)
        {
            v->ttmbf = get_bits(gb, 1);
            if (v->ttmbf)
            {
                v->ttfrm = ttfrm_to_tt[get_bits(gb, 2)];
            }
        } else {
            v->ttmbf = 1;
            v->ttfrm = TT_8X8;
        }
        break;
    case B_TYPE:
        if(v->postprocflag)
            v->postproc = get_bits1(gb);
        if (v->extended_mv) v->mvrange = get_prefix(gb, 0, 3);
        else v->mvrange = 0;
        v->k_x = v->mvrange + 9 + (v->mvrange >> 1); //k_x can be 9 10 12 13
        v->k_y = v->mvrange + 8; //k_y can be 8 9 10 11
        v->range_x = 1 << (v->k_x - 1);
        v->range_y = 1 << (v->k_y - 1);

        if (v->pq < 5) v->tt_index = 0;
        else if(v->pq < 13) v->tt_index = 1;
        else v->tt_index = 2;

        lowquant = (v->pq > 12) ? 0 : 1;
        v->mv_mode = get_bits1(gb) ? MV_PMODE_1MV : MV_PMODE_1MV_HPEL_BILIN;
        v->s.quarter_sample = (v->mv_mode == MV_PMODE_1MV);
        v->s.mspel = v->s.quarter_sample;

        status = bitplane_decoding(v->direct_mb_plane, &v->dmb_is_raw, v);
        if (status < 0) return -1;
        av_log(v->s.avctx, AV_LOG_DEBUG, "MB Direct Type plane encoding: "
               "Imode: %i, Invert: %i\n", status>>1, status&1);
        status = bitplane_decoding(v->s.mbskip_table, &v->skip_is_raw, v);
        if (status < 0) return -1;
        av_log(v->s.avctx, AV_LOG_DEBUG, "MB Skip plane encoding: "
               "Imode: %i, Invert: %i\n", status>>1, status&1);

        v->s.mv_table_index = get_bits(gb, 2);
        v->cbpcy_vlc = &vc1_cbpcy_p_vlc[get_bits(gb, 2)];

        if (v->dquant)
        {
            av_log(v->s.avctx, AV_LOG_DEBUG, "VOP DQuant info\n");
            vop_dquant_decoding(v);
        }

        v->ttfrm = 0;
        if (v->vstransform)
        {
            v->ttmbf = get_bits(gb, 1);
            if (v->ttmbf)
            {
                v->ttfrm = ttfrm_to_tt[get_bits(gb, 2)];
            }
        } else {
            v->ttmbf = 1;
            v->ttfrm = TT_8X8;
        }
        break;
    }

    /* AC Syntax */
    v->c_ac_table_index = decode012(gb);
    if (v->s.pict_type == I_TYPE || v->s.pict_type == BI_TYPE)
    {
        v->y_ac_table_index = decode012(gb);
    }
    /* DC Syntax */
    v->s.dc_table_index = get_bits(gb, 1);
    if ((v->s.pict_type == I_TYPE || v->s.pict_type == BI_TYPE) && v->dquant) {
        av_log(v->s.avctx, AV_LOG_DEBUG, "VOP DQuant info\n");
        vop_dquant_decoding(v);
    }

    v->bi_type = 0;
    if(v->s.pict_type == BI_TYPE) {
        v->s.pict_type = B_TYPE;
        v->bi_type = 1;
    }
    return 0;
}

/***********************************************************************/
/**
 * @defgroup block VC-1 Block-level functions
 * @see 7.1.4, p91 and 8.1.1.7, p(1)04
 * @{
 */

/**
 * @def GET_MQUANT
 * @brief Get macroblock-level quantizer scale
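 *
 * When DQUANT restricts changes to frame edges, the edges bitmask below
 * selects where ALTPQUANT overrides PQUANT: bit 0 is the left macroblock
 * column, bit 1 the top row, bit 2 the right column and bit 3 the bottom row.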
 */
#define GET_MQUANT()                                           \
  if (v->dquantfrm)                                            \
  {                                                            \
    int edges = 0;                                             \
    if (v->dqprofile == DQPROFILE_ALL_MBS)                     \
    {                                                          \
      if (v->dqbilevel)                                        \
      {                                                        \
        mquant = (get_bits(gb, 1)) ? v->altpq : v->pq;         \
      }                                                        \
      else                                                     \
      {                                                        \
        mqdiff = get_bits(gb, 3);                              \
        if (mqdiff != 7) mquant = v->pq + mqdiff;              \
        else mquant = get_bits(gb, 5);                         \
      }                                                        \
    }                                                          \
    if(v->dqprofile == DQPROFILE_SINGLE_EDGE)                  \
        edges = 1 << v->dqsbedge;                              \
    else if(v->dqprofile == DQPROFILE_DOUBLE_EDGES)            \
        edges = (3 << v->dqsbedge) % 15;                       \
    else if(v->dqprofile == DQPROFILE_FOUR_EDGES)              \
        edges = 15;                                            \
    if((edges&1) && !s->mb_x)                                  \
        mquant = v->altpq;                                     \
    if((edges&2) && s->first_slice_line)                       \
        mquant = v->altpq;                                     \
    if((edges&4) && s->mb_x == (s->mb_width - 1))              \
        mquant = v->altpq;                                     \
    if((edges&8) && s->mb_y == (s->mb_height - 1))             \
        mquant = v->altpq;                                     \
  }

/**
 * @def GET_MVDATA(_dmv_x, _dmv_y)
 * @brief Get MV differentials
 * @see MVDATA decoding from 8.3.5.2, p(1)20
 * @param _dmv_x Horizontal differential for decoded MV
 * @param _dmv_y Vertical differential for decoded MV
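 *
 * The joint VLC index decodes as: 0 = zero differential, 35 = escape with raw
 * k_x/k_y-bit differentials, 36 = intra MB, and indices above 36 are the same
 * events with the "macroblock has coded coefficients" flag set.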
 */
#define GET_MVDATA(_dmv_x, _dmv_y)                                  \
  index = 1 + get_vlc2(gb, vc1_mv_diff_vlc[s->mv_table_index].table,\
                       VC1_MV_DIFF_VLC_BITS, 2);                    \
  if (index > 36)                                                   \
  {                                                                 \
    mb_has_coeffs = 1;                                              \
    index -= 37;                                                    \
  }                                                                 \
  else mb_has_coeffs = 0;                                           \
  s->mb_intra = 0;                                                  \
  if (!index) { _dmv_x = _dmv_y = 0; }                              \
  else if (index == 35)                                             \
  {                                                                 \
    _dmv_x = get_bits(gb, v->k_x - 1 + s->quarter_sample);          \
    _dmv_y = get_bits(gb, v->k_y - 1 + s->quarter_sample);          \
  }                                                                 \
  else if (index == 36)                                             \
  {                                                                 \
    _dmv_x = 0;                                                     \
    _dmv_y = 0;                                                     \
    s->mb_intra = 1;                                                \
  }                                                                 \
  else                                                              \
  {                                                                 \
    index1 = index%6;                                               \
    if (!s->quarter_sample && index1 == 5) val = 1;                 \
    else                                   val = 0;                 \
    if(size_table[index1] - val > 0)                                \
        val = get_bits(gb, size_table[index1] - val);               \
    else                                   val = 0;                 \
    sign = 0 - (val&1);                                             \
    _dmv_x = (sign ^ ((val>>1) + offset_table[index1])) - sign;     \
                                                                    \
    index1 = index/6;                                               \
    if (!s->quarter_sample && index1 == 5) val = 1;                 \
    else                                   val = 0;                 \
    if(size_table[index1] - val > 0)                                \
        val = get_bits(gb, size_table[index1] - val);               \
    else                                   val = 0;                 \
    sign = 0 - (val&1);                                             \
    _dmv_y = (sign ^ ((val>>1) + offset_table[index1])) - sign;     \
  }

/** Predict and set motion vector
 */
static inline void vc1_pred_mv(MpegEncContext *s, int n, int dmv_x, int dmv_y, int mv1, int r_x, int r_y, uint8_t* is_intra)
{
    int xy, wrap, off = 0;
    int16_t *A, *B, *C;
    int px, py;
    int sum;

    /* scale MV difference to be quad-pel */
    dmv_x <<= 1 - s->quarter_sample;
    dmv_y <<= 1 - s->quarter_sample;

    wrap = s->b8_stride;
    xy = s->block_index[n];

    if(s->mb_intra){
        s->mv[0][n][0] = s->current_picture.motion_val[0][xy][0] = 0;
        s->mv[0][n][1] = s->current_picture.motion_val[0][xy][1] = 0;
        if(mv1) { /* duplicate motion data for 1-MV block */
            s->current_picture.motion_val[0][xy + 1][0] = 0;
            s->current_picture.motion_val[0][xy + 1][1] = 0;
            s->current_picture.motion_val[0][xy + wrap][0] = 0;
            s->current_picture.motion_val[0][xy + wrap][1] = 0;
            s->current_picture.motion_val[0][xy + wrap + 1][0] = 0;
            s->current_picture.motion_val[0][xy + wrap + 1][1] = 0;
        }
        return;
    }

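    /* Gather the three candidate predictors: C is the block to the left,
     * A the block above, and B a block above and to the side; the offset of B
     * depends on 1-MV vs 4-MV mode and on the macroblock position. */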
    C = s->current_picture.motion_val[0][xy - 1];
    A = s->current_picture.motion_val[0][xy - wrap];
    if(mv1)
        off = (s->mb_x == (s->mb_width - 1)) ? -1 : 2;
    else {
        //in 4-MV mode different blocks have different B predictor position
        switch(n){
        case 0:
            off = (s->mb_x > 0) ? -1 : 1;
            break;
        case 1:
            off = (s->mb_x == (s->mb_width - 1)) ? -1 : 1;
            break;
        case 2:
            off = 1;
            break;
        case 3:
            off = -1;
        }
    }
    B = s->current_picture.motion_val[0][xy - wrap + off];

    if(!s->first_slice_line || (n==2 || n==3)) { // predictor A is not out of bounds
        if(s->mb_width == 1) {
            px = A[0];
            py = A[1];
        } else {
            px = mid_pred(A[0], B[0], C[0]);
            py = mid_pred(A[1], B[1], C[1]);
        }
    } else if(s->mb_x || (n==1 || n==3)) { // predictor C is not out of bounds
        px = C[0];
        py = C[1];
    } else {
        px = py = 0;
    }
    /* Pullback MV as specified in 8.3.5.3.4 */
    {
        int qx, qy, X, Y;
        qx = (s->mb_x << 6) + ((n==1 || n==3) ? 32 : 0);
        qy = (s->mb_y << 6) + ((n==2 || n==3) ? 32 : 0);
        X = (s->mb_width << 6) - 4;
        Y = (s->mb_height << 6) - 4;
        if(mv1) {
            if(qx + px < -60) px = -60 - qx;
            if(qy + py < -60) py = -60 - qy;
        } else {
            if(qx + px < -28) px = -28 - qx;
            if(qy + py < -28) py = -28 - qy;
        }
        if(qx + px > X) px = X - qx;
        if(qy + py > Y) py = Y - qy;
    }
    /* Calculate hybrid prediction as specified in 8.3.5.3.5 */
    if((!s->first_slice_line || (n==2 || n==3)) && (s->mb_x || (n==1 || n==3))) {
        if(is_intra[xy - wrap])
            sum = FFABS(px) + FFABS(py);
        else
            sum = FFABS(px - A[0]) + FFABS(py - A[1]);
        if(sum > 32) {
            if(get_bits1(&s->gb)) {
                px = A[0];
                py = A[1];
            } else {
                px = C[0];
                py = C[1];
            }
        } else {
            if(is_intra[xy - 1])
                sum = FFABS(px) + FFABS(py);
            else
                sum = FFABS(px - C[0]) + FFABS(py - C[1]);
            if(sum > 32) {
                if(get_bits1(&s->gb)) {
                    px = A[0];
                    py = A[1];
                } else {
                    px = C[0];
                    py = C[1];
                }
            }
        }
    }
    /* store MV using signed modulus of MV range defined in 4.11 */
    s->mv[0][n][0] = s->current_picture.motion_val[0][xy][0] = ((px + dmv_x + r_x) & ((r_x << 1) - 1)) - r_x;
    s->mv[0][n][1] = s->current_picture.motion_val[0][xy][1] = ((py + dmv_y + r_y) & ((r_y << 1) - 1)) - r_y;
    if(mv1) { /* duplicate motion data for 1-MV block */
        s->current_picture.motion_val[0][xy + 1][0] = s->current_picture.motion_val[0][xy][0];
        s->current_picture.motion_val[0][xy + 1][1] = s->current_picture.motion_val[0][xy][1];
        s->current_picture.motion_val[0][xy + wrap][0] = s->current_picture.motion_val[0][xy][0];
        s->current_picture.motion_val[0][xy + wrap][1] = s->current_picture.motion_val[0][xy][1];
        s->current_picture.motion_val[0][xy + wrap + 1][0] = s->current_picture.motion_val[0][xy][0];
        s->current_picture.motion_val[0][xy + wrap + 1][1] = s->current_picture.motion_val[0][xy][1];
    }
}

/** Motion compensation for direct or interpolated blocks in B-frames
 */
static void vc1_interp_mc(VC1Context *v)
{
    MpegEncContext *s = &v->s;
    DSPContext *dsp = &v->s.dsp;
    uint8_t *srcY, *srcU, *srcV;
    int dxy, uvdxy, mx, my, uvmx, uvmy, src_x, src_y, uvsrc_x, uvsrc_y;

    if(!v->s.next_picture.data[0])return;

    mx = s->mv[1][0][0];
    my = s->mv[1][0][1];
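    /* Derive the chroma MV from the luma MV: halve it (with a bias for 3/4-pel
     * positions); FASTUVMC additionally rounds the result to half-pel. */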
    uvmx = (mx + ((mx & 3) == 3)) >> 1;
    uvmy = (my + ((my & 3) == 3)) >> 1;
    if(v->fastuvmc) {
        uvmx = uvmx + ((uvmx<0)?-(uvmx&1):(uvmx&1));
        uvmy = uvmy + ((uvmy<0)?-(uvmy&1):(uvmy&1));
    }
    srcY = s->next_picture.data[0];
    srcU = s->next_picture.data[1];
    srcV = s->next_picture.data[2];

    src_x = s->mb_x * 16 + (mx >> 2);
    src_y = s->mb_y * 16 + (my >> 2);
    uvsrc_x = s->mb_x * 8 + (uvmx >> 2);
    uvsrc_y = s->mb_y * 8 + (uvmy >> 2);

    src_x   = av_clip(  src_x, -16, s->mb_width  * 16);
    src_y   = av_clip(  src_y, -16, s->mb_height * 16);
    uvsrc_x = av_clip(uvsrc_x,  -8, s->mb_width  *  8);
    uvsrc_y = av_clip(uvsrc_y,  -8, s->mb_height *  8);

    srcY += src_y * s->linesize + src_x;
    srcU += uvsrc_y * s->uvlinesize + uvsrc_x;
    srcV += uvsrc_y * s->uvlinesize + uvsrc_x;

    /* for grayscale we should not try to read from unknown area */
    if(s->flags & CODEC_FLAG_GRAY) {
        srcU = s->edge_emu_buffer + 18 * s->linesize;
        srcV = s->edge_emu_buffer + 18 * s->linesize;
    }

    if(v->rangeredfrm
       || (unsigned)src_x > s->h_edge_pos - (mx&3) - 16
       || (unsigned)src_y > s->v_edge_pos - (my&3) - 16){
        uint8_t *uvbuf= s->edge_emu_buffer + 19 * s->linesize;

        srcY -= s->mspel * (1 + s->linesize);
        ff_emulated_edge_mc(s->edge_emu_buffer, srcY, s->linesize, 17+s->mspel*2, 17+s->mspel*2,
                            src_x - s->mspel, src_y - s->mspel, s->h_edge_pos, s->v_edge_pos);
        srcY = s->edge_emu_buffer;
        ff_emulated_edge_mc(uvbuf     , srcU, s->uvlinesize, 8+1, 8+1,
                            uvsrc_x, uvsrc_y, s->h_edge_pos >> 1, s->v_edge_pos >> 1);
        ff_emulated_edge_mc(uvbuf + 16, srcV, s->uvlinesize, 8+1, 8+1,
                            uvsrc_x, uvsrc_y, s->h_edge_pos >> 1, s->v_edge_pos >> 1);
        srcU = uvbuf;
        srcV = uvbuf + 16;
        /* if we deal with range reduction we need to scale source blocks */
        if(v->rangeredfrm) {
            int i, j;
            uint8_t *src, *src2;

            src = srcY;
            for(j = 0; j < 17 + s->mspel*2; j++) {
                for(i = 0; i < 17 + s->mspel*2; i++) src[i] = ((src[i] - 128) >> 1) + 128;
                src += s->linesize;
            }
            src = srcU; src2 = srcV;
            for(j = 0; j < 9; j++) {
                for(i = 0; i < 9; i++) {
                    src[i] = ((src[i] - 128) >> 1) + 128;
                    src2[i] = ((src2[i] - 128) >> 1) + 128;
                }
                src += s->uvlinesize;
                src2 += s->uvlinesize;
            }
        }
        srcY += s->mspel * (1 + s->linesize);
    }

    mx >>= 1;
    my >>= 1;
    dxy = ((my & 1) << 1) | (mx & 1);

    dsp->avg_pixels_tab[0][dxy](s->dest[0], srcY, s->linesize, 16);

    if(s->flags & CODEC_FLAG_GRAY) return;
    /* Chroma MC always uses qpel bilinear */
    uvdxy = ((uvmy & 3) << 2) | (uvmx & 3);
    uvmx = (uvmx&3)<<1;
    uvmy = (uvmy&3)<<1;
    dsp->avg_h264_chroma_pixels_tab[0](s->dest[1], srcU, s->uvlinesize, 8, uvmx, uvmy);
    dsp->avg_h264_chroma_pixels_tab[0](s->dest[2], srcV, s->uvlinesize, 8, uvmx, uvmy);
}

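/** Scale the co-located MV from the next picture by BFRACTION for B-frame
 *  direct mode; inv selects the forward (0) or backward (1) part of the split
 *  and qs the quarter-sample rounding variant.
 */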
static av_always_inline int scale_mv(int value, int bfrac, int inv, int qs)
{
    int n = bfrac;

#if B_FRACTION_DEN==256
    if(inv)
        n -= 256;
    if(!qs)
        return 2 * ((value * n + 255) >> 9);
    return (value * n + 128) >> 8;
#else
    if(inv)
        n -= B_FRACTION_DEN;
    if(!qs)
        return 2 * ((value * n + B_FRACTION_DEN - 1) / (2 * B_FRACTION_DEN));
    return (value * n + B_FRACTION_DEN/2) / B_FRACTION_DEN;
#endif
}

/** Reconstruct motion vector for B-frame and do motion compensation
 */
static inline void vc1_b_mc(VC1Context *v, int dmv_x[2], int dmv_y[2], int direct, int mode)
{
    if(v->use_ic) {
        v->mv_mode2 = v->mv_mode;
        v->mv_mode = MV_PMODE_INTENSITY_COMP;
    }
    if(direct) {
        vc1_mc_1mv(v, 0);
        vc1_interp_mc(v);
        if(v->use_ic) v->mv_mode = v->mv_mode2;
        return;
    }
    if(mode == BMV_TYPE_INTERPOLATED) {
        vc1_mc_1mv(v, 0);
        vc1_interp_mc(v);
        if(v->use_ic) v->mv_mode = v->mv_mode2;
        return;
    }

    if(v->use_ic && (mode == BMV_TYPE_BACKWARD)) v->mv_mode = v->mv_mode2;
    vc1_mc_1mv(v, (mode == BMV_TYPE_BACKWARD));
    if(v->use_ic) v->mv_mode = v->mv_mode2;
}

static inline void vc1_pred_b_mv(VC1Context *v, int dmv_x[2], int dmv_y[2], int direct, int mvtype) |
2233 |
{ |
2234 |
MpegEncContext *s = &v->s; |
2235 |
int xy, wrap, off = 0; |
2236 |
int16_t *A, *B, *C; |
2237 |
int px, py;
|
2238 |
int sum;
|
2239 |
int r_x, r_y;
|
2240 |
const uint8_t *is_intra = v->mb_type[0]; |
2241 |
|
2242 |
r_x = v->range_x; |
2243 |
r_y = v->range_y; |
2244 |
/* scale MV difference to be quad-pel */
|
2245 |
dmv_x[0] <<= 1 - s->quarter_sample; |
2246 |
dmv_y[0] <<= 1 - s->quarter_sample; |
2247 |
dmv_x[1] <<= 1 - s->quarter_sample; |
2248 |
dmv_y[1] <<= 1 - s->quarter_sample; |
2249 |
|
2250 |
wrap = s->b8_stride; |
2251 |
xy = s->block_index[0];
|
2252 |
|
2253 |
if(s->mb_intra) {
|
2254 |
s->current_picture.motion_val[0][xy][0] = |
2255 |
s->current_picture.motion_val[0][xy][1] = |
2256 |
s->current_picture.motion_val[1][xy][0] = |
2257 |
s->current_picture.motion_val[1][xy][1] = 0; |
2258 |
return;
|
2259 |
} |
2260 |
s->mv[0][0][0] = scale_mv(s->next_picture.motion_val[1][xy][0], v->bfraction, 0, s->quarter_sample); |
2261 |
s->mv[0][0][1] = scale_mv(s->next_picture.motion_val[1][xy][1], v->bfraction, 0, s->quarter_sample); |
2262 |
s->mv[1][0][0] = scale_mv(s->next_picture.motion_val[1][xy][0], v->bfraction, 1, s->quarter_sample); |
2263 |
s->mv[1][0][1] = scale_mv(s->next_picture.motion_val[1][xy][1], v->bfraction, 1, s->quarter_sample); |
2264 |
if(direct) {
|
2265 |
s->current_picture.motion_val[0][xy][0] = s->mv[0][0][0]; |
2266 |
s->current_picture.motion_val[0][xy][1] = s->mv[0][0][1]; |
2267 |
s->current_picture.motion_val[1][xy][0] = s->mv[1][0][0]; |
2268 |
s->current_picture.motion_val[1][xy][1] = s->mv[1][0][1]; |
2269 |
return;
|
2270 |
} |
2271 |
|
2272 |
if((mvtype == BMV_TYPE_FORWARD) || (mvtype == BMV_TYPE_INTERPOLATED)) {
|
2273 |
C = s->current_picture.motion_val[0][xy - 2]; |
2274 |
A = s->current_picture.motion_val[0][xy - wrap*2]; |
2275 |
off = (s->mb_x == (s->mb_width - 1)) ? -2 : 2; |
2276 |
B = s->current_picture.motion_val[0][xy - wrap*2 + off]; |
2277 |
|
2278 |
if(!s->first_slice_line) { // predictor A is not out of bounds |
2279 |
if(s->mb_width == 1) { |
2280 |
px = A[0];
|
2281 |
py = A[1];
|
2282 |
} else {
|
2283 |
px = mid_pred(A[0], B[0], C[0]); |
2284 |
py = mid_pred(A[1], B[1], C[1]); |
2285 |
} |
2286 |
} else if(s->mb_x) { // predictor C is not out of bounds |
2287 |
px = C[0];
|
2288 |
py = C[1];
|
2289 |
} else {
|
2290 |
px = py = 0;
|
2291 |
} |
2292 |
/* Pullback MV as specified in 8.3.5.3.4 */
|
2293 |
{ |
2294 |
int qx, qy, X, Y;
|
2295 |
if(v->profile < PROFILE_ADVANCED) {
|
2296 |
qx = (s->mb_x << 5);
|
2297 |
qy = (s->mb_y << 5);
|
2298 |
X = (s->mb_width << 5) - 4; |
2299 |
Y = (s->mb_height << 5) - 4; |
2300 |
if(qx + px < -28) px = -28 - qx; |
2301 |
if(qy + py < -28) py = -28 - qy; |
2302 |
if(qx + px > X) px = X - qx;
|
2303 |
if(qy + py > Y) py = Y - qy;
|
2304 |
} else {
|
2305 |
qx = (s->mb_x << 6);
|
2306 |
qy = (s->mb_y << 6);
|
2307 |
X = (s->mb_width << 6) - 4; |
2308 |
Y = (s->mb_height << 6) - 4; |
2309 |
if(qx + px < -60) px = -60 - qx; |
2310 |
if(qy + py < -60) py = -60 - qy; |
2311 |
if(qx + px > X) px = X - qx;
|
2312 |
if(qy + py > Y) py = Y - qy;
|
2313 |
} |
2314 |
} |
2315 |
/* Calculate hybrid prediction as specified in 8.3.5.3.5 */
|
2316 |
if(0 && !s->first_slice_line && s->mb_x) { |
2317 |
if(is_intra[xy - wrap])
|
2318 |
sum = FFABS(px) + FFABS(py); |
2319 |
else
|
2320 |
sum = FFABS(px - A[0]) + FFABS(py - A[1]); |
2321 |
if(sum > 32) { |
2322 |
if(get_bits1(&s->gb)) {
|
2323 |
px = A[0];
|
2324 |
py = A[1];
|
2325 |
} else {
|
2326 |
px = C[0];
|
2327 |
py = C[1];
|
2328 |
} |
2329 |
} else {
|
2330 |
if(is_intra[xy - 2]) |
2331 |
sum = FFABS(px) + FFABS(py); |
2332 |
else
|
2333 |
sum = FFABS(px - C[0]) + FFABS(py - C[1]); |
2334 |
if(sum > 32) { |
2335 |
if(get_bits1(&s->gb)) {
|
2336 |
px = A[0];
|
2337 |
py = A[1];
|
2338 |
} else {
|
2339 |
px = C[0];
|
2340 |
py = C[1];
|
2341 |
} |
2342 |
} |
2343 |
} |
2344 |
} |
2345 |
/* store MV using signed modulus of MV range defined in 4.11 */
|
2346 |
s->mv[0][0][0] = ((px + dmv_x[0] + r_x) & ((r_x << 1) - 1)) - r_x; |
2347 |
s->mv[0][0][1] = ((py + dmv_y[0] + r_y) & ((r_y << 1) - 1)) - r_y; |
2348 |
} |
2349 |
if((mvtype == BMV_TYPE_BACKWARD) || (mvtype == BMV_TYPE_INTERPOLATED)) {
|
2350 |
C = s->current_picture.motion_val[1][xy - 2]; |
2351 |
A = s->current_picture.motion_val[1][xy - wrap*2]; |
2352 |
off = (s->mb_x == (s->mb_width - 1)) ? -2 : 2; |
2353 |
B = s->current_picture.motion_val[1][xy - wrap*2 + off]; |
2354 |
|
2355 |
if(!s->first_slice_line) { // predictor A is not out of bounds |
2356 |
if(s->mb_width == 1) { |
2357 |
px = A[0];
|
2358 |
py = A[1];
|
2359 |
} else {
|
2360 |
px = mid_pred(A[0], B[0], C[0]); |
2361 |
py = mid_pred(A[1], B[1], C[1]); |
2362 |
} |
2363 |
} else if(s->mb_x) { // predictor C is not out of bounds |
2364 |
px = C[0];
|
2365 |
py = C[1];
|
2366 |
} else {
|
2367 |
px = py = 0;
|
2368 |
} |
2369 |
/* Pullback MV as specified in 8.3.5.3.4 */
|
2370 |
{ |
2371 |
int qx, qy, X, Y;
|
2372 |
if(v->profile < PROFILE_ADVANCED) {
|
2373 |
qx = (s->mb_x << 5);
|
2374 |
qy = (s->mb_y << 5);
|
2375 |
X = (s->mb_width << 5) - 4; |
2376 |
Y = (s->mb_height << 5) - 4; |
2377 |
if(qx + px < -28) px = -28 - qx; |
2378 |
if(qy + py < -28) py = -28 - qy; |
2379 |
if(qx + px > X) px = X - qx;
|
2380 |
if(qy + py > Y) py = Y - qy;
|
2381 |
} else {
|
2382 |
qx = (s->mb_x << 6);
|
2383 |
qy = (s->mb_y << 6);
|
2384 |
X = (s->mb_width << 6) - 4; |
2385 |
Y = (s->mb_height << 6) - 4; |
2386 |
if(qx + px < -60) px = -60 - qx; |
2387 |
if(qy + py < -60) py = -60 - qy; |
2388 |
if(qx + px > X) px = X - qx;
|
2389 |
if(qy + py > Y) py = Y - qy;
|
2390 |
} |
2391 |
} |
2392 |
/* Calculate hybrid prediction as specified in 8.3.5.3.5 */
|
2393 |
if(0 && !s->first_slice_line && s->mb_x) { |
2394 |
if(is_intra[xy - wrap])
|
2395 |
sum = FFABS(px) + FFABS(py); |
2396 |
else
|
2397 |
sum = FFABS(px - A[0]) + FFABS(py - A[1]); |
2398 |
if(sum > 32) { |
2399 |
if(get_bits1(&s->gb)) {
|
2400 |
px = A[0];
|
2401 |
py = A[1];
|
2402 |
} else {
|
2403 |
px = C[0];
|
2404 |
py = C[1];
|
2405 |
} |
2406 |
} else {
|
2407 |
if(is_intra[xy - 2]) |
2408 |
sum = FFABS(px) + FFABS(py); |
2409 |
else
|
2410 |
sum = FFABS(px - C[0]) + FFABS(py - C[1]); |
2411 |
if(sum > 32) { |
2412 |
if(get_bits1(&s->gb)) {
|
2413 |
px = A[0];
|
2414 |
py = A[1];
|
2415 |
} else {
|
2416 |
px = C[0];
|
2417 |
py = C[1];
|
2418 |
} |
2419 |
} |
2420 |
} |
2421 |
} |
2422 |
/* store MV using signed modulus of MV range defined in 4.11 */
|
2423 |
|
2424 |
s->mv[1][0][0] = ((px + dmv_x[1] + r_x) & ((r_x << 1) - 1)) - r_x; |
2425 |
s->mv[1][0][1] = ((py + dmv_y[1] + r_y) & ((r_y << 1) - 1)) - r_y; |
2426 |
} |
2427 |
s->current_picture.motion_val[0][xy][0] = s->mv[0][0][0]; |
2428 |
s->current_picture.motion_val[0][xy][1] = s->mv[0][0][1]; |
2429 |
s->current_picture.motion_val[1][xy][0] = s->mv[1][0][0]; |
2430 |
s->current_picture.motion_val[1][xy][1] = s->mv[1][0][1]; |
2431 |
} |
2432 |
|
2433 |
/** Get predicted DC value for I-frames only
 * prediction dir: left=0, top=1
 * @param s MpegEncContext
 * @param[in] n block index in the current MB
 * @param dc_val_ptr Pointer to DC predictor
 * @param dir_ptr Prediction direction for use in AC prediction
 */
static inline int vc1_i_pred_dc(MpegEncContext *s, int overlap, int pq, int n,
                              int16_t **dc_val_ptr, int *dir_ptr)
{
    int a, b, c, wrap, pred, scale;
    int16_t *dc_val;
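    /* dcpred[scale] is roughly 1024/scale: the predictor used when neighbours
     * are unavailable, i.e. a mid-grey block once multiplied by the DC scale. */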
    static const uint16_t dcpred[32] = {
    -1, 1024,  512,  341,  256,  205,  171,  146,  128,
       114,  102,   93,   85,   79,   73,   68,   64,
        60,   57,   54,   51,   49,   47,   45,   43,
        41,   39,   38,   37,   35,   34,   33
    };

    /* find prediction - wmv3_dc_scale always used here in fact */
    if (n < 4)     scale = s->y_dc_scale;
    else           scale = s->c_dc_scale;

    wrap = s->block_wrap[n];
    dc_val= s->dc_val[0] + s->block_index[n];

    /* B A
     * C X
     */
    c = dc_val[ - 1];
    b = dc_val[ - 1 - wrap];
    a = dc_val[ - wrap];

    if (pq < 9 || !overlap)
    {
        /* Set outer values */
        if (s->first_slice_line && (n!=2 && n!=3)) b=a=dcpred[scale];
        if (s->mb_x == 0 && (n!=1 && n!=3)) b=c=dcpred[scale];
    }
    else
    {
        /* Set outer values */
        if (s->first_slice_line && (n!=2 && n!=3)) b=a=0;
        if (s->mb_x == 0 && (n!=1 && n!=3)) b=c=0;
    }

    if (abs(a - b) <= abs(b - c)) {
        pred = c;
        *dir_ptr = 1;//left
    } else {
        pred = a;
        *dir_ptr = 0;//top
    }

    /* update predictor */
    *dc_val_ptr = &dc_val[0];
    return pred;
}


/** Get predicted DC value
 * prediction dir: left=0, top=1
 * @param s MpegEncContext
 * @param[in] n block index in the current MB
 * @param dc_val_ptr Pointer to DC predictor
 * @param dir_ptr Prediction direction for use in AC prediction
 */
static inline int vc1_pred_dc(MpegEncContext *s, int overlap, int pq, int n,
                              int a_avail, int c_avail,
                              int16_t **dc_val_ptr, int *dir_ptr)
{
    int a, b, c, wrap, pred, scale;
    int16_t *dc_val;
    int mb_pos = s->mb_x + s->mb_y * s->mb_stride;
    int q1, q2 = 0;

    /* find prediction - wmv3_dc_scale always used here in fact */
    if (n < 4)     scale = s->y_dc_scale;
    else           scale = s->c_dc_scale;

    wrap = s->block_wrap[n];
    dc_val= s->dc_val[0] + s->block_index[n];

    /* B A
     * C X
     */
    c = dc_val[ - 1];
    b = dc_val[ - 1 - wrap];
    a = dc_val[ - wrap];
    /* scale predictors if needed */
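    /* vc1_dqscale[] holds fixed-point reciprocals (scaled by 2^18), so the
     * expressions below rescale a neighbouring DC value from its own quantizer
     * step to the current one: multiply by its scale, divide by ours. */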
    q1 = s->current_picture.qscale_table[mb_pos];
    if(c_avail && (n!= 1 && n!=3)) {
        q2 = s->current_picture.qscale_table[mb_pos - 1];
        if(q2 && q2 != q1)
            c = (c * s->y_dc_scale_table[q2] * vc1_dqscale[s->y_dc_scale_table[q1] - 1] + 0x20000) >> 18;
    }
    if(a_avail && (n!= 2 && n!=3)) {
        q2 = s->current_picture.qscale_table[mb_pos - s->mb_stride];
        if(q2 && q2 != q1)
            a = (a * s->y_dc_scale_table[q2] * vc1_dqscale[s->y_dc_scale_table[q1] - 1] + 0x20000) >> 18;
    }
    if(a_avail && c_avail && (n!=3)) {
        int off = mb_pos;
        if(n != 1) off--;
        if(n != 2) off -= s->mb_stride;
        q2 = s->current_picture.qscale_table[off];
        if(q2 && q2 != q1)
            b = (b * s->y_dc_scale_table[q2] * vc1_dqscale[s->y_dc_scale_table[q1] - 1] + 0x20000) >> 18;
    }

    if(a_avail && c_avail) {
        if(abs(a - b) <= abs(b - c)) {
            pred = c;
            *dir_ptr = 1;//left
        } else {
            pred = a;
            *dir_ptr = 0;//top
        }
    } else if(a_avail) {
        pred = a;
        *dir_ptr = 0;//top
    } else if(c_avail) {
        pred = c;
        *dir_ptr = 1;//left
    } else {
        pred = 0;
        *dir_ptr = 1;//left
    }

    /* update predictor */
    *dc_val_ptr = &dc_val[0];
    return pred;
}


/**
 * @defgroup std_mb VC1 Macroblock-level functions in Simple/Main Profiles
 * @see 7.1.4, p91 and 8.1.1.7, p(1)04
 * @{
 */

static inline int vc1_coded_block_pred(MpegEncContext * s, int n, uint8_t **coded_block_ptr)
{
    int xy, wrap, pred, a, b, c;

    xy = s->block_index[n];
    wrap = s->b8_stride;

    /* B C
     * A X
     */
    a = s->coded_block[xy - 1       ];
    b = s->coded_block[xy - 1 - wrap];
    c = s->coded_block[xy     - wrap];

    if (b == c) {
        pred = a;
    } else {
        pred = c;
    }

    /* store value */
    *coded_block_ptr = &s->coded_block[xy];

    return pred;
}

/**
 * Decode one AC coefficient
 * @param v The VC1 context
 * @param last Last coefficient
 * @param skip How many zero coefficients to skip
 * @param value Decoded AC coefficient value
 * @see 8.1.3.4
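 *
 * Besides the plain VLC path there are three escape modes (decode210): mode 0
 * adds a level delta, mode 1 adds a run delta, and mode 2 reads run, sign and
 * level as fixed-length fields whose sizes (esc3_run_length /
 * esc3_level_length) are determined once per frame.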
 */
static void vc1_decode_ac_coeff(VC1Context *v, int *last, int *skip, int *value, int codingset)
{
    GetBitContext *gb = &v->s.gb;
    int index, escape, run = 0, level = 0, lst = 0;

    index = get_vlc2(gb, vc1_ac_coeff_table[codingset].table, AC_VLC_BITS, 3);
    if (index != vc1_ac_sizes[codingset] - 1) {
        run = vc1_index_decode_table[codingset][index][0];
        level = vc1_index_decode_table[codingset][index][1];
        lst = index >= vc1_last_decode_table[codingset];
        if(get_bits(gb, 1))
            level = -level;
    } else {
        escape = decode210(gb);
        if (escape != 2) {
            index = get_vlc2(gb, vc1_ac_coeff_table[codingset].table, AC_VLC_BITS, 3);
            run = vc1_index_decode_table[codingset][index][0];
            level = vc1_index_decode_table[codingset][index][1];
            lst = index >= vc1_last_decode_table[codingset];
            if(escape == 0) {
                if(lst)
                    level += vc1_last_delta_level_table[codingset][run];
                else
                    level += vc1_delta_level_table[codingset][run];
            } else {
                if(lst)
                    run += vc1_last_delta_run_table[codingset][level] + 1;
                else
                    run += vc1_delta_run_table[codingset][level] + 1;
            }
            if(get_bits(gb, 1))
                level = -level;
        } else {
            int sign;
            lst = get_bits(gb, 1);
            if(v->s.esc3_level_length == 0) {
                if(v->pq < 8 || v->dquantfrm) { // table 59
                    v->s.esc3_level_length = get_bits(gb, 3);
                    if(!v->s.esc3_level_length)
                        v->s.esc3_level_length = get_bits(gb, 2) + 8;
                } else { //table 60
                    v->s.esc3_level_length = get_prefix(gb, 1, 6) + 2;
                }
                v->s.esc3_run_length = 3 + get_bits(gb, 2);
            }
            run = get_bits(gb, v->s.esc3_run_length);
            sign = get_bits(gb, 1);
            level = get_bits(gb, v->s.esc3_level_length);
            if(sign)
                level = -level;
        }
    }

    *last = lst;
    *skip = run;
    *value = level;
}

/** Decode intra block in intra frames - should be faster than decode_intra_block
|
2667 |
* @param v VC1Context
|
2668 |
* @param block block to decode
|
2669 |
* @param coded are AC coeffs present or not
|
2670 |
* @param codingset set of VLC to decode data
|
2671 |
*/
|
2672 |
static int vc1_decode_i_block(VC1Context *v, DCTELEM block[64], int n, int coded, int codingset) |
2673 |
{ |
2674 |
GetBitContext *gb = &v->s.gb; |
2675 |
MpegEncContext *s = &v->s; |
2676 |
int dc_pred_dir = 0; /* Direction of the DC prediction used */ |
2677 |
int run_diff, i;
|
2678 |
int16_t *dc_val; |
2679 |
int16_t *ac_val, *ac_val2; |
2680 |
int dcdiff;
|
2681 |
|
2682 |
/* Get DC differential */
|
2683 |
if (n < 4) { |
2684 |
dcdiff = get_vlc2(&s->gb, ff_msmp4_dc_luma_vlc[s->dc_table_index].table, DC_VLC_BITS, 3);
|
2685 |
} else {
|
2686 |
dcdiff = get_vlc2(&s->gb, ff_msmp4_dc_chroma_vlc[s->dc_table_index].table, DC_VLC_BITS, 3);
|
2687 |
} |
2688 |
if (dcdiff < 0){ |
2689 |
av_log(s->avctx, AV_LOG_ERROR, "Illegal DC VLC\n");
|
2690 |
return -1; |
2691 |
} |
2692 |
if (dcdiff)
|
2693 |
{ |
2694 |
if (dcdiff == 119 /* ESC index value */) |
2695 |
{ |
2696 |
/* TODO: Optimize */
|
2697 |
if (v->pq == 1) dcdiff = get_bits(gb, 10); |
2698 |
else if (v->pq == 2) dcdiff = get_bits(gb, 9); |
2699 |
else dcdiff = get_bits(gb, 8); |
2700 |
} |
2701 |
else
|
2702 |
{ |
2703 |
if (v->pq == 1) |
2704 |
dcdiff = (dcdiff<<2) + get_bits(gb, 2) - 3; |
2705 |
else if (v->pq == 2) |
2706 |
dcdiff = (dcdiff<<1) + get_bits(gb, 1) - 1; |
2707 |
} |
2708 |
if (get_bits(gb, 1)) |
2709 |
dcdiff = -dcdiff; |
2710 |
} |
2711 |
|
2712 |
/* Prediction */
|
2713 |
dcdiff += vc1_i_pred_dc(&v->s, v->overlap, v->pq, n, &dc_val, &dc_pred_dir); |
2714 |
*dc_val = dcdiff; |
2715 |
|
2716 |
/* Store the quantized DC coeff, used for prediction */
|
2717 |
if (n < 4) { |
2718 |
block[0] = dcdiff * s->y_dc_scale;
|
2719 |
} else {
|
2720 |
block[0] = dcdiff * s->c_dc_scale;
|
2721 |
} |
2722 |
/* Skip ? */
|
2723 |
run_diff = 0;
|
2724 |
i = 0;
|
2725 |
if (!coded) {
|
2726 |
goto not_coded;
|
2727 |
} |
2728 |
|
2729 |
//AC Decoding
|
2730 |
i = 1;
|
2731 |
|
2732 |
{ |
2733 |
int last = 0, skip, value; |
2734 |
const int8_t *zz_table;
|
2735 |
int scale;
|
2736 |
int k;
|
2737 |
|
2738 |
scale = v->pq * 2 + v->halfpq;
|
2739 |
|
2740 |
if(v->s.ac_pred) {
|
2741 |
if(!dc_pred_dir)
|
2742 |
zz_table = vc1_horizontal_zz; |
2743 |
else
|
2744 |
zz_table = vc1_vertical_zz; |
2745 |
} else
|
2746 |
zz_table = vc1_normal_zz; |
2747 |
|
2748 |
ac_val = s->ac_val[0][0] + s->block_index[n] * 16; |
2749 |
ac_val2 = ac_val; |
2750 |
if(dc_pred_dir) //left |
2751 |
ac_val -= 16;
|
2752 |
else //top |
2753 |
ac_val -= 16 * s->block_wrap[n];
|
2754 |
|
2755 |
while (!last) {
|
2756 |
vc1_decode_ac_coeff(v, &last, &skip, &value, codingset); |
2757 |
i += skip; |
2758 |
if(i > 63) |
2759 |
break;
|
2760 |
block[zz_table[i++]] = value; |
2761 |
} |
2762 |
|
2763 |
/* apply AC prediction if needed */
|
2764 |
if(s->ac_pred) {
|
2765 |
if(dc_pred_dir) { //left |
2766 |
for(k = 1; k < 8; k++) |
2767 |
block[k << 3] += ac_val[k];
|
2768 |
} else { //top |
2769 |
for(k = 1; k < 8; k++) |
2770 |
block[k] += ac_val[k + 8];
|
2771 |
} |
2772 |
} |
2773 |
/* save AC coeffs for further prediction */
|
2774 |
for(k = 1; k < 8; k++) { |
2775 |
ac_val2[k] = block[k << 3];
|
2776 |
ac_val2[k + 8] = block[k];
|
2777 |
} |
2778 |
|
2779 |
/* scale AC coeffs */
|
2780 |
for(k = 1; k < 64; k++) |
2781 |
if(block[k]) {
|
2782 |
block[k] *= scale; |
2783 |
if(!v->pquantizer)
|
2784 |
block[k] += (block[k] < 0) ? -v->pq : v->pq;
|
2785 |
} |
2786 |
|
2787 |
if(s->ac_pred) i = 63; |
2788 |
} |
2789 |
|
2790 |
not_coded:
|
2791 |
if(!coded) {
|
2792 |
int k, scale;
|
2793 |
ac_val = s->ac_val[0][0] + s->block_index[n] * 16; |
2794 |
ac_val2 = ac_val; |
2795 |
|
2796 |
scale = v->pq * 2 + v->halfpq;
|
2797 |
memset(ac_val2, 0, 16 * 2); |
2798 |
if(dc_pred_dir) {//left |
2799 |
ac_val -= 16;
|
2800 |
if(s->ac_pred)
|
2801 |
memcpy(ac_val2, ac_val, 8 * 2); |
2802 |
} else {//top |
2803 |
ac_val -= 16 * s->block_wrap[n];
|
2804 |
if(s->ac_pred)
|
2805 |
memcpy(ac_val2 + 8, ac_val + 8, 8 * 2); |
2806 |
} |
2807 |
|
2808 |
/* apply AC prediction if needed */
|
2809 |
if(s->ac_pred) {
|
2810 |
if(dc_pred_dir) { //left |
2811 |
for(k = 1; k < 8; k++) { |
2812 |
block[k << 3] = ac_val[k] * scale;
|
2813 |
if(!v->pquantizer && block[k << 3]) |
2814 |
block[k << 3] += (block[k << 3] < 0) ? -v->pq : v->pq; |
2815 |
} |
2816 |
} else { //top |
2817 |
for(k = 1; k < 8; k++) { |
2818 |
block[k] = ac_val[k + 8] * scale;
|
2819 |
if(!v->pquantizer && block[k])
|
2820 |
block[k] += (block[k] < 0) ? -v->pq : v->pq;
|
2821 |
} |
2822 |
} |
2823 |
i = 63;
|
2824 |
} |
2825 |
} |
2826 |
s->block_last_index[n] = i; |
2827 |
|
2828 |
return 0; |
2829 |
} |
2830 |
|
2831 |
/** Decode intra block in intra frames - should be faster than decode_intra_block
|
2832 |
* @param v VC1Context
|
2833 |
* @param block block to decode
|
2834 |
* @param coded are AC coeffs present or not
|
2835 |
* @param codingset set of VLC to decode data
|
2836 |
*/
|
2837 |
static int vc1_decode_i_block_adv(VC1Context *v, DCTELEM block[64], int n, int coded, int codingset, int mquant) |
2838 |
{ |
2839 |
GetBitContext *gb = &v->s.gb; |
2840 |
MpegEncContext *s = &v->s; |
2841 |
int dc_pred_dir = 0; /* Direction of the DC prediction used */ |
2842 |
int run_diff, i;
|
2843 |
int16_t *dc_val; |
2844 |
int16_t *ac_val, *ac_val2; |
2845 |
int dcdiff;
|
2846 |
int a_avail = v->a_avail, c_avail = v->c_avail;
|
2847 |
int use_pred = s->ac_pred;
|
2848 |
int scale;
|
2849 |
int q1, q2 = 0; |
2850 |
int mb_pos = s->mb_x + s->mb_y * s->mb_stride;
|
2851 |
|
2852 |
/* Get DC differential */
|
2853 |
if (n < 4) { |
2854 |
dcdiff = get_vlc2(&s->gb, ff_msmp4_dc_luma_vlc[s->dc_table_index].table, DC_VLC_BITS, 3);
|
2855 |
} else {
|
2856 |
dcdiff = get_vlc2(&s->gb, ff_msmp4_dc_chroma_vlc[s->dc_table_index].table, DC_VLC_BITS, 3);
|
2857 |
} |
2858 |
if (dcdiff < 0){ |
2859 |
av_log(s->avctx, AV_LOG_ERROR, "Illegal DC VLC\n");
|
2860 |
return -1; |
2861 |
} |
2862 |
if (dcdiff)
|
2863 |
{ |
2864 |
if (dcdiff == 119 /* ESC index value */) |
2865 |
{ |
2866 |
/* TODO: Optimize */
|
2867 |
if (mquant == 1) dcdiff = get_bits(gb, 10); |
2868 |
else if (mquant == 2) dcdiff = get_bits(gb, 9); |
2869 |
else dcdiff = get_bits(gb, 8); |
2870 |
} |
2871 |
else
|
2872 |
{ |
2873 |
if (mquant == 1) |
2874 |
dcdiff = (dcdiff<<2) + get_bits(gb, 2) - 3; |
2875 |
else if (mquant == 2) |
2876 |
dcdiff = (dcdiff<<1) + get_bits(gb, 1) - 1; |
2877 |
} |
2878 |
if (get_bits(gb, 1)) |
2879 |
dcdiff = -dcdiff; |
2880 |
} |
2881 |
|
2882 |
/* Prediction */
|
2883 |
dcdiff += vc1_pred_dc(&v->s, v->overlap, mquant, n, v->a_avail, v->c_avail, &dc_val, &dc_pred_dir); |
2884 |
*dc_val = dcdiff; |
2885 |
|
2886 |
/* Store the quantized DC coeff, used for prediction */
|
2887 |
if (n < 4) { |
2888 |
block[0] = dcdiff * s->y_dc_scale;
|
2889 |
} else {
|
2890 |
block[0] = dcdiff * s->c_dc_scale;
|
2891 |
} |
2892 |
/* Skip ? */
|
2893 |
run_diff = 0;
|
2894 |
i = 0;
|
2895 |
|
2896 |
//AC Decoding
|
2897 |
i = 1;
|
2898 |
|
2899 |
/* check if AC is needed at all and adjust direction if needed */
|
2900 |
if(!a_avail) dc_pred_dir = 1; |
2901 |
if(!c_avail) dc_pred_dir = 0; |
2902 |
if(!a_avail && !c_avail) use_pred = 0; |
2903 |
ac_val = s->ac_val[0][0] + s->block_index[n] * 16; |
2904 |
ac_val2 = ac_val; |
2905 |
|
2906 |
scale = mquant * 2 + v->halfpq;
|
2907 |
|
2908 |
if(dc_pred_dir) //left |
2909 |
ac_val -= 16;
|
2910 |
else //top |
2911 |
ac_val -= 16 * s->block_wrap[n];
|
2912 |
|
2913 |
q1 = s->current_picture.qscale_table[mb_pos]; |
2914 |
if(dc_pred_dir && c_avail && mb_pos) q2 = s->current_picture.qscale_table[mb_pos - 1]; |
2915 |
if(!dc_pred_dir && a_avail && mb_pos >= s->mb_stride) q2 = s->current_picture.qscale_table[mb_pos - s->mb_stride];
|
2916 |
if(n && n<4) q2 = q1; |
2917 |
|
2918 |
if(coded) {
|
2919 |
int last = 0, skip, value; |
2920 |
const int8_t *zz_table;
|
2921 |
int k;
|
2922 |
|
2923 |
if(v->s.ac_pred) {
|
2924 |
if(!dc_pred_dir)
|
2925 |
zz_table = vc1_horizontal_zz; |
2926 |
else
|
2927 |
zz_table = vc1_vertical_zz; |
2928 |
} else
|
2929 |
zz_table = vc1_normal_zz; |
2930 |
|
2931 |
while (!last) {
|
2932 |
vc1_decode_ac_coeff(v, &last, &skip, &value, codingset); |
2933 |
i += skip; |
2934 |
if(i > 63) |
2935 |
break;
|
2936 |
block[zz_table[i++]] = value; |
2937 |
} |
2938 |
|
2939 |
/* apply AC prediction if needed */
|
2940 |
if(use_pred) {
|
2941 |
/* scale predictors if needed*/
|
2942 |
if(q2 && q1!=q2) {
|
2943 |
q1 = q1 * 2 + ((q1 == v->pq) ? v->halfpq : 0) - 1; |
2944 |
q2 = q2 * 2 + ((q2 == v->pq) ? v->halfpq : 0) - 1; |
2945 |
|
2946 |
if(dc_pred_dir) { //left |
2947 |
for(k = 1; k < 8; k++) |
2948 |
block[k << 3] += (ac_val[k] * q2 * vc1_dqscale[q1 - 1] + 0x20000) >> 18; |
2949 |
} else { //top |
2950 |
for(k = 1; k < 8; k++) |
2951 |
block[k] += (ac_val[k + 8] * q2 * vc1_dqscale[q1 - 1] + 0x20000) >> 18; |
2952 |
} |
2953 |
} else {
|
2954 |
if(dc_pred_dir) { //left |
2955 |
for(k = 1; k < 8; k++) |
2956 |
block[k << 3] += ac_val[k];
|
2957 |
} else { //top |
2958 |
for(k = 1; k < 8; k++) |
2959 |
block[k] += ac_val[k + 8];
|
2960 |
} |
2961 |
} |
2962 |
} |
2963 |
/* save AC coeffs for further prediction */
|
2964 |
for(k = 1; k < 8; k++) { |
2965 |
ac_val2[k] = block[k << 3];
|
2966 |
ac_val2[k + 8] = block[k];
|
2967 |
} |
2968 |
|
2969 |
/* scale AC coeffs */
|
2970 |
for(k = 1; k < 64; k++) |
2971 |
if(block[k]) {
|
2972 |
block[k] *= scale; |
2973 |
if(!v->pquantizer)
|
2974 |
block[k] += (block[k] < 0) ? -mquant : mquant;
|
2975 |
} |
2976 |
|
2977 |
if(use_pred) i = 63; |
2978 |
} else { // no AC coeffs |
2979 |
int k;
|
2980 |
|
2981 |
memset(ac_val2, 0, 16 * 2); |
2982 |
if(dc_pred_dir) {//left |
2983 |
if(use_pred) {
|
2984 |
memcpy(ac_val2, ac_val, 8 * 2); |
2985 |
if(q2 && q1!=q2) {
|
2986 |
q1 = q1 * 2 + ((q1 == v->pq) ? v->halfpq : 0) - 1; |
2987 |
q2 = q2 * 2 + ((q2 == v->pq) ? v->halfpq : 0) - 1; |
2988 |
for(k = 1; k < 8; k++) |
2989 |
ac_val2[k] = (ac_val2[k] * q2 * vc1_dqscale[q1 - 1] + 0x20000) >> 18; |
2990 |
} |
2991 |
} |
2992 |
} else {//top |
2993 |
if(use_pred) {
|
2994 |
memcpy(ac_val2 + 8, ac_val + 8, 8 * 2); |
2995 |
if(q2 && q1!=q2) {
|
2996 |
q1 = q1 * 2 + ((q1 == v->pq) ? v->halfpq : 0) - 1; |
2997 |
q2 = q2 * 2 + ((q2 == v->pq) ? v->halfpq : 0) - 1; |
2998 |
for(k = 1; k < 8; k++) |
2999 |
ac_val2[k + 8] = (ac_val2[k + 8] * q2 * vc1_dqscale[q1 - 1] + 0x20000) >> 18; |
3000 |
} |
3001 |
} |
3002 |
} |
3003 |
|
3004 |
/* apply AC prediction if needed */
|
3005 |
if(use_pred) {
|
3006 |
if(dc_pred_dir) { //left |
3007 |
for(k = 1; k < 8; k++) { |
3008 |
block[k << 3] = ac_val2[k] * scale;
|
3009 |
if(!v->pquantizer && block[k << 3]) |
3010 |
block[k << 3] += (block[k << 3] < 0) ? -mquant : mquant; |
3011 |
} |
3012 |
} else { //top |
3013 |
for(k = 1; k < 8; k++) { |
3014 |
block[k] = ac_val2[k + 8] * scale;
|
3015 |
if(!v->pquantizer && block[k])
|
3016 |
block[k] += (block[k] < 0) ? -mquant : mquant;
|
3017 |
} |
3018 |
} |
3019 |
i = 63;
|
3020 |
} |
3021 |
} |
3022 |
s->block_last_index[n] = i; |
3023 |
|
3024 |
return 0; |
3025 |
} |
3026 |
|
3027 |
/** Decode intra block in inter frames - more generic version than vc1_decode_i_block
|
3028 |
* @param v VC1Context
|
3029 |
* @param block block to decode
|
3030 |
* @param coded are AC coeffs present or not
|
3031 |
* @param mquant block quantizer
|
3032 |
* @param codingset set of VLC to decode data
|
3033 |
*/
|
3034 |
static int vc1_decode_intra_block(VC1Context *v, DCTELEM block[64], int n, int coded, int mquant, int codingset) |
3035 |
{ |
3036 |
GetBitContext *gb = &v->s.gb; |
3037 |
MpegEncContext *s = &v->s; |
3038 |
int dc_pred_dir = 0; /* Direction of the DC prediction used */ |
3039 |
int run_diff, i;
|
3040 |
int16_t *dc_val; |
3041 |
int16_t *ac_val, *ac_val2; |
3042 |
int dcdiff;
|
3043 |
int mb_pos = s->mb_x + s->mb_y * s->mb_stride;
|
3044 |
int a_avail = v->a_avail, c_avail = v->c_avail;
|
3045 |
int use_pred = s->ac_pred;
|
3046 |
int scale;
|
3047 |
int q1, q2 = 0; |
3048 |
|
3049 |
/* XXX: Guard against dumb values of mquant */
|
3050 |
mquant = (mquant < 1) ? 0 : ( (mquant>31) ? 31 : mquant ); |
3051 |
|
3052 |
/* Set DC scale - y and c use the same */
|
3053 |
s->y_dc_scale = s->y_dc_scale_table[mquant]; |
3054 |
s->c_dc_scale = s->c_dc_scale_table[mquant]; |
3055 |
|
3056 |
/* Get DC differential */
|
3057 |
if (n < 4) { |
3058 |
dcdiff = get_vlc2(&s->gb, ff_msmp4_dc_luma_vlc[s->dc_table_index].table, DC_VLC_BITS, 3);
|
3059 |
} else {
|
3060 |
dcdiff = get_vlc2(&s->gb, ff_msmp4_dc_chroma_vlc[s->dc_table_index].table, DC_VLC_BITS, 3);
|
3061 |
} |
3062 |
if (dcdiff < 0){ |
3063 |
av_log(s->avctx, AV_LOG_ERROR, "Illegal DC VLC\n");
|
3064 |
return -1; |
3065 |
} |
3066 |
if (dcdiff)
|
3067 |
{ |
3068 |
if (dcdiff == 119 /* ESC index value */) |
3069 |
{ |
3070 |
/* TODO: Optimize */
|
3071 |
if (mquant == 1) dcdiff = get_bits(gb, 10); |
3072 |
else if (mquant == 2) dcdiff = get_bits(gb, 9); |
3073 |
else dcdiff = get_bits(gb, 8); |
3074 |
} |
3075 |
else
|
3076 |
{ |
3077 |
if (mquant == 1) |
3078 |
dcdiff = (dcdiff<<2) + get_bits(gb, 2) - 3; |
3079 |
else if (mquant == 2) |
3080 |
dcdiff = (dcdiff<<1) + get_bits(gb, 1) - 1; |
3081 |
} |
3082 |
if (get_bits(gb, 1)) |
3083 |
dcdiff = -dcdiff; |
3084 |
} |
3085 |
|
3086 |
/* Prediction */
|
3087 |
dcdiff += vc1_pred_dc(&v->s, v->overlap, mquant, n, a_avail, c_avail, &dc_val, &dc_pred_dir); |
3088 |
*dc_val = dcdiff; |
3089 |
|
3090 |
/* Store the quantized DC coeff, used for prediction */
|
3091 |
|
3092 |
if (n < 4) { |
3093 |
block[0] = dcdiff * s->y_dc_scale;
|
3094 |
} else {
|
3095 |
block[0] = dcdiff * s->c_dc_scale;
|
3096 |
} |
3097 |
/* Skip ? */
|
3098 |
run_diff = 0;
|
3099 |
i = 0;
|
3100 |
|
3101 |
//AC Decoding
|
3102 |
i = 1;
|
3103 |
|
3104 |
/* check if AC is needed at all and adjust direction if needed */
|
3105 |
if(!a_avail) dc_pred_dir = 1; |
3106 |
if(!c_avail) dc_pred_dir = 0; |
3107 |
if(!a_avail && !c_avail) use_pred = 0; |
3108 |
ac_val = s->ac_val[0][0] + s->block_index[n] * 16; |
3109 |
ac_val2 = ac_val; |
3110 |
|
3111 |
scale = mquant * 2 + v->halfpq;
|
3112 |
|
3113 |
if(dc_pred_dir) //left |
3114 |
ac_val -= 16;
|
3115 |
else //top |
3116 |
ac_val -= 16 * s->block_wrap[n];
|
3117 |
|
3118 |
q1 = s->current_picture.qscale_table[mb_pos]; |
3119 |
if(dc_pred_dir && c_avail && mb_pos) q2 = s->current_picture.qscale_table[mb_pos - 1]; |
3120 |
if(!dc_pred_dir && a_avail && mb_pos >= s->mb_stride) q2 = s->current_picture.qscale_table[mb_pos - s->mb_stride];
|
3121 |
if(n && n<4) q2 = q1; |
3122 |
|
3123 |
if(coded) {
|
3124 |
int last = 0, skip, value; |
3125 |
const int8_t *zz_table;
|
3126 |
int k;
|
3127 |
|
3128 |
zz_table = vc1_simple_progressive_8x8_zz; |
3129 |
|
3130 |
while (!last) {
|
3131 |
vc1_decode_ac_coeff(v, &last, &skip, &value, codingset); |
3132 |
i += skip; |
3133 |
if(i > 63) |
3134 |
break;
|
3135 |
block[zz_table[i++]] = value; |
3136 |
} |
3137 |
|
3138 |
/* apply AC prediction if needed */
|
3139 |
if(use_pred) {
|
3140 |
/* scale predictors if needed*/
|
3141 |
if(q2 && q1!=q2) {
|
3142 |
q1 = q1 * 2 + ((q1 == v->pq) ? v->halfpq : 0) - 1; |
3143 |
q2 = q2 * 2 + ((q2 == v->pq) ? v->halfpq : 0) - 1; |
3144 |
|
3145 |
if(dc_pred_dir) { //left |
3146 |
for(k = 1; k < 8; k++) |
3147 |
block[k << 3] += (ac_val[k] * q2 * vc1_dqscale[q1 - 1] + 0x20000) >> 18; |
3148 |
} else { //top |
3149 |
for(k = 1; k < 8; k++) |
3150 |
block[k] += (ac_val[k + 8] * q2 * vc1_dqscale[q1 - 1] + 0x20000) >> 18; |
3151 |
} |
3152 |
} else {
|
3153 |
if(dc_pred_dir) { //left |
3154 |
for(k = 1; k < 8; k++) |
3155 |
block[k << 3] += ac_val[k];
|
3156 |
} else { //top |
3157 |
for(k = 1; k < 8; k++) |
3158 |
block[k] += ac_val[k + 8];
|
3159 |
} |
3160 |
} |
3161 |
} |
3162 |
/* save AC coeffs for further prediction */
|
3163 |
for(k = 1; k < 8; k++) { |
3164 |
ac_val2[k] = block[k << 3];
|
3165 |
ac_val2[k + 8] = block[k];
|
3166 |
} |
3167 |
|
3168 |
/* scale AC coeffs */
|
3169 |
for(k = 1; k < 64; k++) |
3170 |
if(block[k]) {
|
3171 |
block[k] *= scale; |
3172 |
if(!v->pquantizer)
|
3173 |
block[k] += (block[k] < 0) ? -mquant : mquant;
|
3174 |
} |
3175 |
|
3176 |
if(use_pred) i = 63; |
3177 |
} else { // no AC coeffs |
3178 |
int k;
|
3179 |
|
3180 |
memset(ac_val2, 0, 16 * 2); |
3181 |
if(dc_pred_dir) {//left |
3182 |
if(use_pred) {
|
3183 |
memcpy(ac_val2, ac_val, 8 * 2); |
3184 |
if(q2 && q1!=q2) {
|
3185 |
q1 = q1 * 2 + ((q1 == v->pq) ? v->halfpq : 0) - 1; |
3186 |
q2 = q2 * 2 + ((q2 == v->pq) ? v->halfpq : 0) - 1; |
3187 |
for(k = 1; k < 8; k++) |
3188 |
ac_val2[k] = (ac_val2[k] * q2 * vc1_dqscale[q1 - 1] + 0x20000) >> 18; |
3189 |
} |
3190 |
} |
3191 |
} else {//top |
3192 |
if(use_pred) {
|
3193 |
memcpy(ac_val2 + 8, ac_val + 8, 8 * 2); |
3194 |
if(q2 && q1!=q2) {
|
3195 |
q1 = q1 * 2 + ((q1 == v->pq) ? v->halfpq : 0) - 1; |
3196 |
q2 = q2 * 2 + ((q2 == v->pq) ? v->halfpq : 0) - 1; |
3197 |
for(k = 1; k < 8; k++) |
3198 |
ac_val2[k + 8] = (ac_val2[k + 8] * q2 * vc1_dqscale[q1 - 1] + 0x20000) >> 18; |
3199 |
} |
3200 |
} |
3201 |
} |
3202 |
|
3203 |
/* apply AC prediction if needed */
|
3204 |
if(use_pred) {
|
3205 |
if(dc_pred_dir) { //left |
3206 |
for(k = 1; k < 8; k++) { |
3207 |
block[k << 3] = ac_val2[k] * scale;
|
3208 |
if(!v->pquantizer && block[k << 3]) |
3209 |
block[k << 3] += (block[k << 3] < 0) ? -mquant : mquant; |
3210 |
} |
3211 |
} else { //top |
3212 |
for(k = 1; k < 8; k++) { |
3213 |
block[k] = ac_val2[k + 8] * scale;
|
3214 |
if(!v->pquantizer && block[k])
|
3215 |
block[k] += (block[k] < 0) ? -mquant : mquant;
|
3216 |
} |
3217 |
} |
3218 |
i = 63;
|
3219 |
} |
3220 |
} |
3221 |
s->block_last_index[n] = i; |
3222 |
|
3223 |
return 0; |
3224 |
} |
3225 |
|
3226 |
/** Decode P block
|
3227 |
*/
|
3228 |
static int vc1_decode_p_block(VC1Context *v, DCTELEM block[64], int n, int mquant, int ttmb, int first_block) |
3229 |
{ |
3230 |
MpegEncContext *s = &v->s; |
3231 |
GetBitContext *gb = &s->gb; |
3232 |
int i, j;
|
3233 |
int subblkpat = 0; |
3234 |
int scale, off, idx, last, skip, value;
|
3235 |
int ttblk = ttmb & 7; |
3236 |
|
3237 |
if(ttmb == -1) { |
3238 |
ttblk = ttblk_to_tt[v->tt_index][get_vlc2(gb, vc1_ttblk_vlc[v->tt_index].table, VC1_TTBLK_VLC_BITS, 1)];
|
3239 |
} |
3240 |
if(ttblk == TT_4X4) {
|
3241 |
subblkpat = ~(get_vlc2(gb, vc1_subblkpat_vlc[v->tt_index].table, VC1_SUBBLKPAT_VLC_BITS, 1) + 1); |
3242 |
} |
3243 |
if((ttblk != TT_8X8 && ttblk != TT_4X4) && (v->ttmbf || (ttmb != -1 && (ttmb & 8) && !first_block))) { |
3244 |
subblkpat = decode012(gb); |
3245 |
if(subblkpat) subblkpat ^= 3; //swap decoded pattern bits |
3246 |
if(ttblk == TT_8X4_TOP || ttblk == TT_8X4_BOTTOM) ttblk = TT_8X4;
|
3247 |
if(ttblk == TT_4X8_RIGHT || ttblk == TT_4X8_LEFT) ttblk = TT_4X8;
|
3248 |
} |
3249 |
scale = 2 * mquant + v->halfpq;
|
3250 |
|
3251 |
// convert transforms like 8X4_TOP to generic TT and SUBBLKPAT
|
3252 |
if(ttblk == TT_8X4_TOP || ttblk == TT_8X4_BOTTOM) {
|
3253 |
subblkpat = 2 - (ttblk == TT_8X4_TOP);
|
3254 |
ttblk = TT_8X4; |
3255 |
} |
3256 |
if(ttblk == TT_4X8_RIGHT || ttblk == TT_4X8_LEFT) {
|
3257 |
subblkpat = 2 - (ttblk == TT_4X8_LEFT);
|
3258 |
ttblk = TT_4X8; |
3259 |
} |
3260 |
switch(ttblk) {
|
3261 |
case TT_8X8:
|
3262 |
i = 0;
|
3263 |
last = 0;
|
3264 |
while (!last) {
|
3265 |
vc1_decode_ac_coeff(v, &last, &skip, &value, v->codingset2); |
3266 |
i += skip; |
3267 |
if(i > 63) |
3268 |
break;
|
3269 |
idx = vc1_simple_progressive_8x8_zz[i++]; |
3270 |
block[idx] = value * scale; |
3271 |
if(!v->pquantizer)
|
3272 |
block[idx] += (block[idx] < 0) ? -mquant : mquant;
|
3273 |
} |
3274 |
s->dsp.vc1_inv_trans_8x8(block); |
3275 |
break;
|
3276 |
case TT_4X4:
|
3277 |
for(j = 0; j < 4; j++) { |
3278 |
last = subblkpat & (1 << (3 - j)); |
3279 |
i = 0;
|
3280 |
off = (j & 1) * 4 + (j & 2) * 16; |
3281 |
while (!last) {
|
3282 |
vc1_decode_ac_coeff(v, &last, &skip, &value, v->codingset2); |
3283 |
i += skip; |
3284 |
if(i > 15) |
3285 |
break;
|
3286 |
idx = vc1_simple_progressive_4x4_zz[i++]; |
3287 |
block[idx + off] = value * scale; |
3288 |
if(!v->pquantizer)
|
3289 |
block[idx + off] += (block[idx + off] < 0) ? -mquant : mquant;
|
3290 |
} |
3291 |
if(!(subblkpat & (1 << (3 - j)))) |
3292 |
s->dsp.vc1_inv_trans_4x4(block, j); |
3293 |
} |
3294 |
break;
|
3295 |
case TT_8X4:
|
3296 |
for(j = 0; j < 2; j++) { |
3297 |
last = subblkpat & (1 << (1 - j)); |
3298 |
i = 0;
|
3299 |
off = j * 32;
|
3300 |
while (!last) {
|
3301 |
vc1_decode_ac_coeff(v, &last, &skip, &value, v->codingset2); |
3302 |
i += skip; |
3303 |
if(i > 31) |
3304 |
break;
|
3305 |
if(v->profile < PROFILE_ADVANCED)
|
3306 |
idx = vc1_simple_progressive_8x4_zz[i++]; |
3307 |
else
|
3308 |
idx = vc1_adv_progressive_8x4_zz[i++]; |
3309 |
block[idx + off] = value * scale; |
3310 |
if(!v->pquantizer)
|
3311 |
block[idx + off] += (block[idx + off] < 0) ? -mquant : mquant;
|
3312 |
} |
3313 |
if(!(subblkpat & (1 << (1 - j)))) |
3314 |
s->dsp.vc1_inv_trans_8x4(block, j); |
3315 |
} |
3316 |
break;
|
3317 |
case TT_4X8:
|
3318 |
for(j = 0; j < 2; j++) { |
3319 |
last = subblkpat & (1 << (1 - j)); |
3320 |
i = 0;
|
3321 |
off = j * 4;
|
3322 |
while (!last) {
|
3323 |
vc1_decode_ac_coeff(v, &last, &skip, &value, v->codingset2); |
3324 |
i += skip; |
3325 |
if(i > 31) |
3326 |
break;
|
3327 |
if(v->profile < PROFILE_ADVANCED)
|
3328 |
idx = vc1_simple_progressive_4x8_zz[i++]; |
3329 |
else
|
3330 |
idx = vc1_adv_progressive_4x8_zz[i++]; |
3331 |
block[idx + off] = value * scale; |
3332 |
if(!v->pquantizer)
|
3333 |
block[idx + off] += (block[idx + off] < 0) ? -mquant : mquant;
|
3334 |
} |
3335 |
if(!(subblkpat & (1 << (1 - j)))) |
3336 |
s->dsp.vc1_inv_trans_4x8(block, j); |
3337 |
} |
3338 |
break;
|
3339 |
} |
3340 |
return 0; |
3341 |
} |
3342 |
|
3343 |
|
3344 |
/** Decode one P-frame MB (in Simple/Main profile)
|
3345 |
*/
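/* A P macroblock is either skipped (motion compensation only), coded in 1MV
 * mode (a single motion vector for the whole MB) or in 4MV mode (one vector
 * per luma block); intra blocks inside a P MB are handled by the intra block
 * decoder with AC/DC prediction. */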
|
3346 |
static int vc1_decode_p_mb(VC1Context *v) |
3347 |
{ |
3348 |
MpegEncContext *s = &v->s; |
3349 |
GetBitContext *gb = &s->gb; |
3350 |
int i, j;
|
3351 |
int mb_pos = s->mb_x + s->mb_y * s->mb_stride;
|
3352 |
int cbp; /* coded block pattern */ |
3353 |
int mqdiff, mquant; /* MB quantization */ |
3354 |
int ttmb = v->ttfrm; /* MB Transform type */ |
3355 |
int status = 0; /* initialized so the 4MV path cannot return an indeterminate value */
|
3356 |
|
3357 |
static const int size_table[6] = { 0, 2, 3, 4, 5, 8 }, |
3358 |
offset_table[6] = { 0, 1, 3, 7, 15, 31 }; |
3359 |
int mb_has_coeffs = 1; /* last_flag */ |
3360 |
int dmv_x, dmv_y; /* Differential MV components */ |
3361 |
int index, index1; /* LUT indices */ |
3362 |
int val, sign; /* temp values */ |
3363 |
int first_block = 1; |
3364 |
int dst_idx, off;
|
3365 |
int skipped, fourmv;
|
3366 |
|
3367 |
mquant = v->pq; /* Lossy initialization */
|
3368 |
|
3369 |
if (v->mv_type_is_raw)
|
3370 |
fourmv = get_bits1(gb); |
3371 |
else
|
3372 |
fourmv = v->mv_type_mb_plane[mb_pos]; |
3373 |
if (v->skip_is_raw)
|
3374 |
skipped = get_bits1(gb); |
3375 |
else
|
3376 |
skipped = v->s.mbskip_table[mb_pos]; |
3377 |
|
3378 |
s->dsp.clear_blocks(s->block[0]);
|
3379 |
|
3380 |
if (!fourmv) /* 1MV mode */ |
3381 |
{ |
3382 |
if (!skipped)
|
3383 |
{ |
3384 |
GET_MVDATA(dmv_x, dmv_y); |
3385 |
|
3386 |
if (s->mb_intra) {
|
3387 |
s->current_picture.motion_val[1][s->block_index[0]][0] = 0; |
3388 |
s->current_picture.motion_val[1][s->block_index[0]][1] = 0; |
3389 |
} |
3390 |
s->current_picture.mb_type[mb_pos] = s->mb_intra ? MB_TYPE_INTRA : MB_TYPE_16x16; |
3391 |
vc1_pred_mv(s, 0, dmv_x, dmv_y, 1, v->range_x, v->range_y, v->mb_type[0]); |
3392 |
|
3393 |
/* FIXME Set DC val for inter block ? */
|
3394 |
if (s->mb_intra && !mb_has_coeffs)
|
3395 |
{ |
3396 |
GET_MQUANT(); |
3397 |
s->ac_pred = get_bits(gb, 1);
|
3398 |
cbp = 0;
|
3399 |
} |
3400 |
else if (mb_has_coeffs) |
3401 |
{ |
3402 |
if (s->mb_intra) s->ac_pred = get_bits(gb, 1); |
3403 |
cbp = get_vlc2(&v->s.gb, v->cbpcy_vlc->table, VC1_CBPCY_P_VLC_BITS, 2);
|
3404 |
GET_MQUANT(); |
3405 |
} |
3406 |
else
|
3407 |
{ |
3408 |
mquant = v->pq; |
3409 |
cbp = 0;
|
3410 |
} |
3411 |
s->current_picture.qscale_table[mb_pos] = mquant; |
3412 |
|
3413 |
if (!v->ttmbf && !s->mb_intra && mb_has_coeffs)
|
3414 |
ttmb = get_vlc2(gb, vc1_ttmb_vlc[v->tt_index].table, |
3415 |
VC1_TTMB_VLC_BITS, 2);
|
3416 |
if(!s->mb_intra) vc1_mc_1mv(v, 0); |
3417 |
dst_idx = 0;
|
3418 |
for (i=0; i<6; i++) |
3419 |
{ |
3420 |
s->dc_val[0][s->block_index[i]] = 0; |
3421 |
dst_idx += i >> 2;
|
3422 |
val = ((cbp >> (5 - i)) & 1); |
3423 |
off = (i & 4) ? 0 : ((i & 1) * 8 + (i & 2) * 4 * s->linesize); |
3424 |
v->mb_type[0][s->block_index[i]] = s->mb_intra;
|
3425 |
if(s->mb_intra) {
|
3426 |
/* check if prediction blocks A and C are available */
|
3427 |
v->a_avail = v->c_avail = 0;
|
3428 |
if(i == 2 || i == 3 || !s->first_slice_line) |
3429 |
v->a_avail = v->mb_type[0][s->block_index[i] - s->block_wrap[i]];
|
3430 |
if(i == 1 || i == 3 || s->mb_x) |
3431 |
v->c_avail = v->mb_type[0][s->block_index[i] - 1]; |
3432 |
|
3433 |
vc1_decode_intra_block(v, s->block[i], i, val, mquant, (i&4)?v->codingset2:v->codingset);
|
3434 |
if((i>3) && (s->flags & CODEC_FLAG_GRAY)) continue; |
3435 |
s->dsp.vc1_inv_trans_8x8(s->block[i]); |
3436 |
if(v->rangeredfrm) for(j = 0; j < 64; j++) s->block[i][j] <<= 1; |
3437 |
for(j = 0; j < 64; j++) s->block[i][j] += 128; |
3438 |
s->dsp.put_pixels_clamped(s->block[i], s->dest[dst_idx] + off, s->linesize >> ((i & 4) >> 2)); |
3439 |
if(v->pq >= 9 && v->overlap) { |
3440 |
if(v->c_avail)
|
3441 |
s->dsp.vc1_h_overlap(s->dest[dst_idx] + off, s->linesize >> ((i & 4) >> 2)); |
3442 |
if(v->a_avail)
|
3443 |
s->dsp.vc1_v_overlap(s->dest[dst_idx] + off, s->linesize >> ((i & 4) >> 2)); |
3444 |
} |
3445 |
} else if(val) { |
3446 |
vc1_decode_p_block(v, s->block[i], i, mquant, ttmb, first_block); |
3447 |
if(!v->ttmbf && ttmb < 8) ttmb = -1; |
3448 |
first_block = 0;
|
3449 |
if((i<4) || !(s->flags & CODEC_FLAG_GRAY)) |
3450 |
s->dsp.add_pixels_clamped(s->block[i], s->dest[dst_idx] + off, (i&4)?s->uvlinesize:s->linesize);
|
3451 |
} |
3452 |
} |
3453 |
} |
3454 |
else //Skipped |
3455 |
{ |
3456 |
s->mb_intra = 0;
|
3457 |
for(i = 0; i < 6; i++) { |
3458 |
v->mb_type[0][s->block_index[i]] = 0; |
3459 |
s->dc_val[0][s->block_index[i]] = 0; |
3460 |
} |
3461 |
s->current_picture.mb_type[mb_pos] = MB_TYPE_SKIP; |
3462 |
s->current_picture.qscale_table[mb_pos] = 0;
|
3463 |
vc1_pred_mv(s, 0, 0, 0, 1, v->range_x, v->range_y, v->mb_type[0]); |
3464 |
vc1_mc_1mv(v, 0);
|
3465 |
return 0; |
3466 |
} |
3467 |
} //1MV mode
|
3468 |
else //4MV mode |
3469 |
{ |
3470 |
if (!skipped /* unskipped MB */) |
3471 |
{ |
3472 |
int intra_count = 0, coded_inter = 0; |
3473 |
int is_intra[6], is_coded[6]; |
3474 |
/* Get CBPCY */
|
3475 |
cbp = get_vlc2(&v->s.gb, v->cbpcy_vlc->table, VC1_CBPCY_P_VLC_BITS, 2);
|
3476 |
for (i=0; i<6; i++) |
3477 |
{ |
3478 |
val = ((cbp >> (5 - i)) & 1); |
3479 |
s->dc_val[0][s->block_index[i]] = 0; |
3480 |
s->mb_intra = 0;
|
3481 |
if(i < 4) { |
3482 |
dmv_x = dmv_y = 0;
|
3483 |
s->mb_intra = 0;
|
3484 |
mb_has_coeffs = 0;
|
3485 |
if(val) {
|
3486 |
GET_MVDATA(dmv_x, dmv_y); |
3487 |
} |
3488 |
vc1_pred_mv(s, i, dmv_x, dmv_y, 0, v->range_x, v->range_y, v->mb_type[0]); |
3489 |
if(!s->mb_intra) vc1_mc_4mv_luma(v, i);
|
3490 |
intra_count += s->mb_intra; |
3491 |
is_intra[i] = s->mb_intra; |
3492 |
is_coded[i] = mb_has_coeffs; |
3493 |
} |
3494 |
if(i&4){ |
3495 |
is_intra[i] = (intra_count >= 3);
|
3496 |
is_coded[i] = val; |
3497 |
} |
3498 |
if(i == 4) vc1_mc_4mv_chroma(v); |
3499 |
v->mb_type[0][s->block_index[i]] = is_intra[i];
|
3500 |
if(!coded_inter) coded_inter = !is_intra[i] && is_coded[i];
|
3501 |
} |
3502 |
// if there are no coded blocks then don't do anything more
|
3503 |
if(!intra_count && !coded_inter) return 0; |
3504 |
dst_idx = 0;
|
3505 |
GET_MQUANT(); |
3506 |
s->current_picture.qscale_table[mb_pos] = mquant; |
3507 |
/* test if block is intra and has pred */
|
3508 |
{ |
3509 |
int intrapred = 0; |
3510 |
for(i=0; i<6; i++) |
3511 |
if(is_intra[i]) {
|
3512 |
if(((!s->first_slice_line || (i==2 || i==3)) && v->mb_type[0][s->block_index[i] - s->block_wrap[i]]) |
3513 |
|| ((s->mb_x || (i==1 || i==3)) && v->mb_type[0][s->block_index[i] - 1])) { |
3514 |
intrapred = 1;
|
3515 |
break;
|
3516 |
} |
3517 |
} |
3518 |
if(intrapred)s->ac_pred = get_bits(gb, 1); |
3519 |
else s->ac_pred = 0; |
3520 |
} |
3521 |
if (!v->ttmbf && coded_inter)
|
3522 |
ttmb = get_vlc2(gb, vc1_ttmb_vlc[v->tt_index].table, VC1_TTMB_VLC_BITS, 2);
|
3523 |
for (i=0; i<6; i++) |
3524 |
{ |
3525 |
dst_idx += i >> 2;
|
3526 |
off = (i & 4) ? 0 : ((i & 1) * 8 + (i & 2) * 4 * s->linesize); |
3527 |
s->mb_intra = is_intra[i]; |
3528 |
if (is_intra[i]) {
|
3529 |
/* check if prediction blocks A and C are available */
|
3530 |
v->a_avail = v->c_avail = 0;
|
3531 |
if(i == 2 || i == 3 || !s->first_slice_line) |
3532 |
v->a_avail = v->mb_type[0][s->block_index[i] - s->block_wrap[i]];
|
3533 |
if(i == 1 || i == 3 || s->mb_x) |
3534 |
v->c_avail = v->mb_type[0][s->block_index[i] - 1]; |
3535 |
|
3536 |
vc1_decode_intra_block(v, s->block[i], i, is_coded[i], mquant, (i&4)?v->codingset2:v->codingset);
|
3537 |
if((i>3) && (s->flags & CODEC_FLAG_GRAY)) continue; |
3538 |
s->dsp.vc1_inv_trans_8x8(s->block[i]); |
3539 |
if(v->rangeredfrm) for(j = 0; j < 64; j++) s->block[i][j] <<= 1; |
3540 |
for(j = 0; j < 64; j++) s->block[i][j] += 128; |
3541 |
s->dsp.put_pixels_clamped(s->block[i], s->dest[dst_idx] + off, (i&4)?s->uvlinesize:s->linesize);
|
3542 |
if(v->pq >= 9 && v->overlap) { |
3543 |
if(v->c_avail)
|
3544 |
s->dsp.vc1_h_overlap(s->dest[dst_idx] + off, s->linesize >> ((i & 4) >> 2)); |
3545 |
if(v->a_avail)
|
3546 |
s->dsp.vc1_v_overlap(s->dest[dst_idx] + off, s->linesize >> ((i & 4) >> 2)); |
3547 |
} |
3548 |
} else if(is_coded[i]) { |
3549 |
status = vc1_decode_p_block(v, s->block[i], i, mquant, ttmb, first_block); |
3550 |
if(!v->ttmbf && ttmb < 8) ttmb = -1; |
3551 |
first_block = 0;
|
3552 |
if((i<4) || !(s->flags & CODEC_FLAG_GRAY)) |
3553 |
s->dsp.add_pixels_clamped(s->block[i], s->dest[dst_idx] + off, (i&4)?s->uvlinesize:s->linesize);
|
3554 |
} |
3555 |
} |
3556 |
return status;
|
3557 |
} |
3558 |
else //Skipped MB |
3559 |
{ |
3560 |
s->mb_intra = 0;
|
3561 |
s->current_picture.qscale_table[mb_pos] = 0;
|
3562 |
for (i=0; i<6; i++) { |
3563 |
v->mb_type[0][s->block_index[i]] = 0; |
3564 |
s->dc_val[0][s->block_index[i]] = 0; |
3565 |
} |
3566 |
for (i=0; i<4; i++) |
3567 |
{ |
3568 |
vc1_pred_mv(s, i, 0, 0, 0, v->range_x, v->range_y, v->mb_type[0]); |
3569 |
vc1_mc_4mv_luma(v, i); |
3570 |
} |
3571 |
vc1_mc_4mv_chroma(v); |
3572 |
s->current_picture.qscale_table[mb_pos] = 0;
|
3573 |
return 0; |
3574 |
} |
3575 |
} |
3576 |
|
3577 |
/* Should never happen */
|
3578 |
return -1; |
3579 |
} |
3580 |
|
3581 |
/** Decode one B-frame MB (in Main profile)
|
3582 |
*/
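/* B macroblocks carry a direct flag and a skip flag (raw bits or bitplane);
 * non-direct MBs also signal the prediction type (forward, backward or
 * interpolated) with a 0/1/2 code whose meaning is swapped depending on
 * BFRACTION. */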
|
3583 |
static void vc1_decode_b_mb(VC1Context *v) |
3584 |
{ |
3585 |
MpegEncContext *s = &v->s; |
3586 |
GetBitContext *gb = &s->gb; |
3587 |
int i, j;
|
3588 |
int mb_pos = s->mb_x + s->mb_y * s->mb_stride;
|
3589 |
int cbp = 0; /* coded block pattern */ |
3590 |
int mqdiff, mquant; /* MB quantization */ |
3591 |
int ttmb = v->ttfrm; /* MB Transform type */ |
3592 |
|
3593 |
static const int size_table[6] = { 0, 2, 3, 4, 5, 8 }, |
3594 |
offset_table[6] = { 0, 1, 3, 7, 15, 31 }; |
3595 |
int mb_has_coeffs = 0; /* last_flag */ |
3596 |
int index, index1; /* LUT indices */ |
3597 |
int val, sign; /* temp values */ |
3598 |
int first_block = 1; |
3599 |
int dst_idx, off;
|
3600 |
int skipped, direct;
|
3601 |
int dmv_x[2], dmv_y[2]; |
3602 |
int bmvtype = BMV_TYPE_BACKWARD;
|
3603 |
|
3604 |
mquant = v->pq; /* Lossy initialization */
|
3605 |
s->mb_intra = 0;
|
3606 |
|
3607 |
if (v->dmb_is_raw)
|
3608 |
direct = get_bits1(gb); |
3609 |
else
|
3610 |
direct = v->direct_mb_plane[mb_pos]; |
3611 |
if (v->skip_is_raw)
|
3612 |
skipped = get_bits1(gb); |
3613 |
else
|
3614 |
skipped = v->s.mbskip_table[mb_pos]; |
3615 |
|
3616 |
s->dsp.clear_blocks(s->block[0]);
|
3617 |
dmv_x[0] = dmv_x[1] = dmv_y[0] = dmv_y[1] = 0; |
3618 |
for(i = 0; i < 6; i++) { |
3619 |
v->mb_type[0][s->block_index[i]] = 0; |
3620 |
s->dc_val[0][s->block_index[i]] = 0; |
3621 |
} |
3622 |
s->current_picture.qscale_table[mb_pos] = 0;
|
3623 |
|
3624 |
if (!direct) {
|
3625 |
if (!skipped) {
|
3626 |
GET_MVDATA(dmv_x[0], dmv_y[0]); |
3627 |
dmv_x[1] = dmv_x[0]; |
3628 |
dmv_y[1] = dmv_y[0]; |
3629 |
} |
3630 |
if(skipped || !s->mb_intra) {
|
3631 |
bmvtype = decode012(gb); |
3632 |
switch(bmvtype) {
|
3633 |
case 0: |
3634 |
bmvtype = (v->bfraction >= (B_FRACTION_DEN/2)) ? BMV_TYPE_BACKWARD : BMV_TYPE_FORWARD;
|
3635 |
break;
|
3636 |
case 1: |
3637 |
bmvtype = (v->bfraction >= (B_FRACTION_DEN/2)) ? BMV_TYPE_FORWARD : BMV_TYPE_BACKWARD;
|
3638 |
break;
|
3639 |
case 2: |
3640 |
bmvtype = BMV_TYPE_INTERPOLATED; |
3641 |
dmv_x[0] = dmv_y[0] = 0; |
3642 |
} |
3643 |
} |
3644 |
} |
3645 |
for(i = 0; i < 6; i++) |
3646 |
v->mb_type[0][s->block_index[i]] = s->mb_intra;
|
3647 |
|
3648 |
if (skipped) {
|
3649 |
if(direct) bmvtype = BMV_TYPE_INTERPOLATED;
|
3650 |
vc1_pred_b_mv(v, dmv_x, dmv_y, direct, bmvtype); |
3651 |
vc1_b_mc(v, dmv_x, dmv_y, direct, bmvtype); |
3652 |
return;
|
3653 |
} |
3654 |
if (direct) {
|
3655 |
cbp = get_vlc2(&v->s.gb, v->cbpcy_vlc->table, VC1_CBPCY_P_VLC_BITS, 2);
|
3656 |
GET_MQUANT(); |
3657 |
s->mb_intra = 0;
|
3658 |
mb_has_coeffs = 0;
|
3659 |
s->current_picture.qscale_table[mb_pos] = mquant; |
3660 |
if(!v->ttmbf)
|
3661 |
ttmb = get_vlc2(gb, vc1_ttmb_vlc[v->tt_index].table, VC1_TTMB_VLC_BITS, 2);
|
3662 |
dmv_x[0] = dmv_y[0] = dmv_x[1] = dmv_y[1] = 0; |
3663 |
vc1_pred_b_mv(v, dmv_x, dmv_y, direct, bmvtype); |
3664 |
vc1_b_mc(v, dmv_x, dmv_y, direct, bmvtype); |
3665 |
} else {
|
3666 |
if(!mb_has_coeffs && !s->mb_intra) {
|
3667 |
/* no coded blocks - effectively skipped */
|
3668 |
vc1_pred_b_mv(v, dmv_x, dmv_y, direct, bmvtype); |
3669 |
vc1_b_mc(v, dmv_x, dmv_y, direct, bmvtype); |
3670 |
return;
|
3671 |
} |
3672 |
if(s->mb_intra && !mb_has_coeffs) {
|
3673 |
GET_MQUANT(); |
3674 |
s->current_picture.qscale_table[mb_pos] = mquant; |
3675 |
s->ac_pred = get_bits1(gb); |
3676 |
cbp = 0;
|
3677 |
vc1_pred_b_mv(v, dmv_x, dmv_y, direct, bmvtype); |
3678 |
} else {
|
3679 |
if(bmvtype == BMV_TYPE_INTERPOLATED) {
|
3680 |
GET_MVDATA(dmv_x[0], dmv_y[0]); |
3681 |
if(!mb_has_coeffs) {
|
3682 |
/* interpolated skipped block */
|
3683 |
vc1_pred_b_mv(v, dmv_x, dmv_y, direct, bmvtype); |
3684 |
vc1_b_mc(v, dmv_x, dmv_y, direct, bmvtype); |
3685 |
return;
|
3686 |
} |
3687 |
} |
3688 |
vc1_pred_b_mv(v, dmv_x, dmv_y, direct, bmvtype); |
3689 |
if(!s->mb_intra) {
|
3690 |
vc1_b_mc(v, dmv_x, dmv_y, direct, bmvtype); |
3691 |
} |
3692 |
if(s->mb_intra)
|
3693 |
s->ac_pred = get_bits1(gb); |
3694 |
cbp = get_vlc2(&v->s.gb, v->cbpcy_vlc->table, VC1_CBPCY_P_VLC_BITS, 2);
|
3695 |
GET_MQUANT(); |
3696 |
s->current_picture.qscale_table[mb_pos] = mquant; |
3697 |
if(!v->ttmbf && !s->mb_intra && mb_has_coeffs)
|
3698 |
ttmb = get_vlc2(gb, vc1_ttmb_vlc[v->tt_index].table, VC1_TTMB_VLC_BITS, 2);
|
3699 |
} |
3700 |
} |
3701 |
dst_idx = 0;
|
3702 |
for (i=0; i<6; i++) |
3703 |
{ |
3704 |
s->dc_val[0][s->block_index[i]] = 0; |
3705 |
dst_idx += i >> 2;
|
3706 |
val = ((cbp >> (5 - i)) & 1); |
3707 |
off = (i & 4) ? 0 : ((i & 1) * 8 + (i & 2) * 4 * s->linesize); |
3708 |
v->mb_type[0][s->block_index[i]] = s->mb_intra;
|
3709 |
if(s->mb_intra) {
|
3710 |
/* check if prediction blocks A and C are available */
|
3711 |
v->a_avail = v->c_avail = 0;
|
3712 |
if(i == 2 || i == 3 || !s->first_slice_line) |
3713 |
v->a_avail = v->mb_type[0][s->block_index[i] - s->block_wrap[i]];
|
3714 |
if(i == 1 || i == 3 || s->mb_x) |
3715 |
v->c_avail = v->mb_type[0][s->block_index[i] - 1]; |
3716 |
|
3717 |
vc1_decode_intra_block(v, s->block[i], i, val, mquant, (i&4)?v->codingset2:v->codingset);
|
3718 |
if((i>3) && (s->flags & CODEC_FLAG_GRAY)) continue; |
3719 |
s->dsp.vc1_inv_trans_8x8(s->block[i]); |
3720 |
if(v->rangeredfrm) for(j = 0; j < 64; j++) s->block[i][j] <<= 1; |
3721 |
for(j = 0; j < 64; j++) s->block[i][j] += 128; |
3722 |
s->dsp.put_pixels_clamped(s->block[i], s->dest[dst_idx] + off, s->linesize >> ((i & 4) >> 2)); |
3723 |
} else if(val) { |
3724 |
vc1_decode_p_block(v, s->block[i], i, mquant, ttmb, first_block); |
3725 |
if(!v->ttmbf && ttmb < 8) ttmb = -1; |
3726 |
first_block = 0;
|
3727 |
if((i<4) || !(s->flags & CODEC_FLAG_GRAY)) |
3728 |
s->dsp.add_pixels_clamped(s->block[i], s->dest[dst_idx] + off, (i&4)?s->uvlinesize:s->linesize);
|
3729 |
} |
3730 |
} |
3731 |
} |
3732 |
|
3733 |
/** Decode blocks of I-frame
|
3734 |
*/
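/* Simple/Main profile I-frame: the AC coding sets are selected from the AC
 * table indices, CBP bits are predicted from neighbouring blocks, and the
 * in-loop overlap filter is applied when PQUANT >= 9 and OVERLAP is set. */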
|
3735 |
static void vc1_decode_i_blocks(VC1Context *v) |
3736 |
{ |
3737 |
int k, j;
|
3738 |
MpegEncContext *s = &v->s; |
3739 |
int cbp, val;
|
3740 |
uint8_t *coded_val; |
3741 |
int mb_pos;
|
3742 |
|
3743 |
/* select codingmode used for VLC tables selection */
|
3744 |
switch(v->y_ac_table_index){
|
3745 |
case 0: |
3746 |
v->codingset = (v->pqindex <= 8) ? CS_HIGH_RATE_INTRA : CS_LOW_MOT_INTRA;
|
3747 |
break;
|
3748 |
case 1: |
3749 |
v->codingset = CS_HIGH_MOT_INTRA; |
3750 |
break;
|
3751 |
case 2: |
3752 |
v->codingset = CS_MID_RATE_INTRA; |
3753 |
break;
|
3754 |
} |
3755 |
|
3756 |
switch(v->c_ac_table_index){
|
3757 |
case 0: |
3758 |
v->codingset2 = (v->pqindex <= 8) ? CS_HIGH_RATE_INTER : CS_LOW_MOT_INTER;
|
3759 |
break;
|
3760 |
case 1: |
3761 |
v->codingset2 = CS_HIGH_MOT_INTER; |
3762 |
break;
|
3763 |
case 2: |
3764 |
v->codingset2 = CS_MID_RATE_INTER; |
3765 |
break;
|
3766 |
} |
3767 |
|
3768 |
/* Set DC scale - y and c use the same */
|
3769 |
s->y_dc_scale = s->y_dc_scale_table[v->pq]; |
3770 |
s->c_dc_scale = s->c_dc_scale_table[v->pq]; |
3771 |
|
3772 |
//do frame decode
|
3773 |
s->mb_x = s->mb_y = 0;
|
3774 |
s->mb_intra = 1;
|
3775 |
s->first_slice_line = 1;
|
3776 |
ff_er_add_slice(s, 0, 0, s->mb_width - 1, s->mb_height - 1, (AC_END|DC_END|MV_END)); |
3777 |
for(s->mb_y = 0; s->mb_y < s->mb_height; s->mb_y++) { |
3778 |
for(s->mb_x = 0; s->mb_x < s->mb_width; s->mb_x++) { |
3779 |
ff_init_block_index(s); |
3780 |
ff_update_block_index(s); |
3781 |
s->dsp.clear_blocks(s->block[0]);
|
3782 |
mb_pos = s->mb_x + s->mb_y * s->mb_stride; /* mb_stride, not mb_width: qscale_table and mb_type are laid out with the stride */ |
3783 |
s->current_picture.mb_type[mb_pos] = MB_TYPE_INTRA; |
3784 |
s->current_picture.qscale_table[mb_pos] = v->pq; |
3785 |
s->current_picture.motion_val[1][s->block_index[0]][0] = 0; |
3786 |
s->current_picture.motion_val[1][s->block_index[0]][1] = 0; |
3787 |
|
3788 |
// do actual MB decoding and displaying
|
3789 |
cbp = get_vlc2(&v->s.gb, ff_msmp4_mb_i_vlc.table, MB_INTRA_VLC_BITS, 2);
|
3790 |
v->s.ac_pred = get_bits(&v->s.gb, 1);
|
3791 |
|
3792 |
for(k = 0; k < 6; k++) { |
3793 |
val = ((cbp >> (5 - k)) & 1); |
3794 |
|
3795 |
if (k < 4) { |
3796 |
int pred = vc1_coded_block_pred(&v->s, k, &coded_val);
|
3797 |
val = val ^ pred; |
3798 |
*coded_val = val; |
3799 |
} |
3800 |
cbp |= val << (5 - k);
|
3801 |
|
3802 |
vc1_decode_i_block(v, s->block[k], k, val, (k<4)? v->codingset : v->codingset2);
|
3803 |
|
3804 |
s->dsp.vc1_inv_trans_8x8(s->block[k]); |
3805 |
if(v->pq >= 9 && v->overlap) { |
3806 |
for(j = 0; j < 64; j++) s->block[k][j] += 128; |
3807 |
} |
3808 |
} |
3809 |
|
3810 |
vc1_put_block(v, s->block); |
3811 |
if(v->pq >= 9 && v->overlap) { |
3812 |
if(s->mb_x) {
|
3813 |
s->dsp.vc1_h_overlap(s->dest[0], s->linesize);
|
3814 |
s->dsp.vc1_h_overlap(s->dest[0] + 8 * s->linesize, s->linesize); |
3815 |
if(!(s->flags & CODEC_FLAG_GRAY)) {
|
3816 |
s->dsp.vc1_h_overlap(s->dest[1], s->uvlinesize);
|
3817 |
s->dsp.vc1_h_overlap(s->dest[2], s->uvlinesize);
|
3818 |
} |
3819 |
} |
3820 |
s->dsp.vc1_h_overlap(s->dest[0] + 8, s->linesize); |
3821 |
s->dsp.vc1_h_overlap(s->dest[0] + 8 * s->linesize + 8, s->linesize); |
3822 |
if(!s->first_slice_line) {
|
3823 |
s->dsp.vc1_v_overlap(s->dest[0], s->linesize);
|
3824 |
s->dsp.vc1_v_overlap(s->dest[0] + 8, s->linesize); |
3825 |
if(!(s->flags & CODEC_FLAG_GRAY)) {
|
3826 |
s->dsp.vc1_v_overlap(s->dest[1], s->uvlinesize);
|
3827 |
s->dsp.vc1_v_overlap(s->dest[2], s->uvlinesize);
|
3828 |
} |
3829 |
} |
3830 |
s->dsp.vc1_v_overlap(s->dest[0] + 8 * s->linesize, s->linesize); |
3831 |
s->dsp.vc1_v_overlap(s->dest[0] + 8 * s->linesize + 8, s->linesize); |
3832 |
} |
3833 |
|
3834 |
if(get_bits_count(&s->gb) > v->bits) {
|
3835 |
av_log(s->avctx, AV_LOG_ERROR, "Bits overconsumption: %i > %i\n", get_bits_count(&s->gb), v->bits);
|
3836 |
return;
|
3837 |
} |
3838 |
} |
3839 |
ff_draw_horiz_band(s, s->mb_y * 16, 16); |
3840 |
s->first_slice_line = 0;
|
3841 |
} |
3842 |
} |
3843 |
|
3844 |
/** Decode blocks of I-frame for advanced profile
|
3845 |
*/
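/* The advanced profile adds a per-MB quantizer (GET_MQUANT), an AC prediction
 * bitplane and conditional overlap filtering (CONDOVER selects none, all or a
 * per-MB overlap flag). */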
|
3846 |
static void vc1_decode_i_blocks_adv(VC1Context *v) |
3847 |
{ |
3848 |
int k, j;
|
3849 |
MpegEncContext *s = &v->s; |
3850 |
int cbp, val;
|
3851 |
uint8_t *coded_val; |
3852 |
int mb_pos;
|
3853 |
int mquant = v->pq;
|
3854 |
int mqdiff;
|
3855 |
int overlap;
|
3856 |
GetBitContext *gb = &s->gb; |
3857 |
|
3858 |
/* select codingmode used for VLC tables selection */
|
3859 |
switch(v->y_ac_table_index){
|
3860 |
case 0: |
3861 |
v->codingset = (v->pqindex <= 8) ? CS_HIGH_RATE_INTRA : CS_LOW_MOT_INTRA;
|
3862 |
break;
|
3863 |
case 1: |
3864 |
v->codingset = CS_HIGH_MOT_INTRA; |
3865 |
break;
|
3866 |
case 2: |
3867 |
v->codingset = CS_MID_RATE_INTRA; |
3868 |
break;
|
3869 |
} |
3870 |
|
3871 |
switch(v->c_ac_table_index){
|
3872 |
case 0: |
3873 |
v->codingset2 = (v->pqindex <= 8) ? CS_HIGH_RATE_INTER : CS_LOW_MOT_INTER;
|
3874 |
break;
|
3875 |
case 1: |
3876 |
v->codingset2 = CS_HIGH_MOT_INTER; |
3877 |
break;
|
3878 |
case 2: |
3879 |
v->codingset2 = CS_MID_RATE_INTER; |
3880 |
break;
|
3881 |
} |
3882 |
|
3883 |
//do frame decode
|
3884 |
s->mb_x = s->mb_y = 0;
|
3885 |
s->mb_intra = 1;
|
3886 |
s->first_slice_line = 1;
|
3887 |
ff_er_add_slice(s, 0, 0, s->mb_width - 1, s->mb_height - 1, (AC_END|DC_END|MV_END)); |
3888 |
for(s->mb_y = 0; s->mb_y < s->mb_height; s->mb_y++) { |
3889 |
for(s->mb_x = 0; s->mb_x < s->mb_width; s->mb_x++) { |
3890 |
ff_init_block_index(s); |
3891 |
ff_update_block_index(s); |
3892 |
s->dsp.clear_blocks(s->block[0]);
|
3893 |
mb_pos = s->mb_x + s->mb_y * s->mb_stride; |
3894 |
s->current_picture.mb_type[mb_pos] = MB_TYPE_INTRA; |
3895 |
s->current_picture.motion_val[1][s->block_index[0]][0] = 0; |
3896 |
s->current_picture.motion_val[1][s->block_index[0]][1] = 0; |
3897 |
|
3898 |
// do actual MB decoding and displaying
|
3899 |
cbp = get_vlc2(&v->s.gb, ff_msmp4_mb_i_vlc.table, MB_INTRA_VLC_BITS, 2);
|
3900 |
if(v->acpred_is_raw)
|
3901 |
v->s.ac_pred = get_bits(&v->s.gb, 1);
|
3902 |
else
|
3903 |
v->s.ac_pred = v->acpred_plane[mb_pos]; |
3904 |
|
3905 |
if(v->condover == CONDOVER_SELECT) {
|
3906 |
if(v->overflg_is_raw)
|
3907 |
overlap = get_bits(&v->s.gb, 1);
|
3908 |
else
|
3909 |
overlap = v->over_flags_plane[mb_pos]; |
3910 |
} else
|
3911 |
overlap = (v->condover == CONDOVER_ALL); |
3912 |
|
3913 |
GET_MQUANT(); |
3914 |
|
3915 |
s->current_picture.qscale_table[mb_pos] = mquant; |
3916 |
/* Set DC scale - y and c use the same */
|
3917 |
s->y_dc_scale = s->y_dc_scale_table[mquant]; |
3918 |
s->c_dc_scale = s->c_dc_scale_table[mquant]; |
3919 |
|
3920 |
for(k = 0; k < 6; k++) { |
3921 |
val = ((cbp >> (5 - k)) & 1); |
3922 |
|
3923 |
if (k < 4) { |
3924 |
int pred = vc1_coded_block_pred(&v->s, k, &coded_val);
|
3925 |
val = val ^ pred; |
3926 |
*coded_val = val; |
3927 |
} |
3928 |
cbp |= val << (5 - k);
|
3929 |
|
3930 |
v->a_avail = !s->first_slice_line || (k==2 || k==3); |
3931 |
v->c_avail = !!s->mb_x || (k==1 || k==3); |
3932 |
|
3933 |
vc1_decode_i_block_adv(v, s->block[k], k, val, (k<4)? v->codingset : v->codingset2, mquant);
|
3934 |
|
3935 |
s->dsp.vc1_inv_trans_8x8(s->block[k]); |
3936 |
for(j = 0; j < 64; j++) s->block[k][j] += 128; |
3937 |
} |
3938 |
|
3939 |
vc1_put_block(v, s->block); |
3940 |
if(overlap) {
|
3941 |
if(s->mb_x) {
|
3942 |
s->dsp.vc1_h_overlap(s->dest[0], s->linesize);
|
3943 |
s->dsp.vc1_h_overlap(s->dest[0] + 8 * s->linesize, s->linesize); |
3944 |
if(!(s->flags & CODEC_FLAG_GRAY)) {
|
3945 |
s->dsp.vc1_h_overlap(s->dest[1], s->uvlinesize);
|
3946 |
s->dsp.vc1_h_overlap(s->dest[2], s->uvlinesize);
|
3947 |
} |
3948 |
} |
3949 |
s->dsp.vc1_h_overlap(s->dest[0] + 8, s->linesize); |
3950 |
s->dsp.vc1_h_overlap(s->dest[0] + 8 * s->linesize + 8, s->linesize); |
3951 |
if(!s->first_slice_line) {
|
3952 |
s->dsp.vc1_v_overlap(s->dest[0], s->linesize);
|
3953 |
s->dsp.vc1_v_overlap(s->dest[0] + 8, s->linesize); |
3954 |
if(!(s->flags & CODEC_FLAG_GRAY)) {
|
3955 |
s->dsp.vc1_v_overlap(s->dest[1], s->uvlinesize);
|
3956 |
s->dsp.vc1_v_overlap(s->dest[2], s->uvlinesize);
|
3957 |
} |
3958 |
} |
3959 |
s->dsp.vc1_v_overlap(s->dest[0] + 8 * s->linesize, s->linesize); |
3960 |
s->dsp.vc1_v_overlap(s->dest[0] + 8 * s->linesize + 8, s->linesize); |
3961 |
} |
3962 |
|
3963 |
if(get_bits_count(&s->gb) > v->bits) {
|
3964 |
av_log(s->avctx, AV_LOG_ERROR, "Bits overconsumption: %i > %i\n", get_bits_count(&s->gb), v->bits);
|
3965 |
return;
|
3966 |
} |
3967 |
} |
3968 |
ff_draw_horiz_band(s, s->mb_y * 16, 16); |
3969 |
s->first_slice_line = 0;
|
3970 |
} |
3971 |
} |
3972 |
|
3973 |
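/** Decode all macroblocks of a P-frame */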
static void vc1_decode_p_blocks(VC1Context *v) |
3974 |
{ |
3975 |
MpegEncContext *s = &v->s; |
3976 |
|
3977 |
/* select codingmode used for VLC tables selection */
|
3978 |
switch(v->c_ac_table_index){
|
3979 |
case 0: |
3980 |
v->codingset = (v->pqindex <= 8) ? CS_HIGH_RATE_INTRA : CS_LOW_MOT_INTRA;
|
3981 |
break;
|
3982 |
case 1: |
3983 |
v->codingset = CS_HIGH_MOT_INTRA; |
3984 |
break;
|
3985 |
case 2: |
3986 |
v->codingset = CS_MID_RATE_INTRA; |
3987 |
break;
|
3988 |
} |
3989 |
|
3990 |
switch(v->c_ac_table_index){
|
3991 |
case 0: |
3992 |
v->codingset2 = (v->pqindex <= 8) ? CS_HIGH_RATE_INTER : CS_LOW_MOT_INTER;
|
3993 |
break;
|
3994 |
case 1: |
3995 |
v->codingset2 = CS_HIGH_MOT_INTER; |
3996 |
break;
|
3997 |
case 2: |
3998 |
v->codingset2 = CS_MID_RATE_INTER; |
3999 |
break;
|
4000 |
} |
4001 |
|
4002 |
ff_er_add_slice(s, 0, 0, s->mb_width - 1, s->mb_height - 1, (AC_END|DC_END|MV_END)); |
4003 |
s->first_slice_line = 1;
|
4004 |
for(s->mb_y = 0; s->mb_y < s->mb_height; s->mb_y++) { |
4005 |
for(s->mb_x = 0; s->mb_x < s->mb_width; s->mb_x++) { |
4006 |
ff_init_block_index(s); |
4007 |
ff_update_block_index(s); |
4008 |
s->dsp.clear_blocks(s->block[0]);
|
4009 |
|
4010 |
vc1_decode_p_mb(v); |
4011 |
if(get_bits_count(&s->gb) > v->bits || get_bits_count(&s->gb) < 0) { |
4012 |
av_log(s->avctx, AV_LOG_ERROR, "Bits overconsumption: %i > %i at %ix%i\n", get_bits_count(&s->gb), v->bits,s->mb_x,s->mb_y);
|
4013 |
return;
|
4014 |
} |
4015 |
} |
4016 |
ff_draw_horiz_band(s, s->mb_y * 16, 16); |
4017 |
s->first_slice_line = 0;
|
4018 |
} |
4019 |
} |
4020 |
|
4021 |
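/** Decode all macroblocks of a B-frame */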
static void vc1_decode_b_blocks(VC1Context *v) |
4022 |
{ |
4023 |
MpegEncContext *s = &v->s; |
4024 |
|
4025 |
/* select codingmode used for VLC tables selection */
|
4026 |
switch(v->c_ac_table_index){
|
4027 |
case 0: |
4028 |
v->codingset = (v->pqindex <= 8) ? CS_HIGH_RATE_INTRA : CS_LOW_MOT_INTRA;
|
4029 |
break;
|
4030 |
case 1: |
4031 |
v->codingset = CS_HIGH_MOT_INTRA; |
4032 |
break;
|
4033 |
case 2: |
4034 |
v->codingset = CS_MID_RATE_INTRA; |
4035 |
break;
|
4036 |
} |
4037 |
|
4038 |
switch(v->c_ac_table_index){
|
4039 |
case 0: |
4040 |
v->codingset2 = (v->pqindex <= 8) ? CS_HIGH_RATE_INTER : CS_LOW_MOT_INTER;
|
4041 |
break;
|
4042 |
case 1: |
4043 |
v->codingset2 = CS_HIGH_MOT_INTER; |
4044 |
break;
|
4045 |
case 2: |
4046 |
v->codingset2 = CS_MID_RATE_INTER; |
4047 |
break;
|
4048 |
} |
4049 |
|
4050 |
ff_er_add_slice(s, 0, 0, s->mb_width - 1, s->mb_height - 1, (AC_END|DC_END|MV_END)); |
4051 |
s->first_slice_line = 1;
|
4052 |
for(s->mb_y = 0; s->mb_y < s->mb_height; s->mb_y++) { |
4053 |
for(s->mb_x = 0; s->mb_x < s->mb_width; s->mb_x++) { |
4054 |
ff_init_block_index(s); |
4055 |
ff_update_block_index(s); |
4056 |
s->dsp.clear_blocks(s->block[0]);
|
4057 |
|
4058 |
vc1_decode_b_mb(v); |
4059 |
if(get_bits_count(&s->gb) > v->bits || get_bits_count(&s->gb) < 0) { |
4060 |
av_log(s->avctx, AV_LOG_ERROR, "Bits overconsumption: %i > %i at %ix%i\n", get_bits_count(&s->gb), v->bits,s->mb_x,s->mb_y);
|
4061 |
return;
|
4062 |
} |
4063 |
} |
4064 |
ff_draw_horiz_band(s, s->mb_y * 16, 16); |
4065 |
s->first_slice_line = 0;
|
4066 |
} |
4067 |
} |
4068 |
|
4069 |
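/** Reconstruct a skipped P-frame by copying the previous picture */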
static void vc1_decode_skip_blocks(VC1Context *v) |
4070 |
{ |
4071 |
MpegEncContext *s = &v->s; |
4072 |
|
4073 |
ff_er_add_slice(s, 0, 0, s->mb_width - 1, s->mb_height - 1, (AC_END|DC_END|MV_END)); |
4074 |
s->first_slice_line = 1;
|
4075 |
for(s->mb_y = 0; s->mb_y < s->mb_height; s->mb_y++) { |
4076 |
s->mb_x = 0;
|
4077 |
ff_init_block_index(s); |
4078 |
ff_update_block_index(s); |
4079 |
memcpy(s->dest[0], s->last_picture.data[0] + s->mb_y * 16 * s->linesize, s->linesize * 16); |
4080 |
memcpy(s->dest[1], s->last_picture.data[1] + s->mb_y * 8 * s->uvlinesize, s->uvlinesize * 8); |
4081 |
memcpy(s->dest[2], s->last_picture.data[2] + s->mb_y * 8 * s->uvlinesize, s->uvlinesize * 8); |
4082 |
ff_draw_horiz_band(s, s->mb_y * 16, 16); |
4083 |
s->first_slice_line = 0;
|
4084 |
} |
4085 |
s->pict_type = P_TYPE; |
4086 |
} |
4087 |
|
4088 |
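/** Dispatch block decoding according to picture type and profile */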
static void vc1_decode_blocks(VC1Context *v) |
4089 |
{ |
4090 |
|
4091 |
v->s.esc3_level_length = 0;
|
4092 |
|
4093 |
switch(v->s.pict_type) {
|
4094 |
case I_TYPE:
|
4095 |
if(v->profile == PROFILE_ADVANCED)
|
4096 |
vc1_decode_i_blocks_adv(v); |
4097 |
else
|
4098 |
vc1_decode_i_blocks(v); |
4099 |
break;
|
4100 |
case P_TYPE:
|
4101 |
if(v->p_frame_skipped)
|
4102 |
vc1_decode_skip_blocks(v); |
4103 |
else
|
4104 |
vc1_decode_p_blocks(v); |
4105 |
break;
|
4106 |
case B_TYPE:
|
4107 |
if(v->bi_type){
|
4108 |
if(v->profile == PROFILE_ADVANCED)
|
4109 |
vc1_decode_i_blocks_adv(v); |
4110 |
else
|
4111 |
vc1_decode_i_blocks(v); |
4112 |
}else
|
4113 |
vc1_decode_b_blocks(v); |
4114 |
break;
|
4115 |
} |
4116 |
} |
4117 |
|
4118 |
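/* true for any 0x000001xx start code, i.e. every value in the VC1_CODE_RES0 range */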
#define IS_MARKER(x) (((x) & ~0xFF) == VC1_CODE_RES0) |
4119 |
|
4120 |
/** Find VC-1 marker in buffer
|
4121 |
* @return position where the next marker starts, or the end of the buffer if no marker is found
|
4122 |
*/
|
4123 |
static av_always_inline uint8_t* find_next_marker(uint8_t *src, uint8_t *end)
|
4124 |
{ |
4125 |
uint32_t mrk = 0xFFFFFFFF;
|
4126 |
|
4127 |
if(end-src < 4) return end; |
4128 |
while(src < end){
|
4129 |
mrk = (mrk << 8) | *src++;
|
4130 |
if(IS_MARKER(mrk))
|
4131 |
return src-4; |
4132 |
} |
4133 |
return end;
|
4134 |
} |
4135 |
|
4136 |
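/** Strip the 0x03 bytes inserted after two zero bytes to prevent start-code emulation */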
static av_always_inline int vc1_unescape_buffer(uint8_t *src, int size, uint8_t *dst) |
4137 |
{ |
4138 |
int dsize = 0, i; |
4139 |
|
4140 |
if(size < 4){ |
4141 |
for(dsize = 0; dsize < size; dsize++) *dst++ = *src++; |
4142 |
return size;
|
4143 |
} |
4144 |
for(i = 0; i < size; i++, src++) { |
4145 |
if(src[0] == 3 && i >= 2 && !src[-1] && !src[-2] && i < size-1 && src[1] < 4) { |
4146 |
dst[dsize++] = src[1];
|
4147 |
src++; |
4148 |
i++; |
4149 |
} else
|
4150 |
dst[dsize++] = *src; |
4151 |
} |
4152 |
return dsize;
|
4153 |
} |
4154 |
|
4155 |
/** Initialize a VC1/WMV3 decoder
|
4156 |
* @todo TODO: Handle VC-1 IDUs (Transport level?)
|
4157 |
* @todo TODO: Decipher remaining bits in extra_data
|
4158 |
*/
|
4159 |
static int vc1_decode_init(AVCodecContext *avctx) |
4160 |
{ |
4161 |
VC1Context *v = avctx->priv_data; |
4162 |
MpegEncContext *s = &v->s; |
4163 |
GetBitContext gb; |
4164 |
|
4165 |
if (!avctx->extradata_size || !avctx->extradata) return -1; |
4166 |
if (!(avctx->flags & CODEC_FLAG_GRAY))
|
4167 |
avctx->pix_fmt = PIX_FMT_YUV420P; |
4168 |
else
|
4169 |
avctx->pix_fmt = PIX_FMT_GRAY8; |
4170 |
v->s.avctx = avctx; |
4171 |
avctx->flags |= CODEC_FLAG_EMU_EDGE; |
4172 |
v->s.flags |= CODEC_FLAG_EMU_EDGE; |
4173 |
|
4174 |
if(ff_h263_decode_init(avctx) < 0) |
4175 |
return -1; |
4176 |
if (vc1_init_common(v) < 0) return -1; |
4177 |
|
4178 |
avctx->coded_width = avctx->width; |
4179 |
avctx->coded_height = avctx->height; |
4180 |
if (avctx->codec_id == CODEC_ID_WMV3)
|
4181 |
{ |
4182 |
int count = 0; |
4183 |
|
4184 |
// looks like WMV3 has a sequence header stored in the extradata
|
4185 |
// advanced sequence header may be before the first frame
|
4186 |
// the last byte of the extradata is a version number, 1 for the
|
4187 |
// samples we can decode
|
4188 |
|
4189 |
init_get_bits(&gb, avctx->extradata, avctx->extradata_size*8);
|
4190 |
|
4191 |
if (decode_sequence_header(avctx, &gb) < 0) |
4192 |
return -1; |
4193 |
|
4194 |
count = avctx->extradata_size*8 - get_bits_count(&gb);
|
4195 |
if (count>0) |
4196 |
{ |
4197 |
av_log(avctx, AV_LOG_INFO, "Extra data: %i bits left, value: %X\n",
|
4198 |
count, get_bits(&gb, count)); |
4199 |
} |
4200 |
else if (count < 0) |
4201 |
{ |
4202 |
av_log(avctx, AV_LOG_INFO, "Read %i bits in overflow\n", -count);
|
4203 |
} |
4204 |
} else { // VC1/WVC1 |
4205 |
uint8_t *start = avctx->extradata, *end = avctx->extradata + avctx->extradata_size; |
4206 |
uint8_t *next; int size, buf2_size;
|
4207 |
uint8_t *buf2 = NULL;
|
4208 |
int seq_inited = 0, ep_inited = 0; |
4209 |
|
4210 |
if(avctx->extradata_size < 16) { |
4211 |
av_log(avctx, AV_LOG_ERROR, "Extradata size too small: %i\n", avctx->extradata_size);
|
4212 |
return -1; |
4213 |
} |
4214 |
|
4215 |
buf2 = av_mallocz(avctx->extradata_size + FF_INPUT_BUFFER_PADDING_SIZE); |
4216 |
if(start[0]) start++; // in WVC1 extradata first byte is its size |
4217 |
next = start; |
4218 |
for(; next < end; start = next){
|
4219 |
next = find_next_marker(start + 4, end);
|
4220 |
size = next - start - 4;
|
4221 |
if(size <= 0) continue; |
4222 |
buf2_size = vc1_unescape_buffer(start + 4, size, buf2);
|
4223 |
init_get_bits(&gb, buf2, buf2_size * 8);
|
4224 |
switch(AV_RB32(start)){
|
4225 |
case VC1_CODE_SEQHDR:
|
4226 |
if(decode_sequence_header(avctx, &gb) < 0){ |
4227 |
av_free(buf2); |
4228 |
return -1; |
4229 |
} |
4230 |
seq_inited = 1;
|
4231 |
break;
|
4232 |
case VC1_CODE_ENTRYPOINT:
|
4233 |
if(decode_entry_point(avctx, &gb) < 0){ |
4234 |
av_free(buf2); |
4235 |
return -1; |
4236 |
} |
4237 |
ep_inited = 1;
|
4238 |
break;
|
4239 |
} |
4240 |
} |
4241 |
av_free(buf2); |
4242 |
if(!seq_inited || !ep_inited){
|
4243 |
av_log(avctx, AV_LOG_ERROR, "Incomplete extradata\n");
|
4244 |
return -1; |
4245 |
} |
4246 |
} |
4247 |
avctx->has_b_frames= !!(avctx->max_b_frames); |
4248 |
s->low_delay = !avctx->has_b_frames; |
4249 |
|
4250 |
s->mb_width = (avctx->coded_width+15)>>4; |
4251 |
s->mb_height = (avctx->coded_height+15)>>4; |
4252 |
|
4253 |
/* Allocate mb bitplanes */
|
4254 |
v->mv_type_mb_plane = av_malloc(s->mb_stride * s->mb_height); |
4255 |
v->direct_mb_plane = av_malloc(s->mb_stride * s->mb_height); |
4256 |
v->acpred_plane = av_malloc(s->mb_stride * s->mb_height); |
4257 |
v->over_flags_plane = av_malloc(s->mb_stride * s->mb_height); |
4258 |
|
4259 |
/* allocate block type info in that way so it could be used with s->block_index[] */
|
4260 |
v->mb_type_base = av_malloc(s->b8_stride * (s->mb_height * 2 + 1) + s->mb_stride * (s->mb_height + 1) * 2); |
4261 |
v->mb_type[0] = v->mb_type_base + s->b8_stride + 1; |
4262 |
v->mb_type[1] = v->mb_type_base + s->b8_stride * (s->mb_height * 2 + 1) + s->mb_stride + 1; |
4263 |
v->mb_type[2] = v->mb_type[1] + s->mb_stride * (s->mb_height + 1); |
4264 |
|
4265 |
/* Init coded blocks info */
|
4266 |
if (v->profile == PROFILE_ADVANCED)
|
4267 |
{ |
4268 |
// if (alloc_bitplane(&v->over_flags_plane, s->mb_width, s->mb_height) < 0)
|
4269 |
// return -1;
|
4270 |
// if (alloc_bitplane(&v->ac_pred_plane, s->mb_width, s->mb_height) < 0)
|
4271 |
// return -1;
|
4272 |
} |
4273 |
|
4274 |
return 0; |
4275 |
} |
4276 |
|
4277 |
|
4278 |
/** Decode a VC1/WMV3 frame
|
4279 |
* @todo TODO: Handle VC-1 IDUs (Transport level?)
|
4280 |
*/
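/* The packet is unescaped first (an advanced profile frame may contain
 * several IDUs separated by start codes), then the frame header is parsed,
 * reference availability is checked and vc1_decode_blocks() does the actual
 * macroblock decoding. */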
|
4281 |
static int vc1_decode_frame(AVCodecContext *avctx, |
4282 |
void *data, int *data_size, |
4283 |
uint8_t *buf, int buf_size)
|
4284 |
{ |
4285 |
VC1Context *v = avctx->priv_data; |
4286 |
MpegEncContext *s = &v->s; |
4287 |
AVFrame *pict = data; |
4288 |
uint8_t *buf2 = NULL;
|
4289 |
|
4290 |
/* no supplementary picture */
|
4291 |
if (buf_size == 0) { |
4292 |
/* special case for last picture */
|
4293 |
if (s->low_delay==0 && s->next_picture_ptr) { |
4294 |
*pict= *(AVFrame*)s->next_picture_ptr; |
4295 |
s->next_picture_ptr= NULL;
|
4296 |
|
4297 |
*data_size = sizeof(AVFrame);
|
4298 |
} |
4299 |
|
4300 |
return 0; |
4301 |
} |
4302 |
|
4303 |
//we need to set current_picture_ptr before reading the header, otherwise we cannot store anything in there
|
4304 |
if(s->current_picture_ptr==NULL || s->current_picture_ptr->data[0]){ |
4305 |
int i= ff_find_unused_picture(s, 0); |
4306 |
s->current_picture_ptr= &s->picture[i]; |
4307 |
} |
4308 |
|
4309 |
//for advanced profile we may need to parse and unescape data
|
4310 |
if (avctx->codec_id == CODEC_ID_VC1) {
|
4311 |
int buf_size2 = 0; |
4312 |
buf2 = av_mallocz(buf_size + FF_INPUT_BUFFER_PADDING_SIZE); |
4313 |
|
4314 |
if(IS_MARKER(AV_RB32(buf))){ /* frame starts with marker and needs to be parsed */ |
4315 |
uint8_t *dst = buf2, *start, *end, *next; |
4316 |
int size;
|
4317 |
|
4318 |
next = buf; |
4319 |
for(start = buf, end = buf + buf_size; next < end; start = next){
|
4320 |
next = find_next_marker(start + 4, end);
|
4321 |
size = next - start - 4;
|
4322 |
if(size <= 0) continue; |
4323 |
switch(AV_RB32(start)){
|
4324 |
case VC1_CODE_FRAME:
|
4325 |
buf_size2 = vc1_unescape_buffer(start + 4, size, buf2);
|
4326 |
break;
|
4327 |
case VC1_CODE_ENTRYPOINT: /* it should be before frame data */ |
4328 |
buf_size2 = vc1_unescape_buffer(start + 4, size, buf2);
|
4329 |
init_get_bits(&s->gb, buf2, buf_size2*8);
|
4330 |
decode_entry_point(avctx, &s->gb); |
4331 |
break;
|
4332 |
case VC1_CODE_SLICE:
|
4333 |
av_log(avctx, AV_LOG_ERROR, "Sliced decoding is not implemented (yet)\n");
|
4334 |
av_free(buf2); |
4335 |
return -1; |
4336 |
} |
4337 |
} |
4338 |
}else if(v->interlace && ((buf[0] & 0xC0) == 0xC0)){ /* WVC1 interlaced stores both fields divided by marker */ |
4339 |
uint8_t *divider; |
4340 |
|
4341 |
divider = find_next_marker(buf, buf + buf_size); |
4342 |
if((divider == (buf + buf_size)) || AV_RB32(divider) != VC1_CODE_FIELD){
|
4343 |
av_log(avctx, AV_LOG_ERROR, "Error in WVC1 interlaced frame\n");
|
4344 |
return -1; |
4345 |
} |
4346 |
|
4347 |
buf_size2 = vc1_unescape_buffer(buf, divider - buf, buf2); |
4348 |
// TODO
|
4349 |
av_free(buf2);return -1; |
4350 |
}else{
|
4351 |
buf_size2 = vc1_unescape_buffer(buf, buf_size, buf2); |
4352 |
} |
4353 |
init_get_bits(&s->gb, buf2, buf_size2*8);
|
4354 |
} else
|
4355 |
init_get_bits(&s->gb, buf, buf_size*8);
|
4356 |
// do parse frame header
|
4357 |
if(v->profile < PROFILE_ADVANCED) {
|
4358 |
if(vc1_parse_frame_header(v, &s->gb) == -1) { |
4359 |
av_free(buf2); |
4360 |
return -1; |
4361 |
} |
4362 |
} else {
|
4363 |
if(vc1_parse_frame_header_adv(v, &s->gb) == -1) { |
4364 |
av_free(buf2); |
4365 |
return -1; |
4366 |
} |
4367 |
} |
4368 |
|
4369 |
if(s->pict_type != I_TYPE && !v->res_rtm_flag){
|
4370 |
av_free(buf2); |
4371 |
return -1; |
4372 |
} |
4373 |
|
4374 |
// for hurry_up==5
|
4375 |
s->current_picture.pict_type= s->pict_type; |
4376 |
s->current_picture.key_frame= s->pict_type == I_TYPE; |
4377 |
|
4378 |
/* skip B-frames if we don't have reference frames */
|
4379 |
if(s->last_picture_ptr==NULL && (s->pict_type==B_TYPE || s->dropable)){ |
4380 |
av_free(buf2); |
4381 |
return -1;//buf_size; |
4382 |
} |
4383 |
/* skip b frames if we are in a hurry */
|
4384 |
if(avctx->hurry_up && s->pict_type==B_TYPE) return -1;//buf_size; |
4385 |
if( (avctx->skip_frame >= AVDISCARD_NONREF && s->pict_type==B_TYPE)
|
4386 |
|| (avctx->skip_frame >= AVDISCARD_NONKEY && s->pict_type!=I_TYPE) |
4387 |
|| avctx->skip_frame >= AVDISCARD_ALL) { |
4388 |
av_free(buf2); |
4389 |
return buf_size;
|
4390 |
} |
4391 |
/* skip everything if we are in a hurry>=5 */
|
4392 |
if(avctx->hurry_up>=5) { |
4393 |
av_free(buf2); |
4394 |
return -1;//buf_size; |
4395 |
} |
4396 |
|
4397 |
if(s->next_p_frame_damaged){
|
4398 |
if(s->pict_type==B_TYPE)
|
4399 |
return buf_size;
|
4400 |
else
|
4401 |
s->next_p_frame_damaged=0;
|
4402 |
} |
4403 |
|
4404 |
if(MPV_frame_start(s, avctx) < 0) { |
4405 |
av_free(buf2); |
4406 |
return -1; |
4407 |
} |
4408 |
|
4409 |
ff_er_frame_start(s); |
4410 |
|
4411 |
v->bits = buf_size * 8;
|
4412 |
vc1_decode_blocks(v); |
4413 |
//av_log(s->avctx, AV_LOG_INFO, "Consumed %i/%i bits\n", get_bits_count(&s->gb), buf_size*8);
|
4414 |
// if(get_bits_count(&s->gb) > buf_size * 8)
|
4415 |
// return -1;
|
4416 |
ff_er_frame_end(s); |
4417 |
|
4418 |
MPV_frame_end(s); |
4419 |
|
4420 |
assert(s->current_picture.pict_type == s->current_picture_ptr->pict_type); |
4421 |
assert(s->current_picture.pict_type == s->pict_type); |
4422 |
if (s->pict_type == B_TYPE || s->low_delay) {
|
4423 |
*pict= *(AVFrame*)s->current_picture_ptr; |
4424 |
} else if (s->last_picture_ptr != NULL) { |
4425 |
*pict= *(AVFrame*)s->last_picture_ptr; |
4426 |
} |
4427 |
|
4428 |
if(s->last_picture_ptr || s->low_delay){
|
4429 |
*data_size = sizeof(AVFrame);
|
4430 |
ff_print_debug_info(s, pict); |
4431 |
} |
4432 |
|
4433 |
/* Return the Picture timestamp as the frame number */
|
4434 |
/* we subtract 1 because it is added in utils.c */
|
4435 |
avctx->frame_number = s->picture_number - 1;
|
4436 |
|
4437 |
av_free(buf2); |
4438 |
return buf_size;
|
4439 |
} |
4440 |
|
4441 |
|
4442 |
/** Close a VC1/WMV3 decoder
|
4443 |
* @warning Initial try at using MpegEncContext stuff
|
4444 |
*/
|
4445 |
static int vc1_decode_end(AVCodecContext *avctx) |
4446 |
{ |
4447 |
VC1Context *v = avctx->priv_data; |
4448 |
|
4449 |
av_freep(&v->hrd_rate); |
4450 |
av_freep(&v->hrd_buffer); |
4451 |
MPV_common_end(&v->s); |
4452 |
av_freep(&v->mv_type_mb_plane); |
4453 |
av_freep(&v->direct_mb_plane); |
4454 |
av_freep(&v->acpred_plane); |
4455 |
av_freep(&v->over_flags_plane); |
4456 |
av_freep(&v->mb_type_base); |
4457 |
return 0; |
4458 |
} |
4459 |
|
4460 |
|
4461 |
AVCodec vc1_decoder = { |
4462 |
"vc1",
|
4463 |
CODEC_TYPE_VIDEO, |
4464 |
CODEC_ID_VC1, |
4465 |
sizeof(VC1Context),
|
4466 |
vc1_decode_init, |
4467 |
NULL,
|
4468 |
vc1_decode_end, |
4469 |
vc1_decode_frame, |
4470 |
CODEC_CAP_DELAY, |
4471 |
NULL
|
4472 |
}; |
4473 |
|
4474 |
AVCodec wmv3_decoder = { |
4475 |
"wmv3",
|
4476 |
CODEC_TYPE_VIDEO, |
4477 |
CODEC_ID_WMV3, |
4478 |
sizeof(VC1Context),
|
4479 |
vc1_decode_init, |
4480 |
NULL,
|
4481 |
vc1_decode_end, |
4482 |
vc1_decode_frame, |
4483 |
CODEC_CAP_DELAY, |
4484 |
NULL
|
4485 |
}; |
4486 |
|
4487 |
#ifdef CONFIG_VC1_PARSER
|
4488 |
/**
|
4489 |
* finds the end of the current frame in the bitstream.
|
4490 |
* @return the position of the first byte of the next frame, or -1
|
4491 |
*/
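/* a 32-bit shift register (pc->state) is kept across calls so that start
 * codes straddling packet boundaries are still detected */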
|
4492 |
static int vc1_find_frame_end(ParseContext *pc, const uint8_t *buf, |
4493 |
int buf_size) {
|
4494 |
int pic_found, i;
|
4495 |
uint32_t state; |
4496 |
|
4497 |
pic_found= pc->frame_start_found; |
4498 |
state= pc->state; |
4499 |
|
4500 |
i=0;
|
4501 |
if(!pic_found){
|
4502 |
for(i=0; i<buf_size; i++){ |
4503 |
state= (state<<8) | buf[i];
|
4504 |
if(state == VC1_CODE_FRAME || state == VC1_CODE_FIELD){
|
4505 |
i++; |
4506 |
pic_found=1;
|
4507 |
break;
|
4508 |
} |
4509 |
} |
4510 |
} |
4511 |
|
4512 |
if(pic_found){
|
4513 |
/* EOF considered as end of frame */
|
4514 |
if (buf_size == 0) |
4515 |
return 0; |
4516 |
for(; i<buf_size; i++){
|
4517 |
state= (state<<8) | buf[i];
|
4518 |
if(IS_MARKER(state) && state != VC1_CODE_FIELD && state != VC1_CODE_SLICE){
|
4519 |
pc->frame_start_found=0;
|
4520 |
pc->state=-1;
|
4521 |
return i-3; |
4522 |
} |
4523 |
} |
4524 |
} |
4525 |
pc->frame_start_found= pic_found; |
4526 |
pc->state= state; |
4527 |
return END_NOT_FOUND;
|
4528 |
} |
4529 |
|
4530 |
static int vc1_parse(AVCodecParserContext *s, |
4531 |
AVCodecContext *avctx, |
4532 |
uint8_t **poutbuf, int *poutbuf_size,
|
4533 |
const uint8_t *buf, int buf_size) |
4534 |
{ |
4535 |
ParseContext *pc = s->priv_data; |
4536 |
int next;
|
4537 |
|
4538 |
if(s->flags & PARSER_FLAG_COMPLETE_FRAMES){
|
4539 |
next= buf_size; |
4540 |
}else{
|
4541 |
next= vc1_find_frame_end(pc, buf, buf_size); |
4542 |
|
4543 |
if (ff_combine_frame(pc, next, (uint8_t **)&buf, &buf_size) < 0) { |
4544 |
*poutbuf = NULL;
|
4545 |
*poutbuf_size = 0;
|
4546 |
return buf_size;
|
4547 |
} |
4548 |
} |
4549 |
*poutbuf = (uint8_t *)buf; |
4550 |
*poutbuf_size = buf_size; |
4551 |
return next;
|
4552 |
} |
4553 |
|
4554 |
int vc1_split(AVCodecContext *avctx,
|
4555 |
const uint8_t *buf, int buf_size) |
4556 |
{ |
4557 |
int i;
|
4558 |
uint32_t state= -1;
|
4559 |
|
4560 |
for(i=0; i<buf_size; i++){ |
4561 |
state= (state<<8) | buf[i];
|
4562 |
if(IS_MARKER(state) && state != VC1_CODE_SEQHDR && state != VC1_CODE_ENTRYPOINT)
|
4563 |
return i-3; |
4564 |
} |
4565 |
return 0; |
4566 |
} |
4567 |
|
4568 |
AVCodecParser vc1_parser = { |
4569 |
{ CODEC_ID_VC1 }, |
4570 |
sizeof(ParseContext1),
|
4571 |
NULL,
|
4572 |
vc1_parse, |
4573 |
ff_parse1_close, |
4574 |
vc1_split, |
4575 |
}; |
4576 |
#endif /* CONFIG_VC1_PARSER */ |