ffmpeg / libavcodec / mpeg12.c @ d375c104
/*
 * MPEG-1/2 decoder
 * Copyright (c) 2000,2001 Fabrice Bellard
 * Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/**
 * @file
 * MPEG-1/2 decoder
 */

//#define DEBUG
#include "internal.h"
#include "avcodec.h"
#include "dsputil.h"
#include "mpegvideo.h"

#include "mpeg12.h"
#include "mpeg12data.h"
#include "mpeg12decdata.h"
#include "bytestream.h"
#include "vdpau_internal.h"
#include "xvmc_internal.h"
#include "thread.h"

//#undef NDEBUG
//#include <assert.h>


#define MV_VLC_BITS 9
#define MBINCR_VLC_BITS 9
#define MB_PAT_VLC_BITS 9
#define MB_PTYPE_VLC_BITS 6
#define MB_BTYPE_VLC_BITS 6

static inline int mpeg1_decode_block_intra(MpegEncContext *s,
                                           DCTELEM *block,
                                           int n);
static inline int mpeg1_decode_block_inter(MpegEncContext *s,
                                           DCTELEM *block,
                                           int n);
static inline int mpeg1_fast_decode_block_inter(MpegEncContext *s, DCTELEM *block, int n);
static inline int mpeg2_decode_block_non_intra(MpegEncContext *s,
                                               DCTELEM *block,
                                               int n);
static inline int mpeg2_decode_block_intra(MpegEncContext *s,
                                           DCTELEM *block,
                                           int n);
static inline int mpeg2_fast_decode_block_non_intra(MpegEncContext *s, DCTELEM *block, int n);
static inline int mpeg2_fast_decode_block_intra(MpegEncContext *s, DCTELEM *block, int n);
static int mpeg_decode_motion(MpegEncContext *s, int fcode, int pred);
static void exchange_uv(MpegEncContext *s);

static const enum PixelFormat pixfmt_xvmc_mpg2_420[] = {
                                           PIX_FMT_XVMC_MPEG2_IDCT,
                                           PIX_FMT_XVMC_MPEG2_MC,
                                           PIX_FMT_NONE};

uint8_t ff_mpeg12_static_rl_table_store[2][2][2*MAX_RUN + MAX_LEVEL + 3];


#define INIT_2D_VLC_RL(rl, static_size)\
{\
    static RL_VLC_ELEM rl_vlc_table[static_size];\
    INIT_VLC_STATIC(&rl.vlc, TEX_VLC_BITS, rl.n + 2,\
                    &rl.table_vlc[0][1], 4, 2,\
                    &rl.table_vlc[0][0], 4, 2, static_size);\
\
    rl.rl_vlc[0]= rl_vlc_table;\
    init_2d_vlc_rl(&rl);\
}

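/*
 * Convert the run/level tables of an RLTable into the combined rl_vlc form
 * used by GET_RL_VLC in the block decoders below.  The sentinel values match
 * the checks made there: level 127 marks end-of-block, run 65 with level 0
 * marks the escape code, and run 65 with level MAX_LEVEL an illegal code.
 */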
static void init_2d_vlc_rl(RLTable *rl)
{
    int i;

    for(i=0; i<rl->vlc.table_size; i++){
        int code= rl->vlc.table[i][0];
        int len = rl->vlc.table[i][1];
        int level, run;

        if(len==0){ // illegal code
            run= 65;
            level= MAX_LEVEL;
        }else if(len<0){ //more bits needed
            run= 0;
            level= code;
        }else{
            if(code==rl->n){ //esc
                run= 65;
                level= 0;
            }else if(code==rl->n+1){ //eob
                run= 0;
                level= 127;
            }else{
                run=   rl->table_run  [code] + 1;
                level= rl->table_level[code];
            }
        }
        rl->rl_vlc[0][i].len= len;
        rl->rl_vlc[0][i].level= level;
        rl->rl_vlc[0][i].run= run;
    }
}

void ff_mpeg12_common_init(MpegEncContext *s)
{

    s->y_dc_scale_table=
    s->c_dc_scale_table= ff_mpeg2_dc_scale_table[s->intra_dc_precision];

}

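/*
 * Reset the DC and motion vector predictors to their start-of-slice values;
 * the DC reset value depends on intra_dc_precision (128 in the 8-bit
 * MPEG-1 case).
 */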
void ff_mpeg1_clean_buffers(MpegEncContext *s){
    s->last_dc[0] = 1 << (7 + s->intra_dc_precision);
    s->last_dc[1] = s->last_dc[0];
    s->last_dc[2] = s->last_dc[0];
    memset(s->last_mv, 0, sizeof(s->last_mv));
}


/******************************************/
/* decoding */

VLC ff_dc_lum_vlc;
VLC ff_dc_chroma_vlc;

static VLC mv_vlc;
static VLC mbincr_vlc;
static VLC mb_ptype_vlc;
static VLC mb_btype_vlc;
static VLC mb_pat_vlc;

av_cold void ff_mpeg12_init_vlcs(void)
{
    static int done = 0;

    if (!done) {
        done = 1;

        INIT_VLC_STATIC(&ff_dc_lum_vlc, DC_VLC_BITS, 12,
                        ff_mpeg12_vlc_dc_lum_bits, 1, 1,
                        ff_mpeg12_vlc_dc_lum_code, 2, 2, 512);
        INIT_VLC_STATIC(&ff_dc_chroma_vlc, DC_VLC_BITS, 12,
                        ff_mpeg12_vlc_dc_chroma_bits, 1, 1,
                        ff_mpeg12_vlc_dc_chroma_code, 2, 2, 514);
        INIT_VLC_STATIC(&mv_vlc, MV_VLC_BITS, 17,
                        &ff_mpeg12_mbMotionVectorTable[0][1], 2, 1,
                        &ff_mpeg12_mbMotionVectorTable[0][0], 2, 1, 518);
        INIT_VLC_STATIC(&mbincr_vlc, MBINCR_VLC_BITS, 36,
                        &ff_mpeg12_mbAddrIncrTable[0][1], 2, 1,
                        &ff_mpeg12_mbAddrIncrTable[0][0], 2, 1, 538);
        INIT_VLC_STATIC(&mb_pat_vlc, MB_PAT_VLC_BITS, 64,
                        &ff_mpeg12_mbPatTable[0][1], 2, 1,
                        &ff_mpeg12_mbPatTable[0][0], 2, 1, 512);

        INIT_VLC_STATIC(&mb_ptype_vlc, MB_PTYPE_VLC_BITS, 7,
                        &table_mb_ptype[0][1], 2, 1,
                        &table_mb_ptype[0][0], 2, 1, 64);
        INIT_VLC_STATIC(&mb_btype_vlc, MB_BTYPE_VLC_BITS, 11,
                        &table_mb_btype[0][1], 2, 1,
                        &table_mb_btype[0][0], 2, 1, 64);
        init_rl(&ff_rl_mpeg1, ff_mpeg12_static_rl_table_store[0]);
        init_rl(&ff_rl_mpeg2, ff_mpeg12_static_rl_table_store[1]);

        INIT_2D_VLC_RL(ff_rl_mpeg1, 680);
        INIT_2D_VLC_RL(ff_rl_mpeg2, 674);
    }
}

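/* Read one dual-prime differential motion vector component: 0, +1 or -1. */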
static inline int get_dmv(MpegEncContext *s)
{
    if(get_bits1(&s->gb))
        return 1 - (get_bits1(&s->gb) << 1);
    else
        return 0;
}

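/*
 * Read the 5-bit quantiser_scale_code and map it to a quantiser scale:
 * through the non-linear table when q_scale_type is set, otherwise by
 * doubling the code (linear scale).
 */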
static inline int get_qscale(MpegEncContext *s)
{
    int qscale = get_bits(&s->gb, 5);
    if (s->q_scale_type) {
        return non_linear_qscale[qscale];
    } else {
        return qscale << 1;
    }
}

/* motion type (for MPEG-2) */
#define MT_FIELD 1
#define MT_FRAME 2
#define MT_16X8  2
#define MT_DMV   3

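/*
 * Decode one macroblock: skip-run handling, macroblock type, optional
 * quantiser update, motion vectors and coded blocks.  Returns 0 on success
 * or -1 on a damaged bitstream.
 */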
static int mpeg_decode_mb(MpegEncContext *s,
                          DCTELEM block[12][64])
{
    int i, j, k, cbp, val, mb_type, motion_type;
    const int mb_block_count = 4 + (1<< s->chroma_format);

    av_dlog(s->avctx, "decode_mb: x=%d y=%d\n", s->mb_x, s->mb_y);

    assert(s->mb_skipped==0);

    if (s->mb_skip_run-- != 0) {
        if (s->pict_type == FF_P_TYPE) {
            s->mb_skipped = 1;
            s->current_picture.mb_type[ s->mb_x + s->mb_y*s->mb_stride ]= MB_TYPE_SKIP | MB_TYPE_L0 | MB_TYPE_16x16;
        } else {
            int mb_type;

            if(s->mb_x)
                mb_type= s->current_picture.mb_type[ s->mb_x + s->mb_y*s->mb_stride - 1];
            else
                mb_type= s->current_picture.mb_type[ s->mb_width + (s->mb_y-1)*s->mb_stride - 1]; // FIXME not sure if this is allowed in MPEG at all
            if(IS_INTRA(mb_type))
                return -1;

            s->current_picture.mb_type[ s->mb_x + s->mb_y*s->mb_stride ]=
                mb_type | MB_TYPE_SKIP;
//            assert(s->current_picture.mb_type[ s->mb_x + s->mb_y*s->mb_stride - 1]&(MB_TYPE_16x16|MB_TYPE_16x8));

            if((s->mv[0][0][0]|s->mv[0][0][1]|s->mv[1][0][0]|s->mv[1][0][1])==0)
                s->mb_skipped = 1;
        }

        return 0;
    }

    switch(s->pict_type) {
    default:
    case FF_I_TYPE:
        if (get_bits1(&s->gb) == 0) {
            if (get_bits1(&s->gb) == 0){
                av_log(s->avctx, AV_LOG_ERROR, "invalid mb type in I Frame at %d %d\n", s->mb_x, s->mb_y);
                return -1;
            }
            mb_type = MB_TYPE_QUANT | MB_TYPE_INTRA;
        } else {
            mb_type = MB_TYPE_INTRA;
        }
        break;
    case FF_P_TYPE:
        mb_type = get_vlc2(&s->gb, mb_ptype_vlc.table, MB_PTYPE_VLC_BITS, 1);
        if (mb_type < 0){
            av_log(s->avctx, AV_LOG_ERROR, "invalid mb type in P Frame at %d %d\n", s->mb_x, s->mb_y);
            return -1;
        }
        mb_type = ptype2mb_type[ mb_type ];
        break;
    case FF_B_TYPE:
        mb_type = get_vlc2(&s->gb, mb_btype_vlc.table, MB_BTYPE_VLC_BITS, 1);
        if (mb_type < 0){
            av_log(s->avctx, AV_LOG_ERROR, "invalid mb type in B Frame at %d %d\n", s->mb_x, s->mb_y);
            return -1;
        }
        mb_type = btype2mb_type[ mb_type ];
        break;
    }
    av_dlog(s->avctx, "mb_type=%x\n", mb_type);
//    motion_type = 0; /* avoid warning */
    if (IS_INTRA(mb_type)) {
        s->dsp.clear_blocks(s->block[0]);

        if(!s->chroma_y_shift){
            s->dsp.clear_blocks(s->block[6]);
        }

        /* compute DCT type */
        if (s->picture_structure == PICT_FRAME && //FIXME add an interlaced_dct coded var?
            !s->frame_pred_frame_dct) {
            s->interlaced_dct = get_bits1(&s->gb);
        }

        if (IS_QUANT(mb_type))
            s->qscale = get_qscale(s);

        if (s->concealment_motion_vectors) {
            /* just parse them */
            if (s->picture_structure != PICT_FRAME)
                skip_bits1(&s->gb); /* field select */

            s->mv[0][0][0]= s->last_mv[0][0][0]= s->last_mv[0][1][0] =
                mpeg_decode_motion(s, s->mpeg_f_code[0][0], s->last_mv[0][0][0]);
            s->mv[0][0][1]= s->last_mv[0][0][1]= s->last_mv[0][1][1] =
                mpeg_decode_motion(s, s->mpeg_f_code[0][1], s->last_mv[0][0][1]);

            skip_bits1(&s->gb); /* marker */
        }else
            memset(s->last_mv, 0, sizeof(s->last_mv)); /* reset mv prediction */
        s->mb_intra = 1;
        //if 1, we memcpy blocks in xvmcvideo
        if(CONFIG_MPEG_XVMC_DECODER && s->avctx->xvmc_acceleration > 1){
            ff_xvmc_pack_pblocks(s,-1);//inter are always full blocks
            if(s->swap_uv){
                exchange_uv(s);
            }
        }

        if (s->codec_id == CODEC_ID_MPEG2VIDEO) {
            if(s->flags2 & CODEC_FLAG2_FAST){
                for(i=0;i<6;i++) {
                    mpeg2_fast_decode_block_intra(s, *s->pblocks[i], i);
                }
            }else{
                for(i=0;i<mb_block_count;i++) {
                    if (mpeg2_decode_block_intra(s, *s->pblocks[i], i) < 0)
                        return -1;
                }
            }
        } else {
            for(i=0;i<6;i++) {
                if (mpeg1_decode_block_intra(s, *s->pblocks[i], i) < 0)
                    return -1;
            }
        }
    } else {
        if (mb_type & MB_TYPE_ZERO_MV){
            assert(mb_type & MB_TYPE_CBP);

            s->mv_dir = MV_DIR_FORWARD;
            if(s->picture_structure == PICT_FRAME){
                if(!s->frame_pred_frame_dct)
                    s->interlaced_dct = get_bits1(&s->gb);
                s->mv_type = MV_TYPE_16X16;
            }else{
                s->mv_type = MV_TYPE_FIELD;
                mb_type |= MB_TYPE_INTERLACED;
                s->field_select[0][0]= s->picture_structure - 1;
            }

            if (IS_QUANT(mb_type))
                s->qscale = get_qscale(s);

            s->last_mv[0][0][0] = 0;
            s->last_mv[0][0][1] = 0;
            s->last_mv[0][1][0] = 0;
            s->last_mv[0][1][1] = 0;
            s->mv[0][0][0] = 0;
            s->mv[0][0][1] = 0;
        }else{
            assert(mb_type & MB_TYPE_L0L1);
            //FIXME decide if MBs in field pictures are MB_TYPE_INTERLACED
            /* get additional motion vector type */
            if (s->frame_pred_frame_dct)
                motion_type = MT_FRAME;
            else{
                motion_type = get_bits(&s->gb, 2);
                if (s->picture_structure == PICT_FRAME && HAS_CBP(mb_type))
                    s->interlaced_dct = get_bits1(&s->gb);
            }

            if (IS_QUANT(mb_type))
                s->qscale = get_qscale(s);

            /* motion vectors */
            s->mv_dir= (mb_type>>13)&3;
            av_dlog(s->avctx, "motion_type=%d\n", motion_type);
            switch(motion_type) {
            case MT_FRAME: /* or MT_16X8 */
                if (s->picture_structure == PICT_FRAME) {
                    mb_type |= MB_TYPE_16x16;
                    s->mv_type = MV_TYPE_16X16;
                    for(i=0;i<2;i++) {
                        if (USES_LIST(mb_type, i)) {
                            /* MT_FRAME */
                            s->mv[i][0][0]= s->last_mv[i][0][0]= s->last_mv[i][1][0] =
                                mpeg_decode_motion(s, s->mpeg_f_code[i][0], s->last_mv[i][0][0]);
                            s->mv[i][0][1]= s->last_mv[i][0][1]= s->last_mv[i][1][1] =
                                mpeg_decode_motion(s, s->mpeg_f_code[i][1], s->last_mv[i][0][1]);
                            /* full_pel: only for MPEG-1 */
                            if (s->full_pel[i]){
                                s->mv[i][0][0] <<= 1;
                                s->mv[i][0][1] <<= 1;
                            }
                        }
                    }
                } else {
                    mb_type |= MB_TYPE_16x8 | MB_TYPE_INTERLACED;
                    s->mv_type = MV_TYPE_16X8;
                    for(i=0;i<2;i++) {
                        if (USES_LIST(mb_type, i)) {
                            /* MT_16X8 */
                            for(j=0;j<2;j++) {
                                s->field_select[i][j] = get_bits1(&s->gb);
                                for(k=0;k<2;k++) {
                                    val = mpeg_decode_motion(s, s->mpeg_f_code[i][k],
                                                             s->last_mv[i][j][k]);
                                    s->last_mv[i][j][k] = val;
                                    s->mv[i][j][k] = val;
                                }
                            }
                        }
                    }
                }
                break;
            case MT_FIELD:
                s->mv_type = MV_TYPE_FIELD;
                if (s->picture_structure == PICT_FRAME) {
                    mb_type |= MB_TYPE_16x8 | MB_TYPE_INTERLACED;
                    for(i=0;i<2;i++) {
                        if (USES_LIST(mb_type, i)) {
                            for(j=0;j<2;j++) {
                                s->field_select[i][j] = get_bits1(&s->gb);
                                val = mpeg_decode_motion(s, s->mpeg_f_code[i][0],
                                                         s->last_mv[i][j][0]);
                                s->last_mv[i][j][0] = val;
                                s->mv[i][j][0] = val;
                                av_dlog(s->avctx, "fmx=%d\n", val);
                                val = mpeg_decode_motion(s, s->mpeg_f_code[i][1],
                                                         s->last_mv[i][j][1] >> 1);
                                s->last_mv[i][j][1] = val << 1;
                                s->mv[i][j][1] = val;
                                av_dlog(s->avctx, "fmy=%d\n", val);
                            }
                        }
                    }
                } else {
                    mb_type |= MB_TYPE_16x16 | MB_TYPE_INTERLACED;
                    for(i=0;i<2;i++) {
                        if (USES_LIST(mb_type, i)) {
                            s->field_select[i][0] = get_bits1(&s->gb);
                            for(k=0;k<2;k++) {
                                val = mpeg_decode_motion(s, s->mpeg_f_code[i][k],
                                                         s->last_mv[i][0][k]);
                                s->last_mv[i][0][k] = val;
                                s->last_mv[i][1][k] = val;
                                s->mv[i][0][k] = val;
                            }
                        }
                    }
                }
                break;
            case MT_DMV:
                s->mv_type = MV_TYPE_DMV;
                for(i=0;i<2;i++) {
                    if (USES_LIST(mb_type, i)) {
                        int dmx, dmy, mx, my, m;
                        const int my_shift= s->picture_structure == PICT_FRAME;

                        mx = mpeg_decode_motion(s, s->mpeg_f_code[i][0],
                                                s->last_mv[i][0][0]);
                        s->last_mv[i][0][0] = mx;
                        s->last_mv[i][1][0] = mx;
                        dmx = get_dmv(s);
                        my = mpeg_decode_motion(s, s->mpeg_f_code[i][1],
                                                s->last_mv[i][0][1] >> my_shift);
                        dmy = get_dmv(s);


                        s->last_mv[i][0][1] = my<<my_shift;
                        s->last_mv[i][1][1] = my<<my_shift;

                        s->mv[i][0][0] = mx;
                        s->mv[i][0][1] = my;
                        s->mv[i][1][0] = mx;//not used
                        s->mv[i][1][1] = my;//not used

                        if (s->picture_structure == PICT_FRAME) {
                            mb_type |= MB_TYPE_16x16 | MB_TYPE_INTERLACED;

                            //m = 1 + 2 * s->top_field_first;
                            m = s->top_field_first ? 1 : 3;

                            /* top -> top pred */
                            s->mv[i][2][0] = ((mx * m + (mx > 0)) >> 1) + dmx;
                            s->mv[i][2][1] = ((my * m + (my > 0)) >> 1) + dmy - 1;
                            m = 4 - m;
                            s->mv[i][3][0] = ((mx * m + (mx > 0)) >> 1) + dmx;
                            s->mv[i][3][1] = ((my * m + (my > 0)) >> 1) + dmy + 1;
                        } else {
                            mb_type |= MB_TYPE_16x16;

                            s->mv[i][2][0] = ((mx + (mx > 0)) >> 1) + dmx;
                            s->mv[i][2][1] = ((my + (my > 0)) >> 1) + dmy;
                            if(s->picture_structure == PICT_TOP_FIELD)
                                s->mv[i][2][1]--;
                            else
                                s->mv[i][2][1]++;
                        }
                    }
                }
                break;
            default:
                av_log(s->avctx, AV_LOG_ERROR, "00 motion_type at %d %d\n", s->mb_x, s->mb_y);
                return -1;
            }
        }

        s->mb_intra = 0;
        if (HAS_CBP(mb_type)) {
            s->dsp.clear_blocks(s->block[0]);

            cbp = get_vlc2(&s->gb, mb_pat_vlc.table, MB_PAT_VLC_BITS, 1);
            if(mb_block_count > 6){
                 cbp<<= mb_block_count-6;
                 cbp |= get_bits(&s->gb, mb_block_count-6);
                 s->dsp.clear_blocks(s->block[6]);
            }
            if (cbp <= 0){
                av_log(s->avctx, AV_LOG_ERROR, "invalid cbp at %d %d\n", s->mb_x, s->mb_y);
                return -1;
            }

            //if 1, we memcpy blocks in xvmcvideo
            if(CONFIG_MPEG_XVMC_DECODER && s->avctx->xvmc_acceleration > 1){
                ff_xvmc_pack_pblocks(s,cbp);
                if(s->swap_uv){
                    exchange_uv(s);
                }
            }

            if (s->codec_id == CODEC_ID_MPEG2VIDEO) {
                if(s->flags2 & CODEC_FLAG2_FAST){
                    for(i=0;i<6;i++) {
                        if(cbp & 32) {
                            mpeg2_fast_decode_block_non_intra(s, *s->pblocks[i], i);
                        } else {
                            s->block_last_index[i] = -1;
                        }
                        cbp+=cbp;
                    }
                }else{
                    cbp<<= 12-mb_block_count;

                    for(i=0;i<mb_block_count;i++) {
                        if ( cbp & (1<<11) ) {
                            if (mpeg2_decode_block_non_intra(s, *s->pblocks[i], i) < 0)
                                return -1;
                        } else {
                            s->block_last_index[i] = -1;
                        }
                        cbp+=cbp;
                    }
                }
            } else {
                if(s->flags2 & CODEC_FLAG2_FAST){
                    for(i=0;i<6;i++) {
                        if (cbp & 32) {
                            mpeg1_fast_decode_block_inter(s, *s->pblocks[i], i);
                        } else {
                            s->block_last_index[i] = -1;
                        }
                        cbp+=cbp;
                    }
                }else{
                    for(i=0;i<6;i++) {
                        if (cbp & 32) {
                            if (mpeg1_decode_block_inter(s, *s->pblocks[i], i) < 0)
                                return -1;
                        } else {
                            s->block_last_index[i] = -1;
                        }
                        cbp+=cbp;
                    }
                }
            }
        }else{
            for(i=0;i<12;i++)
                s->block_last_index[i] = -1;
        }
    }

    s->current_picture.mb_type[ s->mb_x + s->mb_y*s->mb_stride ]= mb_type;

    return 0;
}

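/*
 * Decode one motion vector component relative to the prediction 'pred' and
 * wrap it into the range implied by fcode (modulo decoding); a damaged VLC
 * yields 0xffff.
 */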
/* as H.263, but only 17 codes */
static int mpeg_decode_motion(MpegEncContext *s, int fcode, int pred)
{
    int code, sign, val, l, shift;

    code = get_vlc2(&s->gb, mv_vlc.table, MV_VLC_BITS, 2);
    if (code == 0) {
        return pred;
    }
    if (code < 0) {
        return 0xffff;
    }

    sign = get_bits1(&s->gb);
    shift = fcode - 1;
    val = code;
    if (shift) {
        val = (val - 1) << shift;
        val |= get_bits(&s->gb, shift);
        val++;
    }
    if (sign)
        val = -val;
    val += pred;

    /* modulo decoding */
    l= INT_BIT - 5 - shift;
    val = (val<<l)>>l;
    return val;
}

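/*
 * MPEG-1 intra block decoding: the DC coefficient comes from the per
 * component DC predictor, AC coefficients are read with the combined
 * run/level VLC, dequantized with the intra matrix and written in scan
 * order until the end-of-block code (level 127) is found.
 */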
static inline int mpeg1_decode_block_intra(MpegEncContext *s,
                                           DCTELEM *block,
                                           int n)
{
    int level, dc, diff, i, j, run;
    int component;
    RLTable *rl = &ff_rl_mpeg1;
    uint8_t * const scantable= s->intra_scantable.permutated;
    const uint16_t *quant_matrix= s->intra_matrix;
    const int qscale= s->qscale;

    /* DC coefficient */
    component = (n <= 3 ? 0 : n - 4 + 1);
    diff = decode_dc(&s->gb, component);
    if (diff >= 0xffff)
        return -1;
    dc = s->last_dc[component];
    dc += diff;
    s->last_dc[component] = dc;
    block[0] = dc*quant_matrix[0];
    av_dlog(s->avctx, "dc=%d diff=%d\n", dc, diff);
    i = 0;
    {
        OPEN_READER(re, &s->gb);
        /* now quantify & encode AC coefficients */
        for(;;) {
            UPDATE_CACHE(re, &s->gb);
            GET_RL_VLC(level, run, re, &s->gb, rl->rl_vlc[0], TEX_VLC_BITS, 2, 0);

            if(level == 127){
                break;
            } else if(level != 0) {
                i += run;
                j = scantable[i];
                level= (level*qscale*quant_matrix[j])>>4;
                level= (level-1)|1;
                level = (level ^ SHOW_SBITS(re, &s->gb, 1)) - SHOW_SBITS(re, &s->gb, 1);
                LAST_SKIP_BITS(re, &s->gb, 1);
            } else {
                /* escape */
                run = SHOW_UBITS(re, &s->gb, 6)+1; LAST_SKIP_BITS(re, &s->gb, 6);
                UPDATE_CACHE(re, &s->gb);
                level = SHOW_SBITS(re, &s->gb, 8); SKIP_BITS(re, &s->gb, 8);
                if (level == -128) {
                    level = SHOW_UBITS(re, &s->gb, 8) - 256; LAST_SKIP_BITS(re, &s->gb, 8);
                } else if (level == 0) {
                    level = SHOW_UBITS(re, &s->gb, 8)      ; LAST_SKIP_BITS(re, &s->gb, 8);
                }
                i += run;
                j = scantable[i];
                if(level<0){
                    level= -level;
                    level= (level*qscale*quant_matrix[j])>>4;
                    level= (level-1)|1;
                    level= -level;
                }else{
                    level= (level*qscale*quant_matrix[j])>>4;
                    level= (level-1)|1;
                }
            }
            if (i > 63){
                av_log(s->avctx, AV_LOG_ERROR, "ac-tex damaged at %d %d\n", s->mb_x, s->mb_y);
                return -1;
            }

            block[j] = level;
        }
        CLOSE_READER(re, &s->gb);
    }
    s->block_last_index[n] = i;
    return 0;
}

int ff_mpeg1_decode_block_intra(MpegEncContext *s,
                                DCTELEM *block,
                                int n)
{
    return mpeg1_decode_block_intra(s, block, n);
}

static inline int mpeg1_decode_block_inter(MpegEncContext *s,
                                           DCTELEM *block,
                                           int n)
{
    int level, i, j, run;
    RLTable *rl = &ff_rl_mpeg1;
    uint8_t * const scantable= s->intra_scantable.permutated;
    const uint16_t *quant_matrix= s->inter_matrix;
    const int qscale= s->qscale;

    {
        OPEN_READER(re, &s->gb);
        i = -1;
        // special case for first coefficient, no need to add second VLC table
        UPDATE_CACHE(re, &s->gb);
        if (((int32_t)GET_CACHE(re, &s->gb)) < 0) {
            level= (3*qscale*quant_matrix[0])>>5;
            level= (level-1)|1;
            if(GET_CACHE(re, &s->gb)&0x40000000)
                level= -level;
            block[0] = level;
            i++;
            SKIP_BITS(re, &s->gb, 2);
            if(((int32_t)GET_CACHE(re, &s->gb)) <= (int32_t)0xBFFFFFFF)
                goto end;
        }
        /* now quantify & encode AC coefficients */
        for(;;) {
            GET_RL_VLC(level, run, re, &s->gb, rl->rl_vlc[0], TEX_VLC_BITS, 2, 0);

            if(level != 0) {
                i += run;
                j = scantable[i];
                level= ((level*2+1)*qscale*quant_matrix[j])>>5;
                level= (level-1)|1;
                level = (level ^ SHOW_SBITS(re, &s->gb, 1)) - SHOW_SBITS(re, &s->gb, 1);
                SKIP_BITS(re, &s->gb, 1);
            } else {
                /* escape */
                run = SHOW_UBITS(re, &s->gb, 6)+1; LAST_SKIP_BITS(re, &s->gb, 6);
                UPDATE_CACHE(re, &s->gb);
                level = SHOW_SBITS(re, &s->gb, 8); SKIP_BITS(re, &s->gb, 8);
                if (level == -128) {
                    level = SHOW_UBITS(re, &s->gb, 8) - 256; SKIP_BITS(re, &s->gb, 8);
                } else if (level == 0) {
                    level = SHOW_UBITS(re, &s->gb, 8)      ; SKIP_BITS(re, &s->gb, 8);
                }
                i += run;
                j = scantable[i];
                if(level<0){
                    level= -level;
                    level= ((level*2+1)*qscale*quant_matrix[j])>>5;
                    level= (level-1)|1;
                    level= -level;
                }else{
                    level= ((level*2+1)*qscale*quant_matrix[j])>>5;
                    level= (level-1)|1;
                }
            }
            if (i > 63){
                av_log(s->avctx, AV_LOG_ERROR, "ac-tex damaged at %d %d\n", s->mb_x, s->mb_y);
                return -1;
            }

            block[j] = level;
            if(((int32_t)GET_CACHE(re, &s->gb)) <= (int32_t)0xBFFFFFFF)
                break;
            UPDATE_CACHE(re, &s->gb);
        }
end:
        LAST_SKIP_BITS(re, &s->gb, 2);
        CLOSE_READER(re, &s->gb);
    }
    s->block_last_index[n] = i;
    return 0;
}

static inline int mpeg1_fast_decode_block_inter(MpegEncContext *s, DCTELEM *block, int n)
{
    int level, i, j, run;
    RLTable *rl = &ff_rl_mpeg1;
    uint8_t * const scantable= s->intra_scantable.permutated;
    const int qscale= s->qscale;

    {
        OPEN_READER(re, &s->gb);
        i = -1;
        // special case for first coefficient, no need to add second VLC table
        UPDATE_CACHE(re, &s->gb);
        if (((int32_t)GET_CACHE(re, &s->gb)) < 0) {
            level= (3*qscale)>>1;
            level= (level-1)|1;
            if(GET_CACHE(re, &s->gb)&0x40000000)
                level= -level;
            block[0] = level;
            i++;
            SKIP_BITS(re, &s->gb, 2);
            if(((int32_t)GET_CACHE(re, &s->gb)) <= (int32_t)0xBFFFFFFF)
                goto end;
        }

        /* now quantify & encode AC coefficients */
        for(;;) {
            GET_RL_VLC(level, run, re, &s->gb, rl->rl_vlc[0], TEX_VLC_BITS, 2, 0);

            if(level != 0) {
                i += run;
                j = scantable[i];
                level= ((level*2+1)*qscale)>>1;
                level= (level-1)|1;
                level = (level ^ SHOW_SBITS(re, &s->gb, 1)) - SHOW_SBITS(re, &s->gb, 1);
                SKIP_BITS(re, &s->gb, 1);
            } else {
                /* escape */
                run = SHOW_UBITS(re, &s->gb, 6)+1; LAST_SKIP_BITS(re, &s->gb, 6);
                UPDATE_CACHE(re, &s->gb);
                level = SHOW_SBITS(re, &s->gb, 8); SKIP_BITS(re, &s->gb, 8);
                if (level == -128) {
                    level = SHOW_UBITS(re, &s->gb, 8) - 256; SKIP_BITS(re, &s->gb, 8);
                } else if (level == 0) {
                    level = SHOW_UBITS(re, &s->gb, 8)      ; SKIP_BITS(re, &s->gb, 8);
                }
                i += run;
                j = scantable[i];
                if(level<0){
                    level= -level;
                    level= ((level*2+1)*qscale)>>1;
                    level= (level-1)|1;
                    level= -level;
                }else{
                    level= ((level*2+1)*qscale)>>1;
                    level= (level-1)|1;
                }
            }

            block[j] = level;
            if(((int32_t)GET_CACHE(re, &s->gb)) <= (int32_t)0xBFFFFFFF)
                break;
            UPDATE_CACHE(re, &s->gb);
        }
end:
        LAST_SKIP_BITS(re, &s->gb, 2);
        CLOSE_READER(re, &s->gb);
    }
    s->block_last_index[n] = i;
    return 0;
}


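/*
 * MPEG-2 non-intra block decoding.  Unlike the MPEG-1 variants above,
 * escape levels are 12-bit two's complement and the running 'mismatch'
 * parity is folded into coefficient 63 (mismatch control).
 */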
static inline int mpeg2_decode_block_non_intra(MpegEncContext *s,
                                               DCTELEM *block,
                                               int n)
{
    int level, i, j, run;
    RLTable *rl = &ff_rl_mpeg1;
    uint8_t * const scantable= s->intra_scantable.permutated;
    const uint16_t *quant_matrix;
    const int qscale= s->qscale;
    int mismatch;

    mismatch = 1;

    {
        OPEN_READER(re, &s->gb);
        i = -1;
        if (n < 4)
            quant_matrix = s->inter_matrix;
        else
            quant_matrix = s->chroma_inter_matrix;

        // special case for first coefficient, no need to add second VLC table
        UPDATE_CACHE(re, &s->gb);
        if (((int32_t)GET_CACHE(re, &s->gb)) < 0) {
            level= (3*qscale*quant_matrix[0])>>5;
            if(GET_CACHE(re, &s->gb)&0x40000000)
                level= -level;
            block[0] = level;
            mismatch ^= level;
            i++;
            SKIP_BITS(re, &s->gb, 2);
            if(((int32_t)GET_CACHE(re, &s->gb)) <= (int32_t)0xBFFFFFFF)
                goto end;
        }

        /* now quantify & encode AC coefficients */
        for(;;) {
            GET_RL_VLC(level, run, re, &s->gb, rl->rl_vlc[0], TEX_VLC_BITS, 2, 0);

            if(level != 0) {
                i += run;
                j = scantable[i];
                level= ((level*2+1)*qscale*quant_matrix[j])>>5;
                level = (level ^ SHOW_SBITS(re, &s->gb, 1)) - SHOW_SBITS(re, &s->gb, 1);
                SKIP_BITS(re, &s->gb, 1);
            } else {
                /* escape */
                run = SHOW_UBITS(re, &s->gb, 6)+1; LAST_SKIP_BITS(re, &s->gb, 6);
                UPDATE_CACHE(re, &s->gb);
                level = SHOW_SBITS(re, &s->gb, 12); SKIP_BITS(re, &s->gb, 12);

                i += run;
                j = scantable[i];
                if(level<0){
                    level= ((-level*2+1)*qscale*quant_matrix[j])>>5;
                    level= -level;
                }else{
                    level= ((level*2+1)*qscale*quant_matrix[j])>>5;
                }
            }
            if (i > 63){
                av_log(s->avctx, AV_LOG_ERROR, "ac-tex damaged at %d %d\n", s->mb_x, s->mb_y);
                return -1;
            }

            mismatch ^= level;
            block[j] = level;
            if(((int32_t)GET_CACHE(re, &s->gb)) <= (int32_t)0xBFFFFFFF)
                break;
            UPDATE_CACHE(re, &s->gb);
        }
end:
        LAST_SKIP_BITS(re, &s->gb, 2);
        CLOSE_READER(re, &s->gb);
    }
    block[63] ^= (mismatch & 1);

    s->block_last_index[n] = i;
    return 0;
}

static inline int mpeg2_fast_decode_block_non_intra(MpegEncContext *s,
                                                    DCTELEM *block,
                                                    int n)
{
    int level, i, j, run;
    RLTable *rl = &ff_rl_mpeg1;
    uint8_t * const scantable= s->intra_scantable.permutated;
    const int qscale= s->qscale;
    OPEN_READER(re, &s->gb);
    i = -1;

    // special case for first coefficient, no need to add second VLC table
    UPDATE_CACHE(re, &s->gb);
    if (((int32_t)GET_CACHE(re, &s->gb)) < 0) {
        level= (3*qscale)>>1;
        if(GET_CACHE(re, &s->gb)&0x40000000)
            level= -level;
        block[0] = level;
        i++;
        SKIP_BITS(re, &s->gb, 2);
        if(((int32_t)GET_CACHE(re, &s->gb)) <= (int32_t)0xBFFFFFFF)
            goto end;
    }

    /* now quantify & encode AC coefficients */
    for(;;) {
        GET_RL_VLC(level, run, re, &s->gb, rl->rl_vlc[0], TEX_VLC_BITS, 2, 0);

        if(level != 0) {
            i += run;
            j = scantable[i];
            level= ((level*2+1)*qscale)>>1;
            level = (level ^ SHOW_SBITS(re, &s->gb, 1)) - SHOW_SBITS(re, &s->gb, 1);
            SKIP_BITS(re, &s->gb, 1);
        } else {
            /* escape */
            run = SHOW_UBITS(re, &s->gb, 6)+1; LAST_SKIP_BITS(re, &s->gb, 6);
            UPDATE_CACHE(re, &s->gb);
            level = SHOW_SBITS(re, &s->gb, 12); SKIP_BITS(re, &s->gb, 12);

            i += run;
            j = scantable[i];
            if(level<0){
                level= ((-level*2+1)*qscale)>>1;
                level= -level;
            }else{
                level= ((level*2+1)*qscale)>>1;
            }
        }

        block[j] = level;
        if(((int32_t)GET_CACHE(re, &s->gb)) <= (int32_t)0xBFFFFFFF)
            break;
        UPDATE_CACHE(re, &s->gb);
    }
end:
    LAST_SKIP_BITS(re, &s->gb, 2);
    CLOSE_READER(re, &s->gb);
    s->block_last_index[n] = i;
    return 0;
}


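/*
 * MPEG-2 intra block decoding: separate luma/chroma intra matrices and DC
 * predictors, DC scaled by intra_dc_precision, the alternative intra AC VLC
 * (ff_rl_mpeg2) when intra_vlc_format is set, and the same mismatch control
 * as the non-intra case.
 */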
static inline int mpeg2_decode_block_intra(MpegEncContext *s,
                                           DCTELEM *block,
                                           int n)
{
    int level, dc, diff, i, j, run;
    int component;
    RLTable *rl;
    uint8_t * const scantable= s->intra_scantable.permutated;
    const uint16_t *quant_matrix;
    const int qscale= s->qscale;
    int mismatch;

    /* DC coefficient */
    if (n < 4){
        quant_matrix = s->intra_matrix;
        component = 0;
    }else{
        quant_matrix = s->chroma_intra_matrix;
        component = (n&1) + 1;
    }
    diff = decode_dc(&s->gb, component);
    if (diff >= 0xffff)
        return -1;
    dc = s->last_dc[component];
    dc += diff;
    s->last_dc[component] = dc;
    block[0] = dc << (3 - s->intra_dc_precision);
    av_dlog(s->avctx, "dc=%d\n", block[0]);
    mismatch = block[0] ^ 1;
    i = 0;
    if (s->intra_vlc_format)
        rl = &ff_rl_mpeg2;
    else
        rl = &ff_rl_mpeg1;

    {
        OPEN_READER(re, &s->gb);
        /* now quantify & encode AC coefficients */
        for(;;) {
            UPDATE_CACHE(re, &s->gb);
            GET_RL_VLC(level, run, re, &s->gb, rl->rl_vlc[0], TEX_VLC_BITS, 2, 0);

            if(level == 127){
                break;
            } else if(level != 0) {
                i += run;
                j = scantable[i];
                level= (level*qscale*quant_matrix[j])>>4;
                level = (level ^ SHOW_SBITS(re, &s->gb, 1)) - SHOW_SBITS(re, &s->gb, 1);
                LAST_SKIP_BITS(re, &s->gb, 1);
            } else {
                /* escape */
                run = SHOW_UBITS(re, &s->gb, 6)+1; LAST_SKIP_BITS(re, &s->gb, 6);
                UPDATE_CACHE(re, &s->gb);
                level = SHOW_SBITS(re, &s->gb, 12); SKIP_BITS(re, &s->gb, 12);
                i += run;
                j = scantable[i];
                if(level<0){
                    level= (-level*qscale*quant_matrix[j])>>4;
                    level= -level;
                }else{
                    level= (level*qscale*quant_matrix[j])>>4;
                }
            }
            if (i > 63){
                av_log(s->avctx, AV_LOG_ERROR, "ac-tex damaged at %d %d\n", s->mb_x, s->mb_y);
                return -1;
            }

            mismatch^= level;
            block[j] = level;
        }
        CLOSE_READER(re, &s->gb);
    }
    block[63]^= mismatch&1;

    s->block_last_index[n] = i;
    return 0;
}

static inline int mpeg2_fast_decode_block_intra(MpegEncContext *s,
                                                DCTELEM *block,
                                                int n)
{
    int level, dc, diff, j, run;
    int component;
    RLTable *rl;
    uint8_t * scantable= s->intra_scantable.permutated;
    const uint16_t *quant_matrix;
    const int qscale= s->qscale;

    /* DC coefficient */
    if (n < 4){
        quant_matrix = s->intra_matrix;
        component = 0;
    }else{
        quant_matrix = s->chroma_intra_matrix;
        component = (n&1) + 1;
    }
    diff = decode_dc(&s->gb, component);
    if (diff >= 0xffff)
        return -1;
    dc = s->last_dc[component];
    dc += diff;
    s->last_dc[component] = dc;
    block[0] = dc << (3 - s->intra_dc_precision);
    if (s->intra_vlc_format)
        rl = &ff_rl_mpeg2;
    else
        rl = &ff_rl_mpeg1;

    {
        OPEN_READER(re, &s->gb);
        /* now quantify & encode AC coefficients */
        for(;;) {
            UPDATE_CACHE(re, &s->gb);
            GET_RL_VLC(level, run, re, &s->gb, rl->rl_vlc[0], TEX_VLC_BITS, 2, 0);

            if(level == 127){
                break;
            } else if(level != 0) {
                scantable += run;
                j = *scantable;
                level= (level*qscale*quant_matrix[j])>>4;
                level = (level ^ SHOW_SBITS(re, &s->gb, 1)) - SHOW_SBITS(re, &s->gb, 1);
                LAST_SKIP_BITS(re, &s->gb, 1);
            } else {
                /* escape */
                run = SHOW_UBITS(re, &s->gb, 6)+1; LAST_SKIP_BITS(re, &s->gb, 6);
                UPDATE_CACHE(re, &s->gb);
                level = SHOW_SBITS(re, &s->gb, 12); SKIP_BITS(re, &s->gb, 12);
                scantable += run;
                j = *scantable;
                if(level<0){
                    level= (-level*qscale*quant_matrix[j])>>4;
                    level= -level;
                }else{
                    level= (level*qscale*quant_matrix[j])>>4;
                }
            }

            block[j] = level;
        }
        CLOSE_READER(re, &s->gb);
    }

    s->block_last_index[n] = scantable - s->intra_scantable.permutated;
    return 0;
}

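/*
 * Decoder private context: wraps MpegEncContext and keeps the sequence-level
 * state (pan&scan, saved dimensions, MPEG-2 frame rate extension) that must
 * survive between headers.
 */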
typedef struct Mpeg1Context {
    MpegEncContext mpeg_enc_ctx;
    int mpeg_enc_ctx_allocated; /* true if decoding context allocated */
    int repeat_field; /* true if we must repeat the field */
    AVPanScan pan_scan;              /**< some temporary storage for the panscan */
    int slice_count;
    int swap_uv;//indicate VCR2
    int save_aspect_info;
    int save_width, save_height, save_progressive_seq;
    AVRational frame_rate_ext;       ///< MPEG-2 specific framerate modificator
    int sync;                        ///< Did we reach a sync point like a GOP/SEQ/KEYFrame?
} Mpeg1Context;

static av_cold int mpeg_decode_init(AVCodecContext *avctx)
{
    Mpeg1Context *s = avctx->priv_data;
    MpegEncContext *s2 = &s->mpeg_enc_ctx;
    int i;

    /* we need some permutation to store matrices,
     * until MPV_common_init() sets the real permutation. */
    for(i=0;i<64;i++)
       s2->dsp.idct_permutation[i]=i;

    MPV_decode_defaults(s2);

    s->mpeg_enc_ctx.avctx= avctx;
    s->mpeg_enc_ctx.flags= avctx->flags;
    s->mpeg_enc_ctx.flags2= avctx->flags2;
    ff_mpeg12_common_init(&s->mpeg_enc_ctx);
    ff_mpeg12_init_vlcs();

    s->mpeg_enc_ctx_allocated = 0;
    s->mpeg_enc_ctx.picture_number = 0;
    s->repeat_field = 0;
    s->mpeg_enc_ctx.codec_id= avctx->codec->id;
    avctx->color_range= AVCOL_RANGE_MPEG;
    if (avctx->codec->id == CODEC_ID_MPEG1VIDEO)
        avctx->chroma_sample_location = AVCHROMA_LOC_CENTER;
    else
        avctx->chroma_sample_location = AVCHROMA_LOC_LEFT;
    return 0;
}

static int mpeg_decode_update_thread_context(AVCodecContext *avctx, const AVCodecContext *avctx_from)
{
    Mpeg1Context *ctx = avctx->priv_data, *ctx_from = avctx_from->priv_data;
    MpegEncContext *s = &ctx->mpeg_enc_ctx, *s1 = &ctx_from->mpeg_enc_ctx;
    int err;

    if(avctx == avctx_from || !ctx_from->mpeg_enc_ctx_allocated || !s1->context_initialized)
        return 0;

    err = ff_mpeg_update_thread_context(avctx, avctx_from);
    if(err) return err;

    if(!ctx->mpeg_enc_ctx_allocated)
        memcpy(s + 1, s1 + 1, sizeof(Mpeg1Context) - sizeof(MpegEncContext));

    if(!(s->pict_type == FF_B_TYPE || s->low_delay))
        s->picture_number++;

    return 0;
}

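/*
 * Re-permute a quantization matrix from the old IDCT permutation to the new
 * one; needed because the matrices may be parsed before MPV_common_init()
 * selects the final permutation (see mpeg_decode_postinit below).
 */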
static void quant_matrix_rebuild(uint16_t *matrix, const uint8_t *old_perm,
                                 const uint8_t *new_perm){
    uint16_t temp_matrix[64];
    int i;

    memcpy(temp_matrix,matrix,64*sizeof(uint16_t));

    for(i=0;i<64;i++){
        matrix[new_perm[i]] = temp_matrix[old_perm[i]];
    }
}

static enum PixelFormat mpeg_get_pixelformat(AVCodecContext *avctx){
    Mpeg1Context *s1 = avctx->priv_data;
    MpegEncContext *s = &s1->mpeg_enc_ctx;

    if(avctx->xvmc_acceleration)
        return avctx->get_format(avctx,pixfmt_xvmc_mpg2_420);
    else if(avctx->codec->capabilities&CODEC_CAP_HWACCEL_VDPAU){
        if(avctx->codec_id == CODEC_ID_MPEG1VIDEO)
            return PIX_FMT_VDPAU_MPEG1;
        else
            return PIX_FMT_VDPAU_MPEG2;
    }else{
        if(s->chroma_format <  2)
            return avctx->get_format(avctx,ff_hwaccel_pixfmt_list_420);
        else if(s->chroma_format == 2)
            return PIX_FMT_YUV422P;
        else
            return PIX_FMT_YUV444P;
    }
}

/* Call this function when we know all parameters.
 * It may be called in different places for MPEG-1 and MPEG-2. */
static int mpeg_decode_postinit(AVCodecContext *avctx){
    Mpeg1Context *s1 = avctx->priv_data;
    MpegEncContext *s = &s1->mpeg_enc_ctx;
    uint8_t old_permutation[64];

    if (
        (s1->mpeg_enc_ctx_allocated == 0)||
        avctx->coded_width  != s->width ||
        avctx->coded_height != s->height||
        s1->save_width != s->width ||
        s1->save_height != s->height ||
        s1->save_aspect_info != s->aspect_ratio_info||
        s1->save_progressive_seq != s->progressive_sequence ||
        0)
    {

        if (s1->mpeg_enc_ctx_allocated) {
            ParseContext pc= s->parse_context;
            s->parse_context.buffer=0;
            MPV_common_end(s);
            s->parse_context= pc;
        }

        if( (s->width == 0 )||(s->height == 0))
            return -2;

        avcodec_set_dimensions(avctx, s->width, s->height);
        avctx->bit_rate = s->bit_rate;
        s1->save_aspect_info = s->aspect_ratio_info;
        s1->save_width = s->width;
        s1->save_height = s->height;
        s1->save_progressive_seq = s->progressive_sequence;

        /* low_delay may be forced, in this case we will have B-frames
         * that behave like P-frames. */
        avctx->has_b_frames = !(s->low_delay);

        assert((avctx->sub_id==1) == (avctx->codec_id==CODEC_ID_MPEG1VIDEO));
        if(avctx->codec_id==CODEC_ID_MPEG1VIDEO){
            //MPEG-1 fps
            avctx->time_base.den= ff_frame_rate_tab[s->frame_rate_index].num;
            avctx->time_base.num= ff_frame_rate_tab[s->frame_rate_index].den;
            //MPEG-1 aspect
            avctx->sample_aspect_ratio= av_d2q(
                    1.0/ff_mpeg1_aspect[s->aspect_ratio_info], 255);
            avctx->ticks_per_frame=1;
        }else{//MPEG-2
            //MPEG-2 fps
            av_reduce(
                &s->avctx->time_base.den,
                &s->avctx->time_base.num,
                ff_frame_rate_tab[s->frame_rate_index].num * s1->frame_rate_ext.num*2,
                ff_frame_rate_tab[s->frame_rate_index].den * s1->frame_rate_ext.den,
                1<<30);
            avctx->ticks_per_frame=2;
            //MPEG-2 aspect
            if(s->aspect_ratio_info > 1){
                //we ignore the spec here as reality does not match the spec, see for example
                // res_change_ffmpeg_aspect.ts and sequence-display-aspect.mpg
                if( (s1->pan_scan.width == 0 )||(s1->pan_scan.height == 0) || 1){
                    s->avctx->sample_aspect_ratio=
                        av_div_q(
                         ff_mpeg2_aspect[s->aspect_ratio_info],
                         (AVRational){s->width, s->height}
                         );
                }else{
                    s->avctx->sample_aspect_ratio=
                        av_div_q(
                         ff_mpeg2_aspect[s->aspect_ratio_info],
                         (AVRational){s1->pan_scan.width, s1->pan_scan.height}
                        );
                }
            }else{
                s->avctx->sample_aspect_ratio=
                    ff_mpeg2_aspect[s->aspect_ratio_info];
            }
        }//MPEG-2

        avctx->pix_fmt = mpeg_get_pixelformat(avctx);
        avctx->hwaccel = ff_find_hwaccel(avctx->codec->id, avctx->pix_fmt);
        //until then pix_fmt may be changed right after codec init
        if( avctx->pix_fmt == PIX_FMT_XVMC_MPEG2_IDCT ||
            avctx->hwaccel ||
            s->avctx->codec->capabilities&CODEC_CAP_HWACCEL_VDPAU )
            if( avctx->idct_algo == FF_IDCT_AUTO )
                avctx->idct_algo = FF_IDCT_SIMPLE;

        /* Quantization matrices may need reordering
         * if DCT permutation is changed. */
        memcpy(old_permutation,s->dsp.idct_permutation,64*sizeof(uint8_t));

        if (MPV_common_init(s) < 0)
            return -2;

        quant_matrix_rebuild(s->intra_matrix,        old_permutation,s->dsp.idct_permutation);
        quant_matrix_rebuild(s->inter_matrix,        old_permutation,s->dsp.idct_permutation);
        quant_matrix_rebuild(s->chroma_intra_matrix, old_permutation,s->dsp.idct_permutation);
        quant_matrix_rebuild(s->chroma_inter_matrix, old_permutation,s->dsp.idct_permutation);

        s1->mpeg_enc_ctx_allocated = 1;
    }
    return 0;
}

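/*
 * Parse an MPEG-1 picture header: temporal reference, picture coding type,
 * vbv_delay and, for P/B pictures, the full_pel flag and f_code.
 */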
static int mpeg1_decode_picture(AVCodecContext *avctx,
                                const uint8_t *buf, int buf_size)
{
    Mpeg1Context *s1 = avctx->priv_data;
    MpegEncContext *s = &s1->mpeg_enc_ctx;
    int ref, f_code, vbv_delay;

    init_get_bits(&s->gb, buf, buf_size*8);

    ref = get_bits(&s->gb, 10); /* temporal ref */
    s->pict_type = get_bits(&s->gb, 3);
    if(s->pict_type == 0 || s->pict_type > 3)
        return -1;

    vbv_delay= get_bits(&s->gb, 16);
    if (s->pict_type == FF_P_TYPE || s->pict_type == FF_B_TYPE) {
        s->full_pel[0] = get_bits1(&s->gb);
        f_code = get_bits(&s->gb, 3);
        if (f_code == 0 && avctx->error_recognition >= FF_ER_COMPLIANT)
            return -1;
        s->mpeg_f_code[0][0] = f_code;
        s->mpeg_f_code[0][1] = f_code;
    }
    if (s->pict_type == FF_B_TYPE) {
        s->full_pel[1] = get_bits1(&s->gb);
        f_code = get_bits(&s->gb, 3);
        if (f_code == 0 && avctx->error_recognition >= FF_ER_COMPLIANT)
            return -1;
        s->mpeg_f_code[1][0] = f_code;
        s->mpeg_f_code[1][1] = f_code;
    }
    s->current_picture.pict_type= s->pict_type;
    s->current_picture.key_frame= s->pict_type == FF_I_TYPE;

    if(avctx->debug & FF_DEBUG_PICT_INFO)
        av_log(avctx, AV_LOG_DEBUG, "vbv_delay %d, ref %d type:%d\n", vbv_delay, ref, s->pict_type);

    s->y_dc_scale = 8;
    s->c_dc_scale = 8;
    return 0;
}

static void mpeg_decode_sequence_extension(Mpeg1Context *s1)
{
    MpegEncContext *s= &s1->mpeg_enc_ctx;
    int horiz_size_ext, vert_size_ext;
    int bit_rate_ext;

    skip_bits(&s->gb, 1); /* profile and level esc*/
    s->avctx->profile= get_bits(&s->gb, 3);
    s->avctx->level= get_bits(&s->gb, 4);
    s->progressive_sequence = get_bits1(&s->gb); /* progressive_sequence */
    s->chroma_format = get_bits(&s->gb, 2); /* chroma_format 1=420, 2=422, 3=444 */
    horiz_size_ext = get_bits(&s->gb, 2);
    vert_size_ext = get_bits(&s->gb, 2);
    s->width  |= (horiz_size_ext << 12);
    s->height |= (vert_size_ext  << 12);
    bit_rate_ext = get_bits(&s->gb, 12);  /* XXX: handle it */
    s->bit_rate += (bit_rate_ext << 18) * 400;
    skip_bits1(&s->gb); /* marker */
    s->avctx->rc_buffer_size += get_bits(&s->gb, 8)*1024*16<<10;

    s->low_delay = get_bits1(&s->gb);
    if(s->flags & CODEC_FLAG_LOW_DELAY) s->low_delay=1;

    s1->frame_rate_ext.num = get_bits(&s->gb, 2)+1;
    s1->frame_rate_ext.den = get_bits(&s->gb, 5)+1;

    av_dlog(s->avctx, "sequence extension\n");
    s->codec_id= s->avctx->codec_id= CODEC_ID_MPEG2VIDEO;
    s->avctx->sub_id = 2; /* indicates MPEG-2 found */

    if(s->avctx->debug & FF_DEBUG_PICT_INFO)
        av_log(s->avctx, AV_LOG_DEBUG, "profile: %d, level: %d vbv buffer: %d, bitrate:%d\n",
               s->avctx->profile, s->avctx->level, s->avctx->rc_buffer_size, s->bit_rate);

}

static void mpeg_decode_sequence_display_extension(Mpeg1Context *s1)
{
    MpegEncContext *s= &s1->mpeg_enc_ctx;
    int color_description, w, h;

    skip_bits(&s->gb, 3); /* video format */
    color_description= get_bits1(&s->gb);
    if(color_description){
        s->avctx->color_primaries= get_bits(&s->gb, 8);
        s->avctx->color_trc      = get_bits(&s->gb, 8);
        s->avctx->colorspace     = get_bits(&s->gb, 8);
    }
    w= get_bits(&s->gb, 14);
    skip_bits(&s->gb, 1); //marker
    h= get_bits(&s->gb, 14);
    // remaining 3 bits are zero padding

    s1->pan_scan.width= 16*w;
    s1->pan_scan.height=16*h;

    if(s->avctx->debug & FF_DEBUG_PICT_INFO)
        av_log(s->avctx, AV_LOG_DEBUG, "sde w:%d, h:%d\n", w, h);
}

static void mpeg_decode_picture_display_extension(Mpeg1Context *s1)
{
    MpegEncContext *s= &s1->mpeg_enc_ctx;
    int i,nofco;

    nofco = 1;
    if(s->progressive_sequence){
        if(s->repeat_first_field){
            nofco++;
            if(s->top_field_first)
                nofco++;
        }
    }else{
        if(s->picture_structure == PICT_FRAME){
            nofco++;
            if(s->repeat_first_field)
                nofco++;
        }
    }
    for(i=0; i<nofco; i++){
        s1->pan_scan.position[i][0]= get_sbits(&s->gb, 16);
        skip_bits(&s->gb, 1); //marker
        s1->pan_scan.position[i][1]= get_sbits(&s->gb, 16);
        skip_bits(&s->gb, 1); //marker
    }

    if(s->avctx->debug & FF_DEBUG_PICT_INFO)
        av_log(s->avctx, AV_LOG_DEBUG, "pde (%d,%d) (%d,%d) (%d,%d)\n",
            s1->pan_scan.position[0][0], s1->pan_scan.position[0][1],
            s1->pan_scan.position[1][0], s1->pan_scan.position[1][1],
            s1->pan_scan.position[2][0], s1->pan_scan.position[2][1]
        );
}

static int load_matrix(MpegEncContext *s, uint16_t matrix0[64], uint16_t matrix1[64], int intra){
    int i;

    for(i=0; i<64; i++) {
        int j = s->dsp.idct_permutation[ ff_zigzag_direct[i] ];
        int v = get_bits(&s->gb, 8);
        if(v==0){
            av_log(s->avctx, AV_LOG_ERROR, "matrix damaged\n");
            return -1;
        }
        if(intra && i==0 && v!=8){
            av_log(s->avctx, AV_LOG_ERROR, "intra matrix invalid, ignoring\n");
            v= 8; // needed by pink.mpg / issue1046
        }
        matrix0[j] = v;
        if(matrix1)
            matrix1[j] = v;
    }
    return 0;
}

static void mpeg_decode_quant_matrix_extension(MpegEncContext *s)
{
    av_dlog(s->avctx, "matrix extension\n");

    if(get_bits1(&s->gb)) load_matrix(s, s->chroma_intra_matrix, s->intra_matrix, 1);
    if(get_bits1(&s->gb)) load_matrix(s, s->chroma_inter_matrix, s->inter_matrix, 0);
    if(get_bits1(&s->gb)) load_matrix(s, s->chroma_intra_matrix, NULL           , 1);
    if(get_bits1(&s->gb)) load_matrix(s, s->chroma_inter_matrix, NULL           , 0);
}

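/*
 * Parse the MPEG-2 picture coding extension (f_codes, DC precision, picture
 * structure, DCT/scan flags) and sanity-check combinations that would
 * otherwise break the decoder, guessing values where the stream is broken.
 */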
static void mpeg_decode_picture_coding_extension(Mpeg1Context *s1)
{
    MpegEncContext *s= &s1->mpeg_enc_ctx;

    s->full_pel[0] = s->full_pel[1] = 0;
    s->mpeg_f_code[0][0] = get_bits(&s->gb, 4);
    s->mpeg_f_code[0][1] = get_bits(&s->gb, 4);
    s->mpeg_f_code[1][0] = get_bits(&s->gb, 4);
    s->mpeg_f_code[1][1] = get_bits(&s->gb, 4);
    if(!s->pict_type && s1->mpeg_enc_ctx_allocated){
        av_log(s->avctx, AV_LOG_ERROR, "Missing picture start code, guessing missing values\n");
        if(s->mpeg_f_code[1][0] == 15 && s->mpeg_f_code[1][1]==15){
            if(s->mpeg_f_code[0][0] == 15 && s->mpeg_f_code[0][1] == 15)
                s->pict_type= FF_I_TYPE;
            else
                s->pict_type= FF_P_TYPE;
        }else
            s->pict_type= FF_B_TYPE;
        s->current_picture.pict_type= s->pict_type;
        s->current_picture.key_frame= s->pict_type == FF_I_TYPE;
    }
    s->intra_dc_precision = get_bits(&s->gb, 2);
    s->picture_structure = get_bits(&s->gb, 2);
    s->top_field_first = get_bits1(&s->gb);
    s->frame_pred_frame_dct = get_bits1(&s->gb);
    s->concealment_motion_vectors = get_bits1(&s->gb);
    s->q_scale_type = get_bits1(&s->gb);
    s->intra_vlc_format = get_bits1(&s->gb);
    s->alternate_scan = get_bits1(&s->gb);
    s->repeat_first_field = get_bits1(&s->gb);
    s->chroma_420_type = get_bits1(&s->gb);
    s->progressive_frame = get_bits1(&s->gb);

    if(s->progressive_sequence && !s->progressive_frame){
        s->progressive_frame= 1;
        av_log(s->avctx, AV_LOG_ERROR, "interlaced frame in progressive sequence, ignoring\n");
    }

    if(s->picture_structure==0 || (s->progressive_frame && s->picture_structure!=PICT_FRAME)){
        av_log(s->avctx, AV_LOG_ERROR, "picture_structure %d invalid, ignoring\n", s->picture_structure);
        s->picture_structure= PICT_FRAME;
    }

    if(s->progressive_sequence && !s->frame_pred_frame_dct){
        av_log(s->avctx, AV_LOG_ERROR, "invalid frame_pred_frame_dct\n");
        s->frame_pred_frame_dct= 1;
    }

    if(s->picture_structure == PICT_FRAME){
        s->first_field=0;
        s->v_edge_pos= 16*s->mb_height;
    }else{
        s->first_field ^= 1;
        s->v_edge_pos= 8*s->mb_height;
        memset(s->mbskip_table, 0, s->mb_stride*s->mb_height);
    }

    if(s->alternate_scan){
        ff_init_scantable(s->dsp.idct_permutation, &s->inter_scantable  , ff_alternate_vertical_scan);
        ff_init_scantable(s->dsp.idct_permutation, &s->intra_scantable  , ff_alternate_vertical_scan);
    }else{
        ff_init_scantable(s->dsp.idct_permutation, &s->inter_scantable  , ff_zigzag_direct);
        ff_init_scantable(s->dsp.idct_permutation, &s->intra_scantable  , ff_zigzag_direct);
    }

    /* composite display not parsed */
    av_dlog(s->avctx, "intra_dc_precision=%d\n", s->intra_dc_precision);
    av_dlog(s->avctx, "picture_structure=%d\n", s->picture_structure);
    av_dlog(s->avctx, "top field first=%d\n", s->top_field_first);
    av_dlog(s->avctx, "repeat first field=%d\n", s->repeat_first_field);
    av_dlog(s->avctx, "conceal=%d\n", s->concealment_motion_vectors);
    av_dlog(s->avctx, "intra_vlc_format=%d\n", s->intra_vlc_format);
    av_dlog(s->avctx, "alternate_scan=%d\n", s->alternate_scan);
    av_dlog(s->avctx, "frame_pred_frame_dct=%d\n", s->frame_pred_frame_dct);
    av_dlog(s->avctx, "progressive_frame=%d\n", s->progressive_frame);
}

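/* Swap the U and V block pointers; used for VCR2 streams (swap_uv). */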
static void exchange_uv(MpegEncContext *s){
    DCTELEM (*tmp)[64];

    tmp           = s->pblocks[4];
    s->pblocks[4] = s->pblocks[5];
    s->pblocks[5] = tmp;
}

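/*
 * Per-field setup: starts a new frame on the first field (or a frame
 * picture), computes repeat_pict from repeat_first_field/top_field_first,
 * and offsets the data pointers for the second field of a field picture.
 */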
static int mpeg_field_start(MpegEncContext *s, const uint8_t *buf, int buf_size){ |
1596 |
AVCodecContext *avctx= s->avctx; |
1597 |
Mpeg1Context *s1 = (Mpeg1Context*)s; |
1598 |
|
1599 |
/* start frame decoding */
|
1600 |
if(s->first_field || s->picture_structure==PICT_FRAME){
|
1601 |
if(MPV_frame_start(s, avctx) < 0) |
1602 |
return -1; |
1603 |
|
1604 |
ff_er_frame_start(s); |
1605 |
|
1606 |
/* first check if we must repeat the frame */
|
1607 |
s->current_picture_ptr->repeat_pict = 0;
|
1608 |
if (s->repeat_first_field) {
|
1609 |
if (s->progressive_sequence) {
|
1610 |
if (s->top_field_first)
|
1611 |
s->current_picture_ptr->repeat_pict = 4;
|
1612 |
else
|
1613 |
s->current_picture_ptr->repeat_pict = 2;
|
1614 |
} else if (s->progressive_frame) { |
1615 |
s->current_picture_ptr->repeat_pict = 1;
|
1616 |
} |
1617 |
} |
1618 |
|
1619 |
*s->current_picture_ptr->pan_scan= s1->pan_scan; |
1620 |
|
1621 |
if (HAVE_PTHREADS && avctx->active_thread_type&FF_THREAD_FRAME)
|
1622 |
ff_thread_finish_setup(avctx); |
1623 |
}else{ //second field |
1624 |
int i;
|
1625 |
|
1626 |
if(!s->current_picture_ptr){
|
1627 |
av_log(s->avctx, AV_LOG_ERROR, "first field missing\n");
|
1628 |
return -1; |
1629 |
} |
1630 |
|
1631 |
for(i=0; i<4; i++){ |
1632 |
s->current_picture.data[i] = s->current_picture_ptr->data[i]; |
1633 |
if(s->picture_structure == PICT_BOTTOM_FIELD){
|
1634 |
s->current_picture.data[i] += s->current_picture_ptr->linesize[i]; |
1635 |
} |
1636 |
} |
1637 |
} |
1638 |
|
1639 |
if (avctx->hwaccel) {
|
1640 |
if (avctx->hwaccel->start_frame(avctx, buf, buf_size) < 0) |
1641 |
return -1; |
1642 |
} |
1643 |
|
1644 |
// MPV_frame_start will call this function too,
|
1645 |
// but we need to call it on every field
|
1646 |
if(CONFIG_MPEG_XVMC_DECODER && s->avctx->xvmc_acceleration)
|
1647 |
if(ff_xvmc_field_start(s,avctx) < 0) |
1648 |
return -1; |
1649 |
|
1650 |
return 0; |
1651 |
} |
1652 |
|
1653 |
#define DECODE_SLICE_ERROR -1 |
1654 |
#define DECODE_SLICE_OK 0 |
1655 |
|
1656 |
/**
|
1657 |
* decodes a slice. MpegEncContext.mb_y must be set to the MB row from the startcode
|
1658 |
* @return DECODE_SLICE_ERROR if the slice is damaged<br>
|
1659 |
* DECODE_SLICE_OK if this slice is ok<br>
|
1660 |
*/
|
1661 |
static int mpeg_decode_slice(Mpeg1Context *s1, int mb_y,
                             const uint8_t **buf, int buf_size)
{
    MpegEncContext *s = &s1->mpeg_enc_ctx;
    AVCodecContext *avctx= s->avctx;
    const int field_pic= s->picture_structure != PICT_FRAME;
    const int lowres= s->avctx->lowres;

    s->resync_mb_x=
    s->resync_mb_y= -1;

    assert(mb_y < s->mb_height);

    init_get_bits(&s->gb, *buf, buf_size*8);

    ff_mpeg1_clean_buffers(s);
    s->interlaced_dct = 0;

    s->qscale = get_qscale(s);

    if(s->qscale == 0){
        av_log(s->avctx, AV_LOG_ERROR, "qscale == 0\n");
        return -1;
    }

    /* extra slice info */
    while (get_bits1(&s->gb) != 0) {
        skip_bits(&s->gb, 8);
    }

    s->mb_x=0;

    if(mb_y==0 && s->codec_tag == AV_RL32("SLIF")){
        skip_bits1(&s->gb);
    }else{
        for(;;) {
            int code = get_vlc2(&s->gb, mbincr_vlc.table, MBINCR_VLC_BITS, 2);
            if (code < 0){
                av_log(s->avctx, AV_LOG_ERROR, "first mb_incr damaged\n");
                return -1;
            }
            if (code >= 33) {
                if (code == 33) {
                    s->mb_x += 33;
                }
                /* otherwise, stuffing, nothing to do */
            } else {
                s->mb_x += code;
                break;
            }
        }
    }

    if(s->mb_x >= (unsigned)s->mb_width){
        av_log(s->avctx, AV_LOG_ERROR, "initial skip overflow\n");
        return -1;
    }

    if (avctx->hwaccel) {
        const uint8_t *buf_end, *buf_start = *buf - 4; /* include start_code */
        int start_code = -1;
        buf_end = ff_find_start_code(buf_start + 2, *buf + buf_size, &start_code);
        if (buf_end < *buf + buf_size)
            buf_end -= 4;
        s->mb_y = mb_y;
        if (avctx->hwaccel->decode_slice(avctx, buf_start, buf_end - buf_start) < 0)
            return DECODE_SLICE_ERROR;
        *buf = buf_end;
        return DECODE_SLICE_OK;
    }

    s->resync_mb_x= s->mb_x;
    s->resync_mb_y= s->mb_y= mb_y;
    s->mb_skip_run= 0;
    ff_init_block_index(s);

    if (s->mb_y==0 && s->mb_x==0 && (s->first_field || s->picture_structure==PICT_FRAME)) {
        if(s->avctx->debug&FF_DEBUG_PICT_INFO){
            av_log(s->avctx, AV_LOG_DEBUG, "qp:%d fc:%2d%2d%2d%2d %s %s %s %s %s dc:%d pstruct:%d fdct:%d cmv:%d qtype:%d ivlc:%d rff:%d %s\n",
                   s->qscale, s->mpeg_f_code[0][0],s->mpeg_f_code[0][1],s->mpeg_f_code[1][0],s->mpeg_f_code[1][1],
                   s->pict_type == FF_I_TYPE ? "I" : (s->pict_type == FF_P_TYPE ? "P" : (s->pict_type == FF_B_TYPE ? "B" : "S")),
                   s->progressive_sequence ? "ps" :"", s->progressive_frame ? "pf" : "", s->alternate_scan ? "alt" :"", s->top_field_first ? "top" :"",
                   s->intra_dc_precision, s->picture_structure, s->frame_pred_frame_dct, s->concealment_motion_vectors,
                   s->q_scale_type, s->intra_vlc_format, s->repeat_first_field, s->chroma_420_type ? "420" :"");
        }
    }

    for(;;) {
        //If 1, we memcpy blocks in xvmcvideo.
        if(CONFIG_MPEG_XVMC_DECODER && s->avctx->xvmc_acceleration > 1)
            ff_xvmc_init_block(s);//set s->block

        if(mpeg_decode_mb(s, s->block) < 0)
            return -1;

        if(s->current_picture.motion_val[0] && !s->encoding){ //note motion_val is normally NULL unless we want to extract the MVs
            const int wrap = s->b8_stride;
            int xy = s->mb_x*2 + s->mb_y*2*wrap;
            int b8_xy= 4*(s->mb_x + s->mb_y*s->mb_stride);
            int motion_x, motion_y, dir, i;

            for(i=0; i<2; i++){
                for(dir=0; dir<2; dir++){
                    if (s->mb_intra || (dir==1 && s->pict_type != FF_B_TYPE)) {
                        motion_x = motion_y = 0;
                    }else if (s->mv_type == MV_TYPE_16X16 || (s->mv_type == MV_TYPE_FIELD && field_pic)){
                        motion_x = s->mv[dir][0][0];
                        motion_y = s->mv[dir][0][1];
                    } else /*if ((s->mv_type == MV_TYPE_FIELD) || (s->mv_type == MV_TYPE_16X8))*/ {
                        motion_x = s->mv[dir][i][0];
                        motion_y = s->mv[dir][i][1];
                    }

                    s->current_picture.motion_val[dir][xy    ][0] = motion_x;
                    s->current_picture.motion_val[dir][xy    ][1] = motion_y;
                    s->current_picture.motion_val[dir][xy + 1][0] = motion_x;
                    s->current_picture.motion_val[dir][xy + 1][1] = motion_y;
                    s->current_picture.ref_index [dir][b8_xy    ]=
                    s->current_picture.ref_index [dir][b8_xy + 1]= s->field_select[dir][i];
                    assert(s->field_select[dir][i]==0 || s->field_select[dir][i]==1);
                }
                xy += wrap;
                b8_xy +=2;
            }
        }

        s->dest[0] += 16 >> lowres;
        s->dest[1] +=(16 >> lowres) >> s->chroma_x_shift;
        s->dest[2] +=(16 >> lowres) >> s->chroma_x_shift;

        MPV_decode_mb(s, s->block);

        if (++s->mb_x >= s->mb_width) {
            const int mb_size= 16>>s->avctx->lowres;

            ff_draw_horiz_band(s, mb_size*(s->mb_y>>field_pic), mb_size);
            MPV_report_decode_progress(s);

            s->mb_x = 0;
            s->mb_y += 1<<field_pic;

            if(s->mb_y >= s->mb_height){
                int left= get_bits_left(&s->gb);
                int is_d10= s->chroma_format==2 && s->pict_type==FF_I_TYPE && avctx->profile==0 && avctx->level==5
                            && s->intra_dc_precision == 2 && s->q_scale_type == 1 && s->alternate_scan == 0
                            && s->progressive_frame == 0 /* vbv_delay == 0xBBB || 0xE10*/;

                if(left < 0 || (left && show_bits(&s->gb, FFMIN(left, 23)) && !is_d10)
                   || (avctx->error_recognition >= FF_ER_AGGRESSIVE && left>8)){
                    av_log(avctx, AV_LOG_ERROR, "end mismatch left=%d %0X\n", left, show_bits(&s->gb, FFMIN(left, 23)));
                    return -1;
                }else
                    goto eos;
            }

            ff_init_block_index(s);
        }

        /* skip mb handling */
        if (s->mb_skip_run == -1) {
            /* read increment again */
            s->mb_skip_run = 0;
            for(;;) {
                int code = get_vlc2(&s->gb, mbincr_vlc.table, MBINCR_VLC_BITS, 2);
                if (code < 0){
                    av_log(s->avctx, AV_LOG_ERROR, "mb incr damaged\n");
                    return -1;
                }
                if (code >= 33) {
                    if (code == 33) {
                        s->mb_skip_run += 33;
                    }else if(code == 35){
                        if(s->mb_skip_run != 0 || show_bits(&s->gb, 15) != 0){
                            av_log(s->avctx, AV_LOG_ERROR, "slice mismatch\n");
                            return -1;
                        }
                        goto eos; /* end of slice */
                    }
                    /* otherwise, stuffing, nothing to do */
                } else {
                    s->mb_skip_run += code;
                    break;
                }
            }
            if(s->mb_skip_run){
                int i;
                if(s->pict_type == FF_I_TYPE){
                    av_log(s->avctx, AV_LOG_ERROR, "skipped MB in I frame at %d %d\n", s->mb_x, s->mb_y);
                    return -1;
                }

                /* skip mb */
                s->mb_intra = 0;
                for(i=0;i<12;i++)
                    s->block_last_index[i] = -1;
                if(s->picture_structure == PICT_FRAME)
                    s->mv_type = MV_TYPE_16X16;
                else
                    s->mv_type = MV_TYPE_FIELD;
                if (s->pict_type == FF_P_TYPE) {
                    /* if P type, zero motion vector is implied */
                    s->mv_dir = MV_DIR_FORWARD;
                    s->mv[0][0][0] = s->mv[0][0][1] = 0;
                    s->last_mv[0][0][0] = s->last_mv[0][0][1] = 0;
                    s->last_mv[0][1][0] = s->last_mv[0][1][1] = 0;
                    s->field_select[0][0]= (s->picture_structure - 1) & 1;
                } else {
                    /* if B type, reuse previous vectors and directions */
                    s->mv[0][0][0] = s->last_mv[0][0][0];
                    s->mv[0][0][1] = s->last_mv[0][0][1];
                    s->mv[1][0][0] = s->last_mv[1][0][0];
                    s->mv[1][0][1] = s->last_mv[1][0][1];
                }
            }
        }
    }
eos: // end of slice
    *buf += (get_bits_count(&s->gb)-1)/8;
//printf("y %d %d %d %d\n", s->resync_mb_x, s->resync_mb_y, s->mb_x, s->mb_y);
    return 0;
}

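/**
 * Slice-threading worker: decode every slice assigned to this thread
 * context and report each one to the error resilience code.
 */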
static int slice_decode_thread(AVCodecContext *c, void *arg){
    MpegEncContext *s= *(void**)arg;
    const uint8_t *buf= s->gb.buffer;
    int mb_y= s->start_mb_y;
    const int field_pic= s->picture_structure != PICT_FRAME;

    s->error_count= (3*(s->end_mb_y - s->start_mb_y)*s->mb_width) >> field_pic;

    for(;;){
        uint32_t start_code;
        int ret;

        ret= mpeg_decode_slice((Mpeg1Context*)s, mb_y, &buf, s->gb.buffer_end - buf);
        emms_c();
//av_log(c, AV_LOG_DEBUG, "ret:%d resync:%d/%d mb:%d/%d ts:%d/%d ec:%d\n",
//ret, s->resync_mb_x, s->resync_mb_y, s->mb_x, s->mb_y, s->start_mb_y, s->end_mb_y, s->error_count);
        if(ret < 0){
            if(s->resync_mb_x>=0 && s->resync_mb_y>=0)
                ff_er_add_slice(s, s->resync_mb_x, s->resync_mb_y, s->mb_x, s->mb_y, AC_ERROR|DC_ERROR|MV_ERROR);
        }else{
            ff_er_add_slice(s, s->resync_mb_x, s->resync_mb_y, s->mb_x-1, s->mb_y, AC_END|DC_END|MV_END);
        }

        if(s->mb_y == s->end_mb_y)
            return 0;

        start_code= -1;
        buf = ff_find_start_code(buf, s->gb.buffer_end, &start_code);
        mb_y= start_code - SLICE_MIN_START_CODE;
        if(mb_y < 0 || mb_y >= s->end_mb_y)
            return -1;
    }

    return 0; //not reached
}

/**
 * Handle slice ends.
 * @return 1 if it seems to be the last slice
 */
static int slice_end(AVCodecContext *avctx, AVFrame *pict)
{
    Mpeg1Context *s1 = avctx->priv_data;
    MpegEncContext *s = &s1->mpeg_enc_ctx;

    if (!s1->mpeg_enc_ctx_allocated || !s->current_picture_ptr)
        return 0;

    if (s->avctx->hwaccel) {
        if (s->avctx->hwaccel->end_frame(s->avctx) < 0)
            av_log(avctx, AV_LOG_ERROR, "hardware accelerator failed to decode picture\n");
    }

    if(CONFIG_MPEG_XVMC_DECODER && s->avctx->xvmc_acceleration)
        ff_xvmc_field_end(s);

    /* end of slice reached */
    if (/*s->mb_y<<field_pic == s->mb_height &&*/ !s->first_field) {
        /* end of image */

        s->current_picture_ptr->qscale_type= FF_QSCALE_TYPE_MPEG2;

        ff_er_frame_end(s);

        MPV_frame_end(s);

        if (s->pict_type == FF_B_TYPE || s->low_delay) {
            *pict= *(AVFrame*)s->current_picture_ptr;
            ff_print_debug_info(s, pict);
        } else {
            if (avctx->active_thread_type&FF_THREAD_FRAME)
                s->picture_number++;
            /* latency of 1 frame for I- and P-frames */
            /* XXX: use another variable than picture_number */
            if (s->last_picture_ptr != NULL) {
                *pict= *(AVFrame*)s->last_picture_ptr;
                ff_print_debug_info(s, pict);
            }
        }

        return 1;
    } else {
        return 0;
    }
}

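/**
 * Parse an MPEG-1 sequence header (picture size, aspect ratio, frame rate,
 * bit rate, VBV buffer size and the optional quantization matrices) and set
 * the MPEG-2 specific fields to values that emulate MPEG-1.
 */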
static int mpeg1_decode_sequence(AVCodecContext *avctx,
                                 const uint8_t *buf, int buf_size)
{
    Mpeg1Context *s1 = avctx->priv_data;
    MpegEncContext *s = &s1->mpeg_enc_ctx;
    int width,height;
    int i, v, j;

    init_get_bits(&s->gb, buf, buf_size*8);

    width = get_bits(&s->gb, 12);
    height = get_bits(&s->gb, 12);
    if (width <= 0 || height <= 0)
        return -1;
    s->aspect_ratio_info= get_bits(&s->gb, 4);
    if (s->aspect_ratio_info == 0) {
        av_log(avctx, AV_LOG_ERROR, "aspect ratio has forbidden 0 value\n");
        if (avctx->error_recognition >= FF_ER_COMPLIANT)
            return -1;
    }
    s->frame_rate_index = get_bits(&s->gb, 4);
    if (s->frame_rate_index == 0 || s->frame_rate_index > 13)
        return -1;
    s->bit_rate = get_bits(&s->gb, 18) * 400;
    if (get_bits1(&s->gb) == 0) /* marker */
        return -1;
    s->width = width;
    s->height = height;

    s->avctx->rc_buffer_size= get_bits(&s->gb, 10) * 1024*16;
    skip_bits(&s->gb, 1);

    /* get matrix */
    if (get_bits1(&s->gb)) {
        load_matrix(s, s->chroma_intra_matrix, s->intra_matrix, 1);
    } else {
        for(i=0;i<64;i++) {
            j = s->dsp.idct_permutation[i];
            v = ff_mpeg1_default_intra_matrix[i];
            s->intra_matrix[j] = v;
            s->chroma_intra_matrix[j] = v;
        }
    }
    if (get_bits1(&s->gb)) {
        load_matrix(s, s->chroma_inter_matrix, s->inter_matrix, 0);
    } else {
        for(i=0;i<64;i++) {
            int j= s->dsp.idct_permutation[i];
            v = ff_mpeg1_default_non_intra_matrix[i];
            s->inter_matrix[j] = v;
            s->chroma_inter_matrix[j] = v;
        }
    }

    if(show_bits(&s->gb, 23) != 0){
        av_log(s->avctx, AV_LOG_ERROR, "sequence header damaged\n");
        return -1;
    }

    /* we set MPEG-2 parameters so that it emulates MPEG-1 */
    s->progressive_sequence = 1;
    s->progressive_frame = 1;
    s->picture_structure = PICT_FRAME;
    s->frame_pred_frame_dct = 1;
    s->chroma_format = 1;
    s->codec_id= s->avctx->codec_id= CODEC_ID_MPEG1VIDEO;
    avctx->sub_id = 1; /* indicates MPEG-1 */
    s->out_format = FMT_MPEG1;
    s->swap_uv = 0;//AFAIK VCR2 does not have SEQ_HEADER
    if(s->flags & CODEC_FLAG_LOW_DELAY) s->low_delay=1;

    if(s->avctx->debug & FF_DEBUG_PICT_INFO)
        av_log(s->avctx, AV_LOG_DEBUG, "vbv buffer: %d, bitrate:%d\n",
               s->avctx->rc_buffer_size, s->bit_rate);

    return 0;
}

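/**
 * Set up decoding for streams with the "VCR2" codec tag, which (per the note
 * in mpeg1_decode_sequence) carry no sequence header; the context is
 * initialized from the container dimensions and the chroma planes are
 * treated as swapped (see exchange_uv()).
 */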
static int vcr2_init_sequence(AVCodecContext *avctx)
{
    Mpeg1Context *s1 = avctx->priv_data;
    MpegEncContext *s = &s1->mpeg_enc_ctx;
    int i, v;

    /* start new MPEG-1 context decoding */
    s->out_format = FMT_MPEG1;
    if (s1->mpeg_enc_ctx_allocated) {
        MPV_common_end(s);
    }
    s->width  = avctx->coded_width;
    s->height = avctx->coded_height;
    avctx->has_b_frames= 0; //true?
    s->low_delay= 1;

    avctx->pix_fmt = mpeg_get_pixelformat(avctx);
    avctx->hwaccel = ff_find_hwaccel(avctx->codec->id, avctx->pix_fmt);

    if( avctx->pix_fmt == PIX_FMT_XVMC_MPEG2_IDCT || avctx->hwaccel ||
        s->avctx->codec->capabilities&CODEC_CAP_HWACCEL_VDPAU )
        if( avctx->idct_algo == FF_IDCT_AUTO )
            avctx->idct_algo = FF_IDCT_SIMPLE;

    if (MPV_common_init(s) < 0)
        return -1;
    exchange_uv(s);//common init resets pblocks, so we swap them here
    s->swap_uv = 1;// in case of xvmc we need to swap uv for each MB
    s1->mpeg_enc_ctx_allocated = 1;

    for(i=0;i<64;i++) {
        int j= s->dsp.idct_permutation[i];
        v = ff_mpeg1_default_intra_matrix[i];
        s->intra_matrix[j] = v;
        s->chroma_intra_matrix[j] = v;

        v = ff_mpeg1_default_non_intra_matrix[i];
        s->inter_matrix[j] = v;
        s->chroma_inter_matrix[j] = v;
    }

    s->progressive_sequence = 1;
    s->progressive_frame = 1;
    s->picture_structure = PICT_FRAME;
    s->frame_pred_frame_dct = 1;
    s->chroma_format = 1;
    s->codec_id= s->avctx->codec_id= CODEC_ID_MPEG2VIDEO;
    avctx->sub_id = 2; /* indicates MPEG-2 */
    s1->save_width = s->width;
    s1->save_height = s->height;
    s1->save_progressive_seq = s->progressive_sequence;
    return 0;
}


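/**
 * Parse user data. Only the DTG active format description ("DTG1") is
 * handled; the active format is stored in avctx->dtg_active_format.
 */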
static void mpeg_decode_user_data(AVCodecContext *avctx,
                                  const uint8_t *p, int buf_size)
{
    const uint8_t *buf_end = p+buf_size;

    /* we parse the DTG active format information */
    if (buf_end - p >= 5 &&
        p[0] == 'D' && p[1] == 'T' && p[2] == 'G' && p[3] == '1') {
        int flags = p[4];
        p += 5;
        if (flags & 0x80) {
            /* skip event id */
            p += 2;
        }
        if (flags & 0x40) {
            if (buf_end - p < 1)
                return;
            avctx->dtg_active_format = p[0] & 0x0f;
        }
    }
}

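/**
 * Parse a GOP header: time code plus the closed_gop and broken_link flags.
 */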
static void mpeg_decode_gop(AVCodecContext *avctx,
                            const uint8_t *buf, int buf_size){
    Mpeg1Context *s1 = avctx->priv_data;
    MpegEncContext *s = &s1->mpeg_enc_ctx;

    int drop_frame_flag;
    int time_code_hours, time_code_minutes;
    int time_code_seconds, time_code_pictures;
    int broken_link;

    init_get_bits(&s->gb, buf, buf_size*8);

    drop_frame_flag = get_bits1(&s->gb);

    time_code_hours=get_bits(&s->gb,5);
    time_code_minutes = get_bits(&s->gb,6);
    skip_bits1(&s->gb);//marker bit
    time_code_seconds = get_bits(&s->gb,6);
    time_code_pictures = get_bits(&s->gb,6);

    s->closed_gop = get_bits1(&s->gb);
    /* broken_link indicates that, after editing, the reference frames of the
       first B-frames following the GOP I-frame are missing (open GOP) */
    broken_link = get_bits1(&s->gb);

    if(s->avctx->debug & FF_DEBUG_PICT_INFO)
        av_log(s->avctx, AV_LOG_DEBUG, "GOP (%2d:%02d:%02d.[%02d]) closed_gop=%d broken_link=%d\n",
               time_code_hours, time_code_minutes, time_code_seconds,
               time_code_pictures, s->closed_gop, broken_link);
}
/**
 * Find the end of the current frame in the bitstream.
 * @return the position of the first byte of the next frame, or -1
 */
int ff_mpeg1_find_frame_end(ParseContext *pc, const uint8_t *buf, int buf_size, AVCodecParserContext *s)
{
    int i;
    uint32_t state= pc->state;

    /* EOF considered as end of frame */
    if (buf_size == 0)
        return 0;

    /*
     0  frame start         -> 1/4
     1  first_SEQEXT        -> 0/2
     2  first field start   -> 3/0
     3  second_SEQEXT       -> 2/0
     4  searching end
    */

    for(i=0; i<buf_size; i++){
        assert(pc->frame_start_found>=0 && pc->frame_start_found<=4);
        if(pc->frame_start_found&1){
            if(state == EXT_START_CODE && (buf[i]&0xF0) != 0x80)
                pc->frame_start_found--;
            else if(state == EXT_START_CODE+2){
                if((buf[i]&3) == 3) pc->frame_start_found= 0;
                else                pc->frame_start_found= (pc->frame_start_found+1)&3;
            }
            state++;
        }else{
            i= ff_find_start_code(buf+i, buf+buf_size, &state) - buf - 1;
            if(pc->frame_start_found==0 && state >= SLICE_MIN_START_CODE && state <= SLICE_MAX_START_CODE){
                i++;
                pc->frame_start_found=4;
            }
            if(state == SEQ_END_CODE){
                pc->state=-1;
                return i+1;
            }
            if(pc->frame_start_found==2 && state == SEQ_START_CODE)
                pc->frame_start_found= 0;
            if(pc->frame_start_found<4 && state == EXT_START_CODE)
                pc->frame_start_found++;
            if(pc->frame_start_found == 4 && (state&0xFFFFFF00) == 0x100){
                if(state < SLICE_MIN_START_CODE || state > SLICE_MAX_START_CODE){
                    pc->frame_start_found=0;
                    pc->state=-1;
                    return i-3;
                }
            }
            if(pc->frame_start_found == 0 && s && state == PICTURE_START_CODE){
                ff_fetch_timestamp(s, i-3, 1);
            }
        }
    }
    pc->state= state;
    return END_NOT_FOUND;
}

static int decode_chunks(AVCodecContext *avctx,
                         AVFrame *picture, int *data_size,
                         const uint8_t *buf, int buf_size);

/* handle buffering and image synchronisation */
static int mpeg_decode_frame(AVCodecContext *avctx,
                             void *data, int *data_size,
                             AVPacket *avpkt)
{
    const uint8_t *buf = avpkt->data;
    int buf_size = avpkt->size;
    Mpeg1Context *s = avctx->priv_data;
    AVFrame *picture = data;
    MpegEncContext *s2 = &s->mpeg_enc_ctx;
    av_dlog(avctx, "fill_buffer\n");

    if (buf_size == 0 || (buf_size == 4 && AV_RB32(buf) == SEQ_END_CODE)) {
        /* special case for last picture */
        if (s2->low_delay==0 && s2->next_picture_ptr) {
            *picture= *(AVFrame*)s2->next_picture_ptr;
            s2->next_picture_ptr= NULL;

            *data_size = sizeof(AVFrame);
        }
        return buf_size;
    }

    if(s2->flags&CODEC_FLAG_TRUNCATED){
        int next= ff_mpeg1_find_frame_end(&s2->parse_context, buf, buf_size, NULL);

        if( ff_combine_frame(&s2->parse_context, next, (const uint8_t **)&buf, &buf_size) < 0 )
            return buf_size;
    }

#if 0
    if (s->repeat_field % 2 == 1) {
        s->repeat_field++;
        //fprintf(stderr,"\nRepeating last frame: %d -> %d! pict: %d %d", avctx->frame_number-1, avctx->frame_number,
        //        s2->picture_number, s->repeat_field);
        if (avctx->flags & CODEC_FLAG_REPEAT_FIELD) {
            *data_size = sizeof(AVPicture);
            goto the_end;
        }
    }
#endif

    if(s->mpeg_enc_ctx_allocated==0 && avctx->codec_tag == AV_RL32("VCR2"))
        vcr2_init_sequence(avctx);

    s->slice_count= 0;

    if(avctx->extradata && !avctx->frame_number)
        decode_chunks(avctx, picture, data_size, avctx->extradata, avctx->extradata_size);

    return decode_chunks(avctx, picture, data_size, buf, buf_size);
}

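/**
 * Walk the buffer start code by start code, dispatching sequence, GOP,
 * picture and extension headers and decoding (or queuing, for slice
 * threading and VDPAU) the slices in between.
 * @return the number of bytes consumed, or a negative value on error
 */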
static int decode_chunks(AVCodecContext *avctx,
                         AVFrame *picture, int *data_size,
                         const uint8_t *buf, int buf_size)
{
    Mpeg1Context *s = avctx->priv_data;
    MpegEncContext *s2 = &s->mpeg_enc_ctx;
    const uint8_t *buf_ptr = buf;
    const uint8_t *buf_end = buf + buf_size;
    int ret, input_size;
    int last_code= 0;

    for(;;) {
        /* find next start code */
        uint32_t start_code = -1;
        buf_ptr = ff_find_start_code(buf_ptr,buf_end, &start_code);
        if (start_code > 0x1ff){
            if(s2->pict_type != FF_B_TYPE || avctx->skip_frame <= AVDISCARD_DEFAULT){
                if(HAVE_THREADS && avctx->active_thread_type&FF_THREAD_SLICE){
                    int i;

                    avctx->execute(avctx, slice_decode_thread, &s2->thread_context[0], NULL, s->slice_count, sizeof(void*));
                    for(i=0; i<s->slice_count; i++)
                        s2->error_count += s2->thread_context[i]->error_count;
                }

                if (CONFIG_MPEG_VDPAU_DECODER && avctx->codec->capabilities&CODEC_CAP_HWACCEL_VDPAU)
                    ff_vdpau_mpeg_picture_complete(s2, buf, buf_size, s->slice_count);

                if (slice_end(avctx, picture)) {
                    if(s2->last_picture_ptr || s2->low_delay) //FIXME merge with the stuff in mpeg_decode_slice
                        *data_size = sizeof(AVPicture);
                }
            }
            s2->pict_type= 0;
            return FFMAX(0, buf_ptr - buf - s2->parse_context.last_index);
        }

        input_size = buf_end - buf_ptr;

        if(avctx->debug & FF_DEBUG_STARTCODE){
            av_log(avctx, AV_LOG_DEBUG, "%3X at %td left %d\n", start_code, buf_ptr-buf, input_size);
        }

        /* prepare data for next start code */
        switch(start_code) {
        case SEQ_START_CODE:
            if(last_code == 0){
                mpeg1_decode_sequence(avctx, buf_ptr,
                                      input_size);
                s->sync=1;
            }else{
                av_log(avctx, AV_LOG_ERROR, "ignoring SEQ_START_CODE after %X\n", last_code);
            }
            break;

        case PICTURE_START_CODE:
            if(last_code == 0 || last_code == SLICE_MIN_START_CODE){
                if(mpeg_decode_postinit(avctx) < 0){
                    av_log(avctx, AV_LOG_ERROR, "mpeg_decode_postinit() failure\n");
                    return -1;
                }

                /* we have a complete image: we try to decompress it */
                if(mpeg1_decode_picture(avctx,
                                        buf_ptr, input_size) < 0)
                    s2->pict_type=0;
                s2->first_slice = 1;
                last_code= PICTURE_START_CODE;
            }else{
                av_log(avctx, AV_LOG_ERROR, "ignoring pic after %X\n", last_code);
            }
            break;
        case EXT_START_CODE:
            init_get_bits(&s2->gb, buf_ptr, input_size*8);

            switch(get_bits(&s2->gb, 4)) {
            case 0x1:
                if(last_code == 0){
                    mpeg_decode_sequence_extension(s);
                }else{
                    av_log(avctx, AV_LOG_ERROR, "ignoring seq ext after %X\n", last_code);
                }
                break;
            case 0x2:
                mpeg_decode_sequence_display_extension(s);
                break;
            case 0x3:
                mpeg_decode_quant_matrix_extension(s2);
                break;
            case 0x7:
                mpeg_decode_picture_display_extension(s);
                break;
            case 0x8:
                if(last_code == PICTURE_START_CODE){
                    mpeg_decode_picture_coding_extension(s);
                }else{
                    av_log(avctx, AV_LOG_ERROR, "ignoring pic cod ext after %X\n", last_code);
                }
                break;
            }
            break;
        case USER_START_CODE:
            mpeg_decode_user_data(avctx,
                                  buf_ptr, input_size);
            break;
        case GOP_START_CODE:
            if(last_code == 0){
                s2->first_field=0;
                mpeg_decode_gop(avctx,
                                buf_ptr, input_size);
                s->sync=1;
            }else{
                av_log(avctx, AV_LOG_ERROR, "ignoring GOP_START_CODE after %X\n", last_code);
            }
            break;
        default:
            if (start_code >= SLICE_MIN_START_CODE &&
                start_code <= SLICE_MAX_START_CODE && last_code!=0) {
                const int field_pic= s2->picture_structure != PICT_FRAME;
                int mb_y= (start_code - SLICE_MIN_START_CODE) << field_pic;
                last_code= SLICE_MIN_START_CODE;

                if(s2->picture_structure == PICT_BOTTOM_FIELD)
                    mb_y++;

                if (mb_y >= s2->mb_height){
                    av_log(s2->avctx, AV_LOG_ERROR, "slice below image (%d >= %d)\n", mb_y, s2->mb_height);
                    return -1;
                }

                if(s2->last_picture_ptr==NULL){
                    /* Skip B-frames if we do not have reference frames and gop is not closed */
                    if(s2->pict_type==FF_B_TYPE){
                        if(!s2->closed_gop)
                            break;
                    }
                }
                if(s2->pict_type==FF_I_TYPE)
                    s->sync=1;
                if(s2->next_picture_ptr==NULL){
                    /* Skip P-frames if we do not have a reference frame or we have an invalid header. */
                    if(s2->pict_type==FF_P_TYPE && !s->sync) break;
                }
                /* Skip B-frames if we are in a hurry. */
                if(avctx->hurry_up && s2->pict_type==FF_B_TYPE) break;
                if(  (avctx->skip_frame >= AVDISCARD_NONREF && s2->pict_type==FF_B_TYPE)
                   ||(avctx->skip_frame >= AVDISCARD_NONKEY && s2->pict_type!=FF_I_TYPE)
                   || avctx->skip_frame >= AVDISCARD_ALL)
                    break;
                /* Skip everything if we are in a hurry>=5. */
                if(avctx->hurry_up>=5) break;

                if (!s->mpeg_enc_ctx_allocated) break;

                if(s2->codec_id == CODEC_ID_MPEG2VIDEO){
                    if(mb_y < avctx->skip_top || mb_y >= s2->mb_height - avctx->skip_bottom)
                        break;
                }

                if(!s2->pict_type){
                    av_log(avctx, AV_LOG_ERROR, "Missing picture start code\n");
                    break;
                }

                if(s2->first_slice){
                    s2->first_slice=0;
                    if(mpeg_field_start(s2, buf, buf_size) < 0)
                        return -1;
                }
                if(!s2->current_picture_ptr){
                    av_log(avctx, AV_LOG_ERROR, "current_picture not initialized\n");
                    return -1;
                }

                if (avctx->codec->capabilities&CODEC_CAP_HWACCEL_VDPAU) {
                    s->slice_count++;
                    break;
                }

                if(HAVE_THREADS && avctx->active_thread_type&FF_THREAD_SLICE){
                    int threshold= (s2->mb_height*s->slice_count + avctx->thread_count/2) / avctx->thread_count;
                    if(threshold <= mb_y){
                        MpegEncContext *thread_context= s2->thread_context[s->slice_count];

                        thread_context->start_mb_y= mb_y;
                        thread_context->end_mb_y  = s2->mb_height;
                        if(s->slice_count){
                            s2->thread_context[s->slice_count-1]->end_mb_y= mb_y;
                            ff_update_duplicate_context(thread_context, s2);
                        }
                        init_get_bits(&thread_context->gb, buf_ptr, input_size*8);
                        s->slice_count++;
                    }
                    buf_ptr += 2; //FIXME add minimum number of bytes per slice
                }else{
                    ret = mpeg_decode_slice(s, mb_y, &buf_ptr, input_size);
                    emms_c();

                    if(ret < 0){
                        if(s2->resync_mb_x>=0 && s2->resync_mb_y>=0)
                            ff_er_add_slice(s2, s2->resync_mb_x, s2->resync_mb_y, s2->mb_x, s2->mb_y, AC_ERROR|DC_ERROR|MV_ERROR);
                    }else{
                        ff_er_add_slice(s2, s2->resync_mb_x, s2->resync_mb_y, s2->mb_x-1, s2->mb_y, AC_END|DC_END|MV_END);
                    }
                }
            }
            break;
        }
    }
}

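/* Called when the decoder is flushed (e.g. on seek): drop the header sync
 * state and flush the generic MPEG video decoder state. */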
static void flush(AVCodecContext *avctx){
    Mpeg1Context *s = avctx->priv_data;

    s->sync=0;

    ff_mpeg_flush(avctx);
}

static int mpeg_decode_end(AVCodecContext *avctx)
{
    Mpeg1Context *s = avctx->priv_data;

    if (s->mpeg_enc_ctx_allocated)
        MPV_common_end(&s->mpeg_enc_ctx);
    return 0;
}

static const AVProfile mpeg2_video_profiles[] = {
    { FF_PROFILE_MPEG2_422,          "4:2:2"              },
    { FF_PROFILE_MPEG2_HIGH,         "High"               },
    { FF_PROFILE_MPEG2_SS,           "Spatially Scalable" },
    { FF_PROFILE_MPEG2_SNR_SCALABLE, "SNR Scalable"       },
    { FF_PROFILE_MPEG2_MAIN,         "Main"               },
    { FF_PROFILE_MPEG2_SIMPLE,       "Simple"             },
    { FF_PROFILE_RESERVED,           "Reserved"           },
    { FF_PROFILE_RESERVED,           "Reserved"           },
    { FF_PROFILE_UNKNOWN }, /* terminator expected by av_get_profile_name() */
};


AVCodec ff_mpeg1video_decoder = {
    "mpeg1video",
    AVMEDIA_TYPE_VIDEO,
    CODEC_ID_MPEG1VIDEO,
    sizeof(Mpeg1Context),
    mpeg_decode_init,
    NULL,
    mpeg_decode_end,
    mpeg_decode_frame,
    CODEC_CAP_DRAW_HORIZ_BAND | CODEC_CAP_DR1 | CODEC_CAP_TRUNCATED | CODEC_CAP_DELAY,
    .flush= flush,
    .max_lowres= 3,
    .long_name= NULL_IF_CONFIG_SMALL("MPEG-1 video"),
    .update_thread_context= ONLY_IF_THREADS_ENABLED(mpeg_decode_update_thread_context)
};

AVCodec ff_mpeg2video_decoder = {
    "mpeg2video",
    AVMEDIA_TYPE_VIDEO,
    CODEC_ID_MPEG2VIDEO,
    sizeof(Mpeg1Context),
    mpeg_decode_init,
    NULL,
    mpeg_decode_end,
    mpeg_decode_frame,
    CODEC_CAP_DRAW_HORIZ_BAND | CODEC_CAP_DR1 | CODEC_CAP_TRUNCATED | CODEC_CAP_DELAY,
    .flush= flush,
    .max_lowres= 3,
    .long_name= NULL_IF_CONFIG_SMALL("MPEG-2 video"),
    .profiles = NULL_IF_CONFIG_SMALL(mpeg2_video_profiles),
};

//legacy decoder
AVCodec ff_mpegvideo_decoder = {
    "mpegvideo",
    AVMEDIA_TYPE_VIDEO,
    CODEC_ID_MPEG2VIDEO,
    sizeof(Mpeg1Context),
    mpeg_decode_init,
    NULL,
    mpeg_decode_end,
    mpeg_decode_frame,
    CODEC_CAP_DRAW_HORIZ_BAND | CODEC_CAP_DR1 | CODEC_CAP_TRUNCATED | CODEC_CAP_DELAY,
    .flush= flush,
    .max_lowres= 3,
    .long_name= NULL_IF_CONFIG_SMALL("MPEG-1 video"),
};

#if CONFIG_MPEG_XVMC_DECODER
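/**
 * Init for the XvMC wrapper decoder: slices must arrive in coded order,
 * slice threading is rejected, and the XvMC IDCT pixel format with packed
 * blocks is selected.
 */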
static av_cold int mpeg_mc_decode_init(AVCodecContext *avctx){
    if( avctx->active_thread_type & FF_THREAD_SLICE )
        return -1;
    if( !(avctx->slice_flags & SLICE_FLAG_CODED_ORDER) )
        return -1;
    if( !(avctx->slice_flags & SLICE_FLAG_ALLOW_FIELD) ){
        av_dlog(avctx, "mpeg12.c: XvMC decoder will work better if SLICE_FLAG_ALLOW_FIELD is set\n");
    }
    mpeg_decode_init(avctx);

    avctx->pix_fmt = PIX_FMT_XVMC_MPEG2_IDCT;
    avctx->xvmc_acceleration = 2;//2 - the blocks are packed!

    return 0;
}

AVCodec ff_mpeg_xvmc_decoder = {
    "mpegvideo_xvmc",
    AVMEDIA_TYPE_VIDEO,
    CODEC_ID_MPEG2VIDEO_XVMC,
    sizeof(Mpeg1Context),
    mpeg_mc_decode_init,
    NULL,
    mpeg_decode_end,
    mpeg_decode_frame,
    CODEC_CAP_DRAW_HORIZ_BAND | CODEC_CAP_DR1 | CODEC_CAP_TRUNCATED| CODEC_CAP_HWACCEL | CODEC_CAP_DELAY,
    .flush= flush,
    .long_name = NULL_IF_CONFIG_SMALL("MPEG-1/2 video XvMC (X-Video Motion Compensation)"),
};

#endif

#if CONFIG_MPEG_VDPAU_DECODER
AVCodec ff_mpeg_vdpau_decoder = {
    "mpegvideo_vdpau",
    AVMEDIA_TYPE_VIDEO,
    CODEC_ID_MPEG2VIDEO,
    sizeof(Mpeg1Context),
    mpeg_decode_init,
    NULL,
    mpeg_decode_end,
    mpeg_decode_frame,
    CODEC_CAP_DR1 | CODEC_CAP_TRUNCATED | CODEC_CAP_HWACCEL_VDPAU | CODEC_CAP_DELAY,
    .flush= flush,
    .long_name = NULL_IF_CONFIG_SMALL("MPEG-1/2 video (VDPAU acceleration)"),
};
#endif

#if CONFIG_MPEG1_VDPAU_DECODER
AVCodec ff_mpeg1_vdpau_decoder = {
    "mpeg1video_vdpau",
    AVMEDIA_TYPE_VIDEO,
    CODEC_ID_MPEG1VIDEO,
    sizeof(Mpeg1Context),
    mpeg_decode_init,
    NULL,
    mpeg_decode_end,
    mpeg_decode_frame,
    CODEC_CAP_DR1 | CODEC_CAP_TRUNCATED | CODEC_CAP_HWACCEL_VDPAU | CODEC_CAP_DELAY,
    .flush= flush,
    .long_name = NULL_IF_CONFIG_SMALL("MPEG-1 video (VDPAU acceleration)"),
};
#endif