ffmpeg / libavcodec / mpeg12.c @ 7f125c3e
/*
 * MPEG-1/2 decoder
 * Copyright (c) 2000,2001 Fabrice Bellard
 * Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/**
 * @file
 * MPEG-1/2 decoder
 */

//#define DEBUG
#include "internal.h"
#include "avcodec.h"
#include "dsputil.h"
#include "mpegvideo.h"

#include "mpeg12.h"
#include "mpeg12data.h"
#include "mpeg12decdata.h"
#include "bytestream.h"
#include "vdpau_internal.h"
#include "xvmc_internal.h"
#include "thread.h"

//#undef NDEBUG
//#include <assert.h>


#define MV_VLC_BITS 9
#define MBINCR_VLC_BITS 9
#define MB_PAT_VLC_BITS 9
#define MB_PTYPE_VLC_BITS 6
#define MB_BTYPE_VLC_BITS 6

static inline int mpeg1_decode_block_intra(MpegEncContext *s,
                                           DCTELEM *block,
                                           int n);
static inline int mpeg1_decode_block_inter(MpegEncContext *s,
                                           DCTELEM *block,
                                           int n);
static inline int mpeg1_fast_decode_block_inter(MpegEncContext *s, DCTELEM *block, int n);
static inline int mpeg2_decode_block_non_intra(MpegEncContext *s,
                                               DCTELEM *block,
                                               int n);
static inline int mpeg2_decode_block_intra(MpegEncContext *s,
                                           DCTELEM *block,
                                           int n);
static inline int mpeg2_fast_decode_block_non_intra(MpegEncContext *s, DCTELEM *block, int n);
static inline int mpeg2_fast_decode_block_intra(MpegEncContext *s, DCTELEM *block, int n);
static int mpeg_decode_motion(MpegEncContext *s, int fcode, int pred);
static void exchange_uv(MpegEncContext *s);

uint8_t ff_mpeg12_static_rl_table_store[2][2][2*MAX_RUN + MAX_LEVEL + 3];


#define INIT_2D_VLC_RL(rl, static_size)\
{\
    static RL_VLC_ELEM rl_vlc_table[static_size];\
    INIT_VLC_STATIC(&rl.vlc, TEX_VLC_BITS, rl.n + 2,\
                    &rl.table_vlc[0][1], 4, 2,\
                    &rl.table_vlc[0][0], 4, 2, static_size);\
\
    rl.rl_vlc[0]= rl_vlc_table;\
    init_2d_vlc_rl(&rl);\
}

static void init_2d_vlc_rl(RLTable *rl)
{
    int i;

    for(i=0; i<rl->vlc.table_size; i++){
        int code= rl->vlc.table[i][0];
        int len = rl->vlc.table[i][1];
        int level, run;

        if(len==0){ // illegal code
            run= 65;
            level= MAX_LEVEL;
        }else if(len<0){ //more bits needed
            run= 0;
            level= code;
        }else{
            if(code==rl->n){ //esc
                run= 65;
                level= 0;
            }else if(code==rl->n+1){ //eob
                run= 0;
                level= 127;
            }else{
                run=   rl->table_run  [code] + 1;
                level= rl->table_level[code];
            }
        }
        rl->rl_vlc[0][i].len= len;
        rl->rl_vlc[0][i].level= level;
        rl->rl_vlc[0][i].run= run;
    }
}

void ff_mpeg12_common_init(MpegEncContext *s)
{

    s->y_dc_scale_table=
    s->c_dc_scale_table= ff_mpeg2_dc_scale_table[s->intra_dc_precision];

}

void ff_mpeg1_clean_buffers(MpegEncContext *s){
    s->last_dc[0] = 1 << (7 + s->intra_dc_precision);
    s->last_dc[1] = s->last_dc[0];
    s->last_dc[2] = s->last_dc[0];
    memset(s->last_mv, 0, sizeof(s->last_mv));
}


/******************************************/
/* decoding */

VLC ff_dc_lum_vlc;
VLC ff_dc_chroma_vlc;

static VLC mv_vlc;
static VLC mbincr_vlc;
static VLC mb_ptype_vlc;
static VLC mb_btype_vlc;
static VLC mb_pat_vlc;

av_cold void ff_mpeg12_init_vlcs(void)
{
    static int done = 0;

    if (!done) {
        done = 1;

        INIT_VLC_STATIC(&ff_dc_lum_vlc, DC_VLC_BITS, 12,
                        ff_mpeg12_vlc_dc_lum_bits, 1, 1,
                        ff_mpeg12_vlc_dc_lum_code, 2, 2, 512);
        INIT_VLC_STATIC(&ff_dc_chroma_vlc, DC_VLC_BITS, 12,
                        ff_mpeg12_vlc_dc_chroma_bits, 1, 1,
                        ff_mpeg12_vlc_dc_chroma_code, 2, 2, 514);
        INIT_VLC_STATIC(&mv_vlc, MV_VLC_BITS, 17,
                        &ff_mpeg12_mbMotionVectorTable[0][1], 2, 1,
                        &ff_mpeg12_mbMotionVectorTable[0][0], 2, 1, 518);
        INIT_VLC_STATIC(&mbincr_vlc, MBINCR_VLC_BITS, 36,
                        &ff_mpeg12_mbAddrIncrTable[0][1], 2, 1,
                        &ff_mpeg12_mbAddrIncrTable[0][0], 2, 1, 538);
        INIT_VLC_STATIC(&mb_pat_vlc, MB_PAT_VLC_BITS, 64,
                        &ff_mpeg12_mbPatTable[0][1], 2, 1,
                        &ff_mpeg12_mbPatTable[0][0], 2, 1, 512);

        INIT_VLC_STATIC(&mb_ptype_vlc, MB_PTYPE_VLC_BITS, 7,
                        &table_mb_ptype[0][1], 2, 1,
                        &table_mb_ptype[0][0], 2, 1, 64);
        INIT_VLC_STATIC(&mb_btype_vlc, MB_BTYPE_VLC_BITS, 11,
                        &table_mb_btype[0][1], 2, 1,
                        &table_mb_btype[0][0], 2, 1, 64);
        init_rl(&ff_rl_mpeg1, ff_mpeg12_static_rl_table_store[0]);
        init_rl(&ff_rl_mpeg2, ff_mpeg12_static_rl_table_store[1]);

        INIT_2D_VLC_RL(ff_rl_mpeg1, 680);
        INIT_2D_VLC_RL(ff_rl_mpeg2, 674);
    }
}

static inline int get_dmv(MpegEncContext *s)
{
    if(get_bits1(&s->gb))
        return 1 - (get_bits1(&s->gb) << 1);
    else
        return 0;
}

static inline int get_qscale(MpegEncContext *s)
{
    int qscale = get_bits(&s->gb, 5);
    if (s->q_scale_type) {
        return non_linear_qscale[qscale];
    } else {
        return qscale << 1;
    }
}

/* motion type (for MPEG-2) */
#define MT_FIELD 1
#define MT_FRAME 2
#define MT_16X8  2
#define MT_DMV   3

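/* Decode one macroblock: handle the skip run, read the macroblock-type VLC for
 * the current picture type, parse motion vectors according to the motion type
 * (frame, field, 16x8 or dual prime), then decode the coded blocks signalled
 * by the coded block pattern. */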
static int mpeg_decode_mb(MpegEncContext *s,
                          DCTELEM block[12][64])
{
    int i, j, k, cbp, val, mb_type, motion_type;
    const int mb_block_count = 4 + (1<< s->chroma_format);

    av_dlog(s->avctx, "decode_mb: x=%d y=%d\n", s->mb_x, s->mb_y);

    assert(s->mb_skipped==0);

    if (s->mb_skip_run-- != 0) {
        if (s->pict_type == FF_P_TYPE) {
            s->mb_skipped = 1;
            s->current_picture.mb_type[ s->mb_x + s->mb_y*s->mb_stride ]= MB_TYPE_SKIP | MB_TYPE_L0 | MB_TYPE_16x16;
        } else {
            int mb_type;

            if(s->mb_x)
                mb_type= s->current_picture.mb_type[ s->mb_x + s->mb_y*s->mb_stride - 1];
            else
                mb_type= s->current_picture.mb_type[ s->mb_width + (s->mb_y-1)*s->mb_stride - 1]; // FIXME not sure if this is allowed in MPEG at all
            if(IS_INTRA(mb_type))
                return -1;

            s->current_picture.mb_type[ s->mb_x + s->mb_y*s->mb_stride ]=
                mb_type | MB_TYPE_SKIP;
//            assert(s->current_picture.mb_type[ s->mb_x + s->mb_y*s->mb_stride - 1]&(MB_TYPE_16x16|MB_TYPE_16x8));

            if((s->mv[0][0][0]|s->mv[0][0][1]|s->mv[1][0][0]|s->mv[1][0][1])==0)
                s->mb_skipped = 1;
        }

        return 0;
    }

    switch(s->pict_type) {
    default:
    case FF_I_TYPE:
        if (get_bits1(&s->gb) == 0) {
            if (get_bits1(&s->gb) == 0){
                av_log(s->avctx, AV_LOG_ERROR, "invalid mb type in I Frame at %d %d\n", s->mb_x, s->mb_y);
                return -1;
            }
            mb_type = MB_TYPE_QUANT | MB_TYPE_INTRA;
        } else {
            mb_type = MB_TYPE_INTRA;
        }
        break;
    case FF_P_TYPE:
        mb_type = get_vlc2(&s->gb, mb_ptype_vlc.table, MB_PTYPE_VLC_BITS, 1);
        if (mb_type < 0){
            av_log(s->avctx, AV_LOG_ERROR, "invalid mb type in P Frame at %d %d\n", s->mb_x, s->mb_y);
            return -1;
        }
        mb_type = ptype2mb_type[ mb_type ];
        break;
    case FF_B_TYPE:
        mb_type = get_vlc2(&s->gb, mb_btype_vlc.table, MB_BTYPE_VLC_BITS, 1);
        if (mb_type < 0){
            av_log(s->avctx, AV_LOG_ERROR, "invalid mb type in B Frame at %d %d\n", s->mb_x, s->mb_y);
            return -1;
        }
        mb_type = btype2mb_type[ mb_type ];
        break;
    }
    av_dlog(s->avctx, "mb_type=%x\n", mb_type);
//    motion_type = 0; /* avoid warning */
    if (IS_INTRA(mb_type)) {
        s->dsp.clear_blocks(s->block[0]);

        if(!s->chroma_y_shift){
            s->dsp.clear_blocks(s->block[6]);
        }

        /* compute DCT type */
        if (s->picture_structure == PICT_FRAME && //FIXME add an interlaced_dct coded var?
            !s->frame_pred_frame_dct) {
            s->interlaced_dct = get_bits1(&s->gb);
        }

        if (IS_QUANT(mb_type))
            s->qscale = get_qscale(s);

        if (s->concealment_motion_vectors) {
            /* just parse them */
            if (s->picture_structure != PICT_FRAME)
                skip_bits1(&s->gb); /* field select */

            s->mv[0][0][0]= s->last_mv[0][0][0]= s->last_mv[0][1][0] =
                mpeg_decode_motion(s, s->mpeg_f_code[0][0], s->last_mv[0][0][0]);
            s->mv[0][0][1]= s->last_mv[0][0][1]= s->last_mv[0][1][1] =
                mpeg_decode_motion(s, s->mpeg_f_code[0][1], s->last_mv[0][0][1]);

            skip_bits1(&s->gb); /* marker */
        }else
            memset(s->last_mv, 0, sizeof(s->last_mv)); /* reset mv prediction */
        s->mb_intra = 1;
        //if 1, we memcpy blocks in xvmcvideo
        if(CONFIG_MPEG_XVMC_DECODER && s->avctx->xvmc_acceleration > 1){
            ff_xvmc_pack_pblocks(s,-1);//inter are always full blocks
            if(s->swap_uv){
                exchange_uv(s);
            }
        }

        if (s->codec_id == CODEC_ID_MPEG2VIDEO) {
            if(s->flags2 & CODEC_FLAG2_FAST){
                for(i=0;i<6;i++) {
                    mpeg2_fast_decode_block_intra(s, *s->pblocks[i], i);
                }
            }else{
                for(i=0;i<mb_block_count;i++) {
                    if (mpeg2_decode_block_intra(s, *s->pblocks[i], i) < 0)
                        return -1;
                }
            }
        } else {
            for(i=0;i<6;i++) {
                if (mpeg1_decode_block_intra(s, *s->pblocks[i], i) < 0)
                    return -1;
            }
        }
    } else {
        if (mb_type & MB_TYPE_ZERO_MV){
            assert(mb_type & MB_TYPE_CBP);

            s->mv_dir = MV_DIR_FORWARD;
            if(s->picture_structure == PICT_FRAME){
                if(!s->frame_pred_frame_dct)
                    s->interlaced_dct = get_bits1(&s->gb);
                s->mv_type = MV_TYPE_16X16;
            }else{
                s->mv_type = MV_TYPE_FIELD;
                mb_type |= MB_TYPE_INTERLACED;
                s->field_select[0][0]= s->picture_structure - 1;
            }

            if (IS_QUANT(mb_type))
                s->qscale = get_qscale(s);

            s->last_mv[0][0][0] = 0;
            s->last_mv[0][0][1] = 0;
            s->last_mv[0][1][0] = 0;
            s->last_mv[0][1][1] = 0;
            s->mv[0][0][0] = 0;
            s->mv[0][0][1] = 0;
        }else{
            assert(mb_type & MB_TYPE_L0L1);
            //FIXME decide if MBs in field pictures are MB_TYPE_INTERLACED
            /* get additional motion vector type */
            if (s->frame_pred_frame_dct)
                motion_type = MT_FRAME;
            else{
                motion_type = get_bits(&s->gb, 2);
                if (s->picture_structure == PICT_FRAME && HAS_CBP(mb_type))
                    s->interlaced_dct = get_bits1(&s->gb);
            }

            if (IS_QUANT(mb_type))
                s->qscale = get_qscale(s);

            /* motion vectors */
            s->mv_dir= (mb_type>>13)&3;
            av_dlog(s->avctx, "motion_type=%d\n", motion_type);
            switch(motion_type) {
            case MT_FRAME: /* or MT_16X8 */
                if (s->picture_structure == PICT_FRAME) {
                    mb_type |= MB_TYPE_16x16;
                    s->mv_type = MV_TYPE_16X16;
                    for(i=0;i<2;i++) {
                        if (USES_LIST(mb_type, i)) {
                            /* MT_FRAME */
                            s->mv[i][0][0]= s->last_mv[i][0][0]= s->last_mv[i][1][0] =
                                mpeg_decode_motion(s, s->mpeg_f_code[i][0], s->last_mv[i][0][0]);
                            s->mv[i][0][1]= s->last_mv[i][0][1]= s->last_mv[i][1][1] =
                                mpeg_decode_motion(s, s->mpeg_f_code[i][1], s->last_mv[i][0][1]);
                            /* full_pel: only for MPEG-1 */
                            if (s->full_pel[i]){
                                s->mv[i][0][0] <<= 1;
                                s->mv[i][0][1] <<= 1;
                            }
                        }
                    }
                } else {
                    mb_type |= MB_TYPE_16x8 | MB_TYPE_INTERLACED;
                    s->mv_type = MV_TYPE_16X8;
                    for(i=0;i<2;i++) {
                        if (USES_LIST(mb_type, i)) {
                            /* MT_16X8 */
                            for(j=0;j<2;j++) {
                                s->field_select[i][j] = get_bits1(&s->gb);
                                for(k=0;k<2;k++) {
                                    val = mpeg_decode_motion(s, s->mpeg_f_code[i][k],
                                                             s->last_mv[i][j][k]);
                                    s->last_mv[i][j][k] = val;
                                    s->mv[i][j][k] = val;
                                }
                            }
                        }
                    }
                }
                break;
            case MT_FIELD:
                if(s->progressive_sequence){
                    av_log(s->avctx, AV_LOG_ERROR, "MT_FIELD in progressive_sequence\n");
                    return -1;
                }
                s->mv_type = MV_TYPE_FIELD;
                if (s->picture_structure == PICT_FRAME) {
                    mb_type |= MB_TYPE_16x8 | MB_TYPE_INTERLACED;
                    for(i=0;i<2;i++) {
                        if (USES_LIST(mb_type, i)) {
                            for(j=0;j<2;j++) {
                                s->field_select[i][j] = get_bits1(&s->gb);
                                val = mpeg_decode_motion(s, s->mpeg_f_code[i][0],
                                                         s->last_mv[i][j][0]);
                                s->last_mv[i][j][0] = val;
                                s->mv[i][j][0] = val;
                                av_dlog(s->avctx, "fmx=%d\n", val);
                                val = mpeg_decode_motion(s, s->mpeg_f_code[i][1],
                                                         s->last_mv[i][j][1] >> 1);
                                s->last_mv[i][j][1] = val << 1;
                                s->mv[i][j][1] = val;
                                av_dlog(s->avctx, "fmy=%d\n", val);
                            }
                        }
                    }
                } else {
                    mb_type |= MB_TYPE_16x16 | MB_TYPE_INTERLACED;
                    for(i=0;i<2;i++) {
                        if (USES_LIST(mb_type, i)) {
                            s->field_select[i][0] = get_bits1(&s->gb);
                            for(k=0;k<2;k++) {
                                val = mpeg_decode_motion(s, s->mpeg_f_code[i][k],
                                                         s->last_mv[i][0][k]);
                                s->last_mv[i][0][k] = val;
                                s->last_mv[i][1][k] = val;
                                s->mv[i][0][k] = val;
                            }
                        }
                    }
                }
                break;
            case MT_DMV:
                if(s->progressive_sequence){
                    av_log(s->avctx, AV_LOG_ERROR, "MT_DMV in progressive_sequence\n");
                    return -1;
                }
                s->mv_type = MV_TYPE_DMV;
                for(i=0;i<2;i++) {
                    if (USES_LIST(mb_type, i)) {
                        int dmx, dmy, mx, my, m;
                        const int my_shift= s->picture_structure == PICT_FRAME;

                        mx = mpeg_decode_motion(s, s->mpeg_f_code[i][0],
                                                s->last_mv[i][0][0]);
                        s->last_mv[i][0][0] = mx;
                        s->last_mv[i][1][0] = mx;
                        dmx = get_dmv(s);
                        my = mpeg_decode_motion(s, s->mpeg_f_code[i][1],
                                                s->last_mv[i][0][1] >> my_shift);
                        dmy = get_dmv(s);


                        s->last_mv[i][0][1] = my<<my_shift;
                        s->last_mv[i][1][1] = my<<my_shift;

                        s->mv[i][0][0] = mx;
                        s->mv[i][0][1] = my;
                        s->mv[i][1][0] = mx;//not used
                        s->mv[i][1][1] = my;//not used

                        if (s->picture_structure == PICT_FRAME) {
                            mb_type |= MB_TYPE_16x16 | MB_TYPE_INTERLACED;

                            //m = 1 + 2 * s->top_field_first;
                            m = s->top_field_first ? 1 : 3;

                            /* top -> top pred */
                            s->mv[i][2][0] = ((mx * m + (mx > 0)) >> 1) + dmx;
                            s->mv[i][2][1] = ((my * m + (my > 0)) >> 1) + dmy - 1;
                            m = 4 - m;
                            s->mv[i][3][0] = ((mx * m + (mx > 0)) >> 1) + dmx;
                            s->mv[i][3][1] = ((my * m + (my > 0)) >> 1) + dmy + 1;
                        } else {
                            mb_type |= MB_TYPE_16x16;

                            s->mv[i][2][0] = ((mx + (mx > 0)) >> 1) + dmx;
                            s->mv[i][2][1] = ((my + (my > 0)) >> 1) + dmy;
                            if(s->picture_structure == PICT_TOP_FIELD)
                                s->mv[i][2][1]--;
                            else
                                s->mv[i][2][1]++;
                        }
                    }
                }
                break;
            default:
                av_log(s->avctx, AV_LOG_ERROR, "00 motion_type at %d %d\n", s->mb_x, s->mb_y);
                return -1;
            }
        }

        s->mb_intra = 0;
        if (HAS_CBP(mb_type)) {
            s->dsp.clear_blocks(s->block[0]);

            cbp = get_vlc2(&s->gb, mb_pat_vlc.table, MB_PAT_VLC_BITS, 1);
            if(mb_block_count > 6){
                 cbp<<= mb_block_count-6;
                 cbp |= get_bits(&s->gb, mb_block_count-6);
                 s->dsp.clear_blocks(s->block[6]);
            }
            if (cbp <= 0){
                av_log(s->avctx, AV_LOG_ERROR, "invalid cbp at %d %d\n", s->mb_x, s->mb_y);
                return -1;
            }

            //if 1, we memcpy blocks in xvmcvideo
            if(CONFIG_MPEG_XVMC_DECODER && s->avctx->xvmc_acceleration > 1){
                ff_xvmc_pack_pblocks(s,cbp);
                if(s->swap_uv){
                    exchange_uv(s);
                }
            }

            if (s->codec_id == CODEC_ID_MPEG2VIDEO) {
                if(s->flags2 & CODEC_FLAG2_FAST){
                    for(i=0;i<6;i++) {
                        if(cbp & 32) {
                            mpeg2_fast_decode_block_non_intra(s, *s->pblocks[i], i);
                        } else {
                            s->block_last_index[i] = -1;
                        }
                        cbp+=cbp;
                    }
                }else{
                    cbp<<= 12-mb_block_count;

                    for(i=0;i<mb_block_count;i++) {
                        if ( cbp & (1<<11) ) {
                            if (mpeg2_decode_block_non_intra(s, *s->pblocks[i], i) < 0)
                                return -1;
                        } else {
                            s->block_last_index[i] = -1;
                        }
                        cbp+=cbp;
                    }
                }
            } else {
                if(s->flags2 & CODEC_FLAG2_FAST){
                    for(i=0;i<6;i++) {
                        if (cbp & 32) {
                            mpeg1_fast_decode_block_inter(s, *s->pblocks[i], i);
                        } else {
                            s->block_last_index[i] = -1;
                        }
                        cbp+=cbp;
                    }
                }else{
                    for(i=0;i<6;i++) {
                        if (cbp & 32) {
                            if (mpeg1_decode_block_inter(s, *s->pblocks[i], i) < 0)
                                return -1;
                        } else {
                            s->block_last_index[i] = -1;
                        }
                        cbp+=cbp;
                    }
                }
            }
        }else{
            for(i=0;i<12;i++)
                s->block_last_index[i] = -1;
        }
    }

    s->current_picture.mb_type[ s->mb_x + s->mb_y*s->mb_stride ]= mb_type;

    return 0;
}

/* as H.263, but only 17 codes */
static int mpeg_decode_motion(MpegEncContext *s, int fcode, int pred)
{
    int code, sign, val, l, shift;

    code = get_vlc2(&s->gb, mv_vlc.table, MV_VLC_BITS, 2);
    if (code == 0) {
        return pred;
    }
    if (code < 0) {
        return 0xffff;
    }

    sign = get_bits1(&s->gb);
    shift = fcode - 1;
    val = code;
    if (shift) {
        val = (val - 1) << shift;
        val |= get_bits(&s->gb, shift);
        val++;
    }
    if (sign)
        val = -val;
    val += pred;

    /* modulo decoding */
    l= INT_BIT - 5 - shift;
    val = (val<<l)>>l;
    return val;
}

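/* Block-layer decoding for MPEG-1: intra blocks carry a DC difference that is
 * predicted from last_dc[] per component, followed by run/level VLC-coded AC
 * coefficients dequantized with the intra matrix; inter blocks are decoded
 * entirely from the run/level VLC using the inter matrix, and every
 * reconstructed level is forced odd ((level - 1) | 1) as MPEG-1 mismatch
 * control. */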
static inline int mpeg1_decode_block_intra(MpegEncContext *s,
                                           DCTELEM *block,
                                           int n)
{
    int level, dc, diff, i, j, run;
    int component;
    RLTable *rl = &ff_rl_mpeg1;
    uint8_t * const scantable= s->intra_scantable.permutated;
    const uint16_t *quant_matrix= s->intra_matrix;
    const int qscale= s->qscale;

    /* DC coefficient */
    component = (n <= 3 ? 0 : n - 4 + 1);
    diff = decode_dc(&s->gb, component);
    if (diff >= 0xffff)
        return -1;
    dc = s->last_dc[component];
    dc += diff;
    s->last_dc[component] = dc;
    block[0] = dc*quant_matrix[0];
    av_dlog(s->avctx, "dc=%d diff=%d\n", dc, diff);
    i = 0;
    {
        OPEN_READER(re, &s->gb);
        /* now quantify & encode AC coefficients */
        for(;;) {
            UPDATE_CACHE(re, &s->gb);
            GET_RL_VLC(level, run, re, &s->gb, rl->rl_vlc[0], TEX_VLC_BITS, 2, 0);

            if(level == 127){
                break;
            } else if(level != 0) {
                i += run;
                j = scantable[i];
                level= (level*qscale*quant_matrix[j])>>4;
                level= (level-1)|1;
                level = (level ^ SHOW_SBITS(re, &s->gb, 1)) - SHOW_SBITS(re, &s->gb, 1);
                LAST_SKIP_BITS(re, &s->gb, 1);
            } else {
                /* escape */
                run = SHOW_UBITS(re, &s->gb, 6)+1; LAST_SKIP_BITS(re, &s->gb, 6);
                UPDATE_CACHE(re, &s->gb);
                level = SHOW_SBITS(re, &s->gb, 8); SKIP_BITS(re, &s->gb, 8);
                if (level == -128) {
                    level = SHOW_UBITS(re, &s->gb, 8) - 256; LAST_SKIP_BITS(re, &s->gb, 8);
                } else if (level == 0) {
                    level = SHOW_UBITS(re, &s->gb, 8)      ; LAST_SKIP_BITS(re, &s->gb, 8);
                }
                i += run;
                j = scantable[i];
                if(level<0){
                    level= -level;
                    level= (level*qscale*quant_matrix[j])>>4;
                    level= (level-1)|1;
                    level= -level;
                }else{
                    level= (level*qscale*quant_matrix[j])>>4;
                    level= (level-1)|1;
                }
            }
            if (i > 63){
                av_log(s->avctx, AV_LOG_ERROR, "ac-tex damaged at %d %d\n", s->mb_x, s->mb_y);
                return -1;
            }

            block[j] = level;
        }
        CLOSE_READER(re, &s->gb);
    }
    s->block_last_index[n] = i;
    return 0;
}

int ff_mpeg1_decode_block_intra(MpegEncContext *s,
                                DCTELEM *block,
                                int n)
{
    return mpeg1_decode_block_intra(s, block, n);
}

static inline int mpeg1_decode_block_inter(MpegEncContext *s,
                                           DCTELEM *block,
                                           int n)
{
    int level, i, j, run;
    RLTable *rl = &ff_rl_mpeg1;
    uint8_t * const scantable= s->intra_scantable.permutated;
    const uint16_t *quant_matrix= s->inter_matrix;
    const int qscale= s->qscale;

    {
        OPEN_READER(re, &s->gb);
        i = -1;
        // special case for first coefficient, no need to add second VLC table
        UPDATE_CACHE(re, &s->gb);
        if (((int32_t)GET_CACHE(re, &s->gb)) < 0) {
            level= (3*qscale*quant_matrix[0])>>5;
            level= (level-1)|1;
            if(GET_CACHE(re, &s->gb)&0x40000000)
                level= -level;
            block[0] = level;
            i++;
            SKIP_BITS(re, &s->gb, 2);
            if(((int32_t)GET_CACHE(re, &s->gb)) <= (int32_t)0xBFFFFFFF)
                goto end;
        }
        /* now quantify & encode AC coefficients */
        for(;;) {
            GET_RL_VLC(level, run, re, &s->gb, rl->rl_vlc[0], TEX_VLC_BITS, 2, 0);

            if(level != 0) {
                i += run;
                j = scantable[i];
                level= ((level*2+1)*qscale*quant_matrix[j])>>5;
                level= (level-1)|1;
                level = (level ^ SHOW_SBITS(re, &s->gb, 1)) - SHOW_SBITS(re, &s->gb, 1);
                SKIP_BITS(re, &s->gb, 1);
            } else {
                /* escape */
                run = SHOW_UBITS(re, &s->gb, 6)+1; LAST_SKIP_BITS(re, &s->gb, 6);
                UPDATE_CACHE(re, &s->gb);
                level = SHOW_SBITS(re, &s->gb, 8); SKIP_BITS(re, &s->gb, 8);
                if (level == -128) {
                    level = SHOW_UBITS(re, &s->gb, 8) - 256; SKIP_BITS(re, &s->gb, 8);
                } else if (level == 0) {
                    level = SHOW_UBITS(re, &s->gb, 8)      ; SKIP_BITS(re, &s->gb, 8);
                }
                i += run;
                j = scantable[i];
                if(level<0){
                    level= -level;
                    level= ((level*2+1)*qscale*quant_matrix[j])>>5;
                    level= (level-1)|1;
                    level= -level;
                }else{
                    level= ((level*2+1)*qscale*quant_matrix[j])>>5;
                    level= (level-1)|1;
                }
            }
            if (i > 63){
                av_log(s->avctx, AV_LOG_ERROR, "ac-tex damaged at %d %d\n", s->mb_x, s->mb_y);
                return -1;
            }

            block[j] = level;
            if(((int32_t)GET_CACHE(re, &s->gb)) <= (int32_t)0xBFFFFFFF)
                break;
            UPDATE_CACHE(re, &s->gb);
        }
end:
        LAST_SKIP_BITS(re, &s->gb, 2);
        CLOSE_READER(re, &s->gb);
    }
    s->block_last_index[n] = i;
    return 0;
}

static inline int mpeg1_fast_decode_block_inter(MpegEncContext *s, DCTELEM *block, int n)
{
    int level, i, j, run;
    RLTable *rl = &ff_rl_mpeg1;
    uint8_t * const scantable= s->intra_scantable.permutated;
    const int qscale= s->qscale;

    {
        OPEN_READER(re, &s->gb);
        i = -1;
        // special case for first coefficient, no need to add second VLC table
        UPDATE_CACHE(re, &s->gb);
        if (((int32_t)GET_CACHE(re, &s->gb)) < 0) {
            level= (3*qscale)>>1;
            level= (level-1)|1;
            if(GET_CACHE(re, &s->gb)&0x40000000)
                level= -level;
            block[0] = level;
            i++;
            SKIP_BITS(re, &s->gb, 2);
            if(((int32_t)GET_CACHE(re, &s->gb)) <= (int32_t)0xBFFFFFFF)
                goto end;
        }

        /* now quantify & encode AC coefficients */
        for(;;) {
            GET_RL_VLC(level, run, re, &s->gb, rl->rl_vlc[0], TEX_VLC_BITS, 2, 0);

            if(level != 0) {
                i += run;
                j = scantable[i];
                level= ((level*2+1)*qscale)>>1;
                level= (level-1)|1;
                level = (level ^ SHOW_SBITS(re, &s->gb, 1)) - SHOW_SBITS(re, &s->gb, 1);
                SKIP_BITS(re, &s->gb, 1);
            } else {
                /* escape */
                run = SHOW_UBITS(re, &s->gb, 6)+1; LAST_SKIP_BITS(re, &s->gb, 6);
                UPDATE_CACHE(re, &s->gb);
                level = SHOW_SBITS(re, &s->gb, 8); SKIP_BITS(re, &s->gb, 8);
                if (level == -128) {
                    level = SHOW_UBITS(re, &s->gb, 8) - 256; SKIP_BITS(re, &s->gb, 8);
                } else if (level == 0) {
                    level = SHOW_UBITS(re, &s->gb, 8)      ; SKIP_BITS(re, &s->gb, 8);
                }
                i += run;
                j = scantable[i];
                if(level<0){
                    level= -level;
                    level= ((level*2+1)*qscale)>>1;
                    level= (level-1)|1;
                    level= -level;
                }else{
                    level= ((level*2+1)*qscale)>>1;
                    level= (level-1)|1;
                }
            }

            block[j] = level;
            if(((int32_t)GET_CACHE(re, &s->gb)) <= (int32_t)0xBFFFFFFF)
                break;
            UPDATE_CACHE(re, &s->gb);
        }
end:
        LAST_SKIP_BITS(re, &s->gb, 2);
        CLOSE_READER(re, &s->gb);
    }
    s->block_last_index[n] = i;
    return 0;
}

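/* The MPEG-2 variants below differ from MPEG-1 in the escape coding (a single
 * 12-bit signed level) and in mismatch control: instead of forcing each level
 * odd, the parity of the coefficient sum is tracked in "mismatch" and the LSB
 * of coefficient 63 is toggled at the end so that the sum is always odd. */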
848 |
static inline int mpeg2_decode_block_non_intra(MpegEncContext *s, |
849 |
DCTELEM *block, |
850 |
int n)
|
851 |
{ |
852 |
int level, i, j, run;
|
853 |
RLTable *rl = &ff_rl_mpeg1; |
854 |
uint8_t * const scantable= s->intra_scantable.permutated;
|
855 |
const uint16_t *quant_matrix;
|
856 |
const int qscale= s->qscale; |
857 |
int mismatch;
|
858 |
|
859 |
mismatch = 1;
|
860 |
|
861 |
{ |
862 |
OPEN_READER(re, &s->gb); |
863 |
i = -1;
|
864 |
if (n < 4) |
865 |
quant_matrix = s->inter_matrix; |
866 |
else
|
867 |
quant_matrix = s->chroma_inter_matrix; |
868 |
|
869 |
// special case for first coefficient, no need to add second VLC table
|
870 |
UPDATE_CACHE(re, &s->gb); |
871 |
if (((int32_t)GET_CACHE(re, &s->gb)) < 0) { |
872 |
level= (3*qscale*quant_matrix[0])>>5; |
873 |
if(GET_CACHE(re, &s->gb)&0x40000000) |
874 |
level= -level; |
875 |
block[0] = level;
|
876 |
mismatch ^= level; |
877 |
i++; |
878 |
SKIP_BITS(re, &s->gb, 2);
|
879 |
if(((int32_t)GET_CACHE(re, &s->gb)) <= (int32_t)0xBFFFFFFF) |
880 |
goto end;
|
881 |
} |
882 |
|
883 |
/* now quantify & encode AC coefficients */
|
884 |
for(;;) {
|
885 |
GET_RL_VLC(level, run, re, &s->gb, rl->rl_vlc[0], TEX_VLC_BITS, 2, 0); |
886 |
|
887 |
if(level != 0) { |
888 |
i += run; |
889 |
j = scantable[i]; |
890 |
level= ((level*2+1)*qscale*quant_matrix[j])>>5; |
891 |
level = (level ^ SHOW_SBITS(re, &s->gb, 1)) - SHOW_SBITS(re, &s->gb, 1); |
892 |
SKIP_BITS(re, &s->gb, 1);
|
893 |
} else {
|
894 |
/* escape */
|
895 |
run = SHOW_UBITS(re, &s->gb, 6)+1; LAST_SKIP_BITS(re, &s->gb, 6); |
896 |
UPDATE_CACHE(re, &s->gb); |
897 |
level = SHOW_SBITS(re, &s->gb, 12); SKIP_BITS(re, &s->gb, 12); |
898 |
|
899 |
i += run; |
900 |
j = scantable[i]; |
901 |
if(level<0){ |
902 |
level= ((-level*2+1)*qscale*quant_matrix[j])>>5; |
903 |
level= -level; |
904 |
}else{
|
905 |
level= ((level*2+1)*qscale*quant_matrix[j])>>5; |
906 |
} |
907 |
} |
908 |
if (i > 63){ |
909 |
av_log(s->avctx, AV_LOG_ERROR, "ac-tex damaged at %d %d\n", s->mb_x, s->mb_y);
|
910 |
return -1; |
911 |
} |
912 |
|
913 |
mismatch ^= level; |
914 |
block[j] = level; |
915 |
if(((int32_t)GET_CACHE(re, &s->gb)) <= (int32_t)0xBFFFFFFF) |
916 |
break;
|
917 |
UPDATE_CACHE(re, &s->gb); |
918 |
} |
919 |
end:
|
920 |
LAST_SKIP_BITS(re, &s->gb, 2);
|
921 |
CLOSE_READER(re, &s->gb); |
922 |
} |
923 |
block[63] ^= (mismatch & 1); |
924 |
|
925 |
s->block_last_index[n] = i; |
926 |
return 0; |
927 |
} |
928 |
|
929 |
static inline int mpeg2_fast_decode_block_non_intra(MpegEncContext *s, |
930 |
DCTELEM *block, |
931 |
int n)
|
932 |
{ |
933 |
int level, i, j, run;
|
934 |
RLTable *rl = &ff_rl_mpeg1; |
935 |
uint8_t * const scantable= s->intra_scantable.permutated;
|
936 |
const int qscale= s->qscale; |
937 |
OPEN_READER(re, &s->gb); |
938 |
i = -1;
|
939 |
|
940 |
// special case for first coefficient, no need to add second VLC table
|
941 |
UPDATE_CACHE(re, &s->gb); |
942 |
if (((int32_t)GET_CACHE(re, &s->gb)) < 0) { |
943 |
level= (3*qscale)>>1; |
944 |
if(GET_CACHE(re, &s->gb)&0x40000000) |
945 |
level= -level; |
946 |
block[0] = level;
|
947 |
i++; |
948 |
SKIP_BITS(re, &s->gb, 2);
|
949 |
if(((int32_t)GET_CACHE(re, &s->gb)) <= (int32_t)0xBFFFFFFF) |
950 |
goto end;
|
951 |
} |
952 |
|
953 |
/* now quantify & encode AC coefficients */
|
954 |
for(;;) {
|
955 |
GET_RL_VLC(level, run, re, &s->gb, rl->rl_vlc[0], TEX_VLC_BITS, 2, 0); |
956 |
|
957 |
if(level != 0) { |
958 |
i += run; |
959 |
j = scantable[i]; |
960 |
level= ((level*2+1)*qscale)>>1; |
961 |
level = (level ^ SHOW_SBITS(re, &s->gb, 1)) - SHOW_SBITS(re, &s->gb, 1); |
962 |
SKIP_BITS(re, &s->gb, 1);
|
963 |
} else {
|
964 |
/* escape */
|
965 |
run = SHOW_UBITS(re, &s->gb, 6)+1; LAST_SKIP_BITS(re, &s->gb, 6); |
966 |
UPDATE_CACHE(re, &s->gb); |
967 |
level = SHOW_SBITS(re, &s->gb, 12); SKIP_BITS(re, &s->gb, 12); |
968 |
|
969 |
i += run; |
970 |
j = scantable[i]; |
971 |
if(level<0){ |
972 |
level= ((-level*2+1)*qscale)>>1; |
973 |
level= -level; |
974 |
}else{
|
975 |
level= ((level*2+1)*qscale)>>1; |
976 |
} |
977 |
} |
978 |
|
979 |
block[j] = level; |
980 |
if(((int32_t)GET_CACHE(re, &s->gb)) <= (int32_t)0xBFFFFFFF) |
981 |
break;
|
982 |
UPDATE_CACHE(re, &s->gb); |
983 |
} |
984 |
end:
|
985 |
LAST_SKIP_BITS(re, &s->gb, 2);
|
986 |
CLOSE_READER(re, &s->gb); |
987 |
s->block_last_index[n] = i; |
988 |
return 0; |
989 |
} |
990 |
|
991 |
|
992 |
static inline int mpeg2_decode_block_intra(MpegEncContext *s, |
993 |
DCTELEM *block, |
994 |
int n)
|
995 |
{ |
996 |
int level, dc, diff, i, j, run;
|
997 |
int component;
|
998 |
RLTable *rl; |
999 |
uint8_t * const scantable= s->intra_scantable.permutated;
|
1000 |
const uint16_t *quant_matrix;
|
1001 |
const int qscale= s->qscale; |
1002 |
int mismatch;
|
1003 |
|
1004 |
/* DC coefficient */
|
1005 |
if (n < 4){ |
1006 |
quant_matrix = s->intra_matrix; |
1007 |
component = 0;
|
1008 |
}else{
|
1009 |
quant_matrix = s->chroma_intra_matrix; |
1010 |
component = (n&1) + 1; |
1011 |
} |
1012 |
diff = decode_dc(&s->gb, component); |
1013 |
if (diff >= 0xffff) |
1014 |
return -1; |
1015 |
dc = s->last_dc[component]; |
1016 |
dc += diff; |
1017 |
s->last_dc[component] = dc; |
1018 |
block[0] = dc << (3 - s->intra_dc_precision); |
1019 |
av_dlog(s->avctx, "dc=%d\n", block[0]); |
1020 |
mismatch = block[0] ^ 1; |
1021 |
i = 0;
|
1022 |
if (s->intra_vlc_format)
|
1023 |
rl = &ff_rl_mpeg2; |
1024 |
else
|
1025 |
rl = &ff_rl_mpeg1; |
1026 |
|
1027 |
{ |
1028 |
OPEN_READER(re, &s->gb); |
1029 |
/* now quantify & encode AC coefficients */
|
1030 |
for(;;) {
|
1031 |
UPDATE_CACHE(re, &s->gb); |
1032 |
GET_RL_VLC(level, run, re, &s->gb, rl->rl_vlc[0], TEX_VLC_BITS, 2, 0); |
1033 |
|
1034 |
if(level == 127){ |
1035 |
break;
|
1036 |
} else if(level != 0) { |
1037 |
i += run; |
1038 |
j = scantable[i]; |
1039 |
level= (level*qscale*quant_matrix[j])>>4;
|
1040 |
level = (level ^ SHOW_SBITS(re, &s->gb, 1)) - SHOW_SBITS(re, &s->gb, 1); |
1041 |
LAST_SKIP_BITS(re, &s->gb, 1);
|
1042 |
} else {
|
1043 |
/* escape */
|
1044 |
run = SHOW_UBITS(re, &s->gb, 6)+1; LAST_SKIP_BITS(re, &s->gb, 6); |
1045 |
UPDATE_CACHE(re, &s->gb); |
1046 |
level = SHOW_SBITS(re, &s->gb, 12); SKIP_BITS(re, &s->gb, 12); |
1047 |
i += run; |
1048 |
j = scantable[i]; |
1049 |
if(level<0){ |
1050 |
level= (-level*qscale*quant_matrix[j])>>4;
|
1051 |
level= -level; |
1052 |
}else{
|
1053 |
level= (level*qscale*quant_matrix[j])>>4;
|
1054 |
} |
1055 |
} |
1056 |
if (i > 63){ |
1057 |
av_log(s->avctx, AV_LOG_ERROR, "ac-tex damaged at %d %d\n", s->mb_x, s->mb_y);
|
1058 |
return -1; |
1059 |
} |
1060 |
|
1061 |
mismatch^= level; |
1062 |
block[j] = level; |
1063 |
} |
1064 |
CLOSE_READER(re, &s->gb); |
1065 |
} |
1066 |
block[63]^= mismatch&1; |
1067 |
|
1068 |
s->block_last_index[n] = i; |
1069 |
return 0; |
1070 |
} |
1071 |
|
1072 |
static inline int mpeg2_fast_decode_block_intra(MpegEncContext *s, |
1073 |
DCTELEM *block, |
1074 |
int n)
|
1075 |
{ |
1076 |
int level, dc, diff, j, run;
|
1077 |
int component;
|
1078 |
RLTable *rl; |
1079 |
uint8_t * scantable= s->intra_scantable.permutated; |
1080 |
const uint16_t *quant_matrix;
|
1081 |
const int qscale= s->qscale; |
1082 |
|
1083 |
/* DC coefficient */
|
1084 |
if (n < 4){ |
1085 |
quant_matrix = s->intra_matrix; |
1086 |
component = 0;
|
1087 |
}else{
|
1088 |
quant_matrix = s->chroma_intra_matrix; |
1089 |
component = (n&1) + 1; |
1090 |
} |
1091 |
diff = decode_dc(&s->gb, component); |
1092 |
if (diff >= 0xffff) |
1093 |
return -1; |
1094 |
dc = s->last_dc[component]; |
1095 |
dc += diff; |
1096 |
s->last_dc[component] = dc; |
1097 |
block[0] = dc << (3 - s->intra_dc_precision); |
1098 |
if (s->intra_vlc_format)
|
1099 |
rl = &ff_rl_mpeg2; |
1100 |
else
|
1101 |
rl = &ff_rl_mpeg1; |
1102 |
|
1103 |
{ |
1104 |
OPEN_READER(re, &s->gb); |
1105 |
/* now quantify & encode AC coefficients */
|
1106 |
for(;;) {
|
1107 |
UPDATE_CACHE(re, &s->gb); |
1108 |
GET_RL_VLC(level, run, re, &s->gb, rl->rl_vlc[0], TEX_VLC_BITS, 2, 0); |
1109 |
|
1110 |
if(level == 127){ |
1111 |
break;
|
1112 |
} else if(level != 0) { |
1113 |
scantable += run; |
1114 |
j = *scantable; |
1115 |
level= (level*qscale*quant_matrix[j])>>4;
|
1116 |
level = (level ^ SHOW_SBITS(re, &s->gb, 1)) - SHOW_SBITS(re, &s->gb, 1); |
1117 |
LAST_SKIP_BITS(re, &s->gb, 1);
|
1118 |
} else {
|
1119 |
/* escape */
|
1120 |
run = SHOW_UBITS(re, &s->gb, 6)+1; LAST_SKIP_BITS(re, &s->gb, 6); |
1121 |
UPDATE_CACHE(re, &s->gb); |
1122 |
level = SHOW_SBITS(re, &s->gb, 12); SKIP_BITS(re, &s->gb, 12); |
1123 |
scantable += run; |
1124 |
j = *scantable; |
1125 |
if(level<0){ |
1126 |
level= (-level*qscale*quant_matrix[j])>>4;
|
1127 |
level= -level; |
1128 |
}else{
|
1129 |
level= (level*qscale*quant_matrix[j])>>4;
|
1130 |
} |
1131 |
} |
1132 |
|
1133 |
block[j] = level; |
1134 |
} |
1135 |
CLOSE_READER(re, &s->gb); |
1136 |
} |
1137 |
|
1138 |
s->block_last_index[n] = scantable - s->intra_scantable.permutated; |
1139 |
return 0; |
1140 |
} |
1141 |
|
1142 |
typedef struct Mpeg1Context { |
1143 |
MpegEncContext mpeg_enc_ctx; |
1144 |
int mpeg_enc_ctx_allocated; /* true if decoding context allocated */ |
1145 |
int repeat_field; /* true if we must repeat the field */ |
1146 |
AVPanScan pan_scan; /**< some temporary storage for the panscan */
|
1147 |
int slice_count;
|
1148 |
int swap_uv;//indicate VCR2 |
1149 |
int save_aspect_info;
|
1150 |
int save_width, save_height, save_progressive_seq;
|
1151 |
AVRational frame_rate_ext; ///< MPEG-2 specific framerate modificator
|
1152 |
int sync; ///< Did we reach a sync point like a GOP/SEQ/KEYFrame? |
1153 |
} Mpeg1Context; |
1154 |
|
1155 |
static av_cold int mpeg_decode_init(AVCodecContext *avctx) |
1156 |
{ |
1157 |
Mpeg1Context *s = avctx->priv_data; |
1158 |
MpegEncContext *s2 = &s->mpeg_enc_ctx; |
1159 |
int i;
|
1160 |
|
1161 |
/* we need some permutation to store matrices,
|
1162 |
* until MPV_common_init() sets the real permutation. */
|
1163 |
for(i=0;i<64;i++) |
1164 |
s2->dsp.idct_permutation[i]=i; |
1165 |
|
1166 |
MPV_decode_defaults(s2); |
1167 |
|
1168 |
s->mpeg_enc_ctx.avctx= avctx; |
1169 |
s->mpeg_enc_ctx.flags= avctx->flags; |
1170 |
s->mpeg_enc_ctx.flags2= avctx->flags2; |
1171 |
ff_mpeg12_common_init(&s->mpeg_enc_ctx); |
1172 |
ff_mpeg12_init_vlcs(); |
1173 |
|
1174 |
s->mpeg_enc_ctx_allocated = 0;
|
1175 |
s->mpeg_enc_ctx.picture_number = 0;
|
1176 |
s->repeat_field = 0;
|
1177 |
s->mpeg_enc_ctx.codec_id= avctx->codec->id; |
1178 |
avctx->color_range= AVCOL_RANGE_MPEG; |
1179 |
if (avctx->codec->id == CODEC_ID_MPEG1VIDEO)
|
1180 |
avctx->chroma_sample_location = AVCHROMA_LOC_CENTER; |
1181 |
else
|
1182 |
avctx->chroma_sample_location = AVCHROMA_LOC_LEFT; |
1183 |
return 0; |
1184 |
} |
1185 |
|
1186 |
static int mpeg_decode_update_thread_context(AVCodecContext *avctx, const AVCodecContext *avctx_from) |
1187 |
{ |
1188 |
Mpeg1Context *ctx = avctx->priv_data, *ctx_from = avctx_from->priv_data; |
1189 |
MpegEncContext *s = &ctx->mpeg_enc_ctx, *s1 = &ctx_from->mpeg_enc_ctx; |
1190 |
int err;
|
1191 |
|
1192 |
if(avctx == avctx_from || !ctx_from->mpeg_enc_ctx_allocated || !s1->context_initialized)
|
1193 |
return 0; |
1194 |
|
1195 |
err = ff_mpeg_update_thread_context(avctx, avctx_from); |
1196 |
if(err) return err; |
1197 |
|
1198 |
if(!ctx->mpeg_enc_ctx_allocated)
|
1199 |
memcpy(s + 1, s1 + 1, sizeof(Mpeg1Context) - sizeof(MpegEncContext)); |
1200 |
|
1201 |
if(!(s->pict_type == FF_B_TYPE || s->low_delay))
|
1202 |
s->picture_number++; |
1203 |
|
1204 |
return 0; |
1205 |
} |
1206 |
|
1207 |
static void quant_matrix_rebuild(uint16_t *matrix, const uint8_t *old_perm, |
1208 |
const uint8_t *new_perm){
|
1209 |
uint16_t temp_matrix[64];
|
1210 |
int i;
|
1211 |
|
1212 |
memcpy(temp_matrix,matrix,64*sizeof(uint16_t)); |
1213 |
|
1214 |
for(i=0;i<64;i++){ |
1215 |
matrix[new_perm[i]] = temp_matrix[old_perm[i]]; |
1216 |
} |
1217 |
} |
1218 |
|
1219 |
static const enum PixelFormat mpeg1_hwaccel_pixfmt_list_420[] = { |
1220 |
PIX_FMT_XVMC_MPEG2_IDCT, |
1221 |
PIX_FMT_XVMC_MPEG2_MC, |
1222 |
PIX_FMT_VDPAU_MPEG1, |
1223 |
PIX_FMT_DXVA2_VLD, |
1224 |
PIX_FMT_VAAPI_VLD, |
1225 |
PIX_FMT_YUV420P, |
1226 |
PIX_FMT_NONE |
1227 |
}; |
1228 |
|
1229 |
static const enum PixelFormat mpeg2_hwaccel_pixfmt_list_420[] = { |
1230 |
PIX_FMT_XVMC_MPEG2_IDCT, |
1231 |
PIX_FMT_XVMC_MPEG2_MC, |
1232 |
PIX_FMT_VDPAU_MPEG2, |
1233 |
PIX_FMT_DXVA2_VLD, |
1234 |
PIX_FMT_VAAPI_VLD, |
1235 |
PIX_FMT_YUV420P, |
1236 |
PIX_FMT_NONE |
1237 |
}; |
1238 |
|
1239 |
static inline int uses_vdpau(AVCodecContext *avctx) { |
1240 |
return avctx->pix_fmt == PIX_FMT_VDPAU_MPEG1 || avctx->pix_fmt == PIX_FMT_VDPAU_MPEG2;
|
1241 |
} |
1242 |
|
1243 |
static enum PixelFormat mpeg_get_pixelformat(AVCodecContext *avctx){ |
1244 |
Mpeg1Context *s1 = avctx->priv_data; |
1245 |
MpegEncContext *s = &s1->mpeg_enc_ctx; |
1246 |
|
1247 |
if(s->chroma_format < 2) { |
1248 |
enum PixelFormat res;
|
1249 |
res = avctx->get_format(avctx, |
1250 |
avctx->codec_id == CODEC_ID_MPEG1VIDEO ? |
1251 |
mpeg1_hwaccel_pixfmt_list_420 : |
1252 |
mpeg2_hwaccel_pixfmt_list_420); |
1253 |
if (res != PIX_FMT_XVMC_MPEG2_IDCT && res != PIX_FMT_XVMC_MPEG2_MC) {
|
1254 |
avctx->xvmc_acceleration = 0;
|
1255 |
} else if (!avctx->xvmc_acceleration) { |
1256 |
avctx->xvmc_acceleration = 2;
|
1257 |
} |
1258 |
return res;
|
1259 |
} else if(s->chroma_format == 2) |
1260 |
return PIX_FMT_YUV422P;
|
1261 |
else
|
1262 |
return PIX_FMT_YUV444P;
|
1263 |
} |
1264 |
|
1265 |
/* Call this function when we know all parameters.
|
1266 |
* It may be called in different places for MPEG-1 and MPEG-2. */
|
1267 |
static int mpeg_decode_postinit(AVCodecContext *avctx){ |
1268 |
Mpeg1Context *s1 = avctx->priv_data; |
1269 |
MpegEncContext *s = &s1->mpeg_enc_ctx; |
1270 |
uint8_t old_permutation[64];
|
1271 |
|
1272 |
if (
|
1273 |
(s1->mpeg_enc_ctx_allocated == 0)||
|
1274 |
avctx->coded_width != s->width || |
1275 |
avctx->coded_height != s->height|| |
1276 |
s1->save_width != s->width || |
1277 |
s1->save_height != s->height || |
1278 |
s1->save_aspect_info != s->aspect_ratio_info|| |
1279 |
s1->save_progressive_seq != s->progressive_sequence || |
1280 |
0)
|
1281 |
{ |
1282 |
|
1283 |
if (s1->mpeg_enc_ctx_allocated) {
|
1284 |
ParseContext pc= s->parse_context; |
1285 |
s->parse_context.buffer=0;
|
1286 |
MPV_common_end(s); |
1287 |
s->parse_context= pc; |
1288 |
} |
1289 |
|
1290 |
if( (s->width == 0 )||(s->height == 0)) |
1291 |
return -2; |
1292 |
|
1293 |
avcodec_set_dimensions(avctx, s->width, s->height); |
1294 |
avctx->bit_rate = s->bit_rate; |
1295 |
s1->save_aspect_info = s->aspect_ratio_info; |
1296 |
s1->save_width = s->width; |
1297 |
s1->save_height = s->height; |
1298 |
s1->save_progressive_seq = s->progressive_sequence; |
1299 |
|
1300 |
/* low_delay may be forced, in this case we will have B-frames
|
1301 |
* that behave like P-frames. */
|
1302 |
avctx->has_b_frames = !(s->low_delay); |
1303 |
|
1304 |
assert((avctx->sub_id==1) == (avctx->codec_id==CODEC_ID_MPEG1VIDEO));
|
1305 |
if(avctx->codec_id==CODEC_ID_MPEG1VIDEO){
|
1306 |
//MPEG-1 fps
|
1307 |
avctx->time_base.den= ff_frame_rate_tab[s->frame_rate_index].num; |
1308 |
avctx->time_base.num= ff_frame_rate_tab[s->frame_rate_index].den; |
1309 |
//MPEG-1 aspect
|
1310 |
avctx->sample_aspect_ratio= av_d2q( |
1311 |
1.0/ff_mpeg1_aspect[s->aspect_ratio_info], 255); |
1312 |
avctx->ticks_per_frame=1;
|
1313 |
}else{//MPEG-2 |
1314 |
//MPEG-2 fps
|
1315 |
av_reduce( |
1316 |
&s->avctx->time_base.den, |
1317 |
&s->avctx->time_base.num, |
1318 |
ff_frame_rate_tab[s->frame_rate_index].num * s1->frame_rate_ext.num*2,
|
1319 |
ff_frame_rate_tab[s->frame_rate_index].den * s1->frame_rate_ext.den, |
1320 |
1<<30); |
1321 |
avctx->ticks_per_frame=2;
|
1322 |
//MPEG-2 aspect
|
1323 |
if(s->aspect_ratio_info > 1){ |
1324 |
AVRational dar= |
1325 |
av_mul_q( |
1326 |
av_div_q( |
1327 |
ff_mpeg2_aspect[s->aspect_ratio_info], |
1328 |
(AVRational){s1->pan_scan.width, s1->pan_scan.height} |
1329 |
), |
1330 |
(AVRational){s->width, s->height}); |
1331 |
|
1332 |
//we ignore the spec here and guess a bit as reality does not match the spec, see for example
|
1333 |
// res_change_ffmpeg_aspect.ts and sequence-display-aspect.mpg
|
1334 |
//issue1613, 621, 562
|
1335 |
if( (s1->pan_scan.width == 0 )||(s1->pan_scan.height == 0) |
1336 |
|| (av_cmp_q(dar,(AVRational){4,3})&&av_cmp_q(dar,(AVRational){16,9}))){ |
1337 |
s->avctx->sample_aspect_ratio= |
1338 |
av_div_q( |
1339 |
ff_mpeg2_aspect[s->aspect_ratio_info], |
1340 |
(AVRational){s->width, s->height} |
1341 |
); |
1342 |
}else{
|
1343 |
s->avctx->sample_aspect_ratio= |
1344 |
av_div_q( |
1345 |
ff_mpeg2_aspect[s->aspect_ratio_info], |
1346 |
(AVRational){s1->pan_scan.width, s1->pan_scan.height} |
1347 |
); |
1348 |
//issue1613 4/3 16/9 -> 16/9
|
1349 |
//res_change_ffmpeg_aspect.ts 4/3 225/44 ->4/3
|
1350 |
//widescreen-issue562.mpg 4/3 16/9 -> 16/9
|
1351 |
// s->avctx->sample_aspect_ratio= av_mul_q(s->avctx->sample_aspect_ratio, (AVRational){s->width, s->height});
|
1352 |
//av_log(NULL, AV_LOG_ERROR, "A %d/%d\n",ff_mpeg2_aspect[s->aspect_ratio_info].num, ff_mpeg2_aspect[s->aspect_ratio_info].den);
|
1353 |
//av_log(NULL, AV_LOG_ERROR, "B %d/%d\n",s->avctx->sample_aspect_ratio.num, s->avctx->sample_aspect_ratio.den);
|
1354 |
} |
1355 |
}else{
|
1356 |
s->avctx->sample_aspect_ratio= |
1357 |
ff_mpeg2_aspect[s->aspect_ratio_info]; |
1358 |
} |
1359 |
}//MPEG-2
|
1360 |
|
1361 |
avctx->pix_fmt = mpeg_get_pixelformat(avctx); |
1362 |
avctx->hwaccel = ff_find_hwaccel(avctx->codec->id, avctx->pix_fmt); |
1363 |
//until then pix_fmt may be changed right after codec init
|
1364 |
if( avctx->pix_fmt == PIX_FMT_XVMC_MPEG2_IDCT ||
|
1365 |
avctx->hwaccel ) |
1366 |
if( avctx->idct_algo == FF_IDCT_AUTO )
|
1367 |
avctx->idct_algo = FF_IDCT_SIMPLE; |
1368 |
|
1369 |
/* Quantization matrices may need reordering
|
1370 |
* if DCT permutation is changed. */
|
1371 |
memcpy(old_permutation,s->dsp.idct_permutation,64*sizeof(uint8_t)); |
1372 |
|
1373 |
if (MPV_common_init(s) < 0) |
1374 |
return -2; |
1375 |
|
1376 |
quant_matrix_rebuild(s->intra_matrix, old_permutation,s->dsp.idct_permutation); |
1377 |
quant_matrix_rebuild(s->inter_matrix, old_permutation,s->dsp.idct_permutation); |
1378 |
quant_matrix_rebuild(s->chroma_intra_matrix,old_permutation,s->dsp.idct_permutation); |
1379 |
quant_matrix_rebuild(s->chroma_inter_matrix,old_permutation,s->dsp.idct_permutation); |
1380 |
|
1381 |
s1->mpeg_enc_ctx_allocated = 1;
|
1382 |
} |
1383 |
return 0; |
1384 |
} |
1385 |
|
1386 |
static int mpeg1_decode_picture(AVCodecContext *avctx, |
1387 |
const uint8_t *buf, int buf_size) |
1388 |
{ |
1389 |
Mpeg1Context *s1 = avctx->priv_data; |
1390 |
MpegEncContext *s = &s1->mpeg_enc_ctx; |
1391 |
int ref, f_code, vbv_delay;
|
1392 |
|
1393 |
init_get_bits(&s->gb, buf, buf_size*8);
|
1394 |
|
1395 |
ref = get_bits(&s->gb, 10); /* temporal ref */ |
1396 |
s->pict_type = get_bits(&s->gb, 3);
|
1397 |
if(s->pict_type == 0 || s->pict_type > 3) |
1398 |
return -1; |
1399 |
|
1400 |
vbv_delay= get_bits(&s->gb, 16);
|
1401 |
if (s->pict_type == FF_P_TYPE || s->pict_type == FF_B_TYPE) {
|
1402 |
s->full_pel[0] = get_bits1(&s->gb);
|
1403 |
f_code = get_bits(&s->gb, 3);
|
1404 |
if (f_code == 0 && avctx->error_recognition >= FF_ER_COMPLIANT) |
1405 |
return -1; |
1406 |
s->mpeg_f_code[0][0] = f_code; |
1407 |
s->mpeg_f_code[0][1] = f_code; |
1408 |
} |
1409 |
if (s->pict_type == FF_B_TYPE) {
|
1410 |
s->full_pel[1] = get_bits1(&s->gb);
|
1411 |
f_code = get_bits(&s->gb, 3);
|
1412 |
if (f_code == 0 && avctx->error_recognition >= FF_ER_COMPLIANT) |
1413 |
return -1; |
1414 |
s->mpeg_f_code[1][0] = f_code; |
1415 |
s->mpeg_f_code[1][1] = f_code; |
1416 |
} |
1417 |
s->current_picture.pict_type= s->pict_type; |
1418 |
s->current_picture.key_frame= s->pict_type == FF_I_TYPE; |
1419 |
|
1420 |
if(avctx->debug & FF_DEBUG_PICT_INFO)
|
1421 |
av_log(avctx, AV_LOG_DEBUG, "vbv_delay %d, ref %d type:%d\n", vbv_delay, ref, s->pict_type);
|
1422 |
|
1423 |
s->y_dc_scale = 8;
|
1424 |
s->c_dc_scale = 8;
|
1425 |
return 0; |
1426 |
} |
1427 |
|
1428 |
static void mpeg_decode_sequence_extension(Mpeg1Context *s1) |
1429 |
{ |
1430 |
MpegEncContext *s= &s1->mpeg_enc_ctx; |
1431 |
int horiz_size_ext, vert_size_ext;
|
1432 |
int bit_rate_ext;
|
1433 |
|
1434 |
skip_bits(&s->gb, 1); /* profile and level esc*/ |
1435 |
s->avctx->profile= get_bits(&s->gb, 3);
|
1436 |
s->avctx->level= get_bits(&s->gb, 4);
|
1437 |
s->progressive_sequence = get_bits1(&s->gb); /* progressive_sequence */
|
1438 |
s->chroma_format = get_bits(&s->gb, 2); /* chroma_format 1=420, 2=422, 3=444 */ |
1439 |
horiz_size_ext = get_bits(&s->gb, 2);
|
1440 |
vert_size_ext = get_bits(&s->gb, 2);
|
1441 |
s->width |= (horiz_size_ext << 12);
|
1442 |
s->height |= (vert_size_ext << 12);
|
1443 |
bit_rate_ext = get_bits(&s->gb, 12); /* XXX: handle it */ |
1444 |
s->bit_rate += (bit_rate_ext << 18) * 400; |
1445 |
skip_bits1(&s->gb); /* marker */
|
1446 |
s->avctx->rc_buffer_size += get_bits(&s->gb, 8)*1024*16<<10; |
1447 |
|
1448 |
s->low_delay = get_bits1(&s->gb); |
1449 |
if(s->flags & CODEC_FLAG_LOW_DELAY) s->low_delay=1; |
1450 |
|
1451 |
s1->frame_rate_ext.num = get_bits(&s->gb, 2)+1; |
1452 |
s1->frame_rate_ext.den = get_bits(&s->gb, 5)+1; |
1453 |
|
1454 |
av_dlog(s->avctx, "sequence extension\n");
|
1455 |
s->codec_id= s->avctx->codec_id= CODEC_ID_MPEG2VIDEO; |
1456 |
s->avctx->sub_id = 2; /* indicates MPEG-2 found */ |
1457 |
|
1458 |
if(s->avctx->debug & FF_DEBUG_PICT_INFO)
|
1459 |
av_log(s->avctx, AV_LOG_DEBUG, "profile: %d, level: %d vbv buffer: %d, bitrate:%d\n",
|
1460 |
s->avctx->profile, s->avctx->level, s->avctx->rc_buffer_size, s->bit_rate); |
1461 |
|
1462 |
} |
1463 |
|
1464 |
static void mpeg_decode_sequence_display_extension(Mpeg1Context *s1) |
1465 |
{ |
1466 |
MpegEncContext *s= &s1->mpeg_enc_ctx; |
1467 |
int color_description, w, h;
|
1468 |
|
1469 |
skip_bits(&s->gb, 3); /* video format */ |
1470 |
color_description= get_bits1(&s->gb); |
1471 |
if(color_description){
|
1472 |
s->avctx->color_primaries= get_bits(&s->gb, 8);
|
1473 |
s->avctx->color_trc = get_bits(&s->gb, 8);
|
1474 |
s->avctx->colorspace = get_bits(&s->gb, 8);
|
1475 |
} |
1476 |
w= get_bits(&s->gb, 14);
|
1477 |
skip_bits(&s->gb, 1); //marker |
1478 |
h= get_bits(&s->gb, 14);
|
1479 |
// remaining 3 bits are zero padding
|
1480 |
|
1481 |
s1->pan_scan.width= 16*w;
|
1482 |
s1->pan_scan.height=16*h;
|
1483 |
|
1484 |
if(s->avctx->debug & FF_DEBUG_PICT_INFO)
|
1485 |
av_log(s->avctx, AV_LOG_DEBUG, "sde w:%d, h:%d\n", w, h);
|
1486 |
} |
1487 |
|
1488 |
static void mpeg_decode_picture_display_extension(Mpeg1Context *s1) |
1489 |
{ |
1490 |
MpegEncContext *s= &s1->mpeg_enc_ctx; |
1491 |
int i,nofco;
|
1492 |
|
1493 |
nofco = 1;
|
1494 |
if(s->progressive_sequence){
|
1495 |
if(s->repeat_first_field){
|
1496 |
nofco++; |
1497 |
if(s->top_field_first)
|
1498 |
nofco++; |
1499 |
} |
1500 |
}else{
|
1501 |
if(s->picture_structure == PICT_FRAME){
|
1502 |
nofco++; |
1503 |
if(s->repeat_first_field)
|
1504 |
nofco++; |
1505 |
} |
1506 |
} |
1507 |
for(i=0; i<nofco; i++){ |
1508 |
s1->pan_scan.position[i][0]= get_sbits(&s->gb, 16); |
1509 |
skip_bits(&s->gb, 1); //marker |
1510 |
s1->pan_scan.position[i][1]= get_sbits(&s->gb, 16); |
1511 |
skip_bits(&s->gb, 1); //marker |
1512 |
} |
1513 |
|
1514 |
if(s->avctx->debug & FF_DEBUG_PICT_INFO)
|
1515 |
av_log(s->avctx, AV_LOG_DEBUG, "pde (%d,%d) (%d,%d) (%d,%d)\n",
|
1516 |
s1->pan_scan.position[0][0], s1->pan_scan.position[0][1], |
1517 |
s1->pan_scan.position[1][0], s1->pan_scan.position[1][1], |
1518 |
s1->pan_scan.position[2][0], s1->pan_scan.position[2][1] |
1519 |
); |
1520 |
} |
1521 |
|
1522 |
static int load_matrix(MpegEncContext *s, uint16_t matrix0[64], uint16_t matrix1[64], int intra){ |
1523 |
int i;
|
1524 |
|
1525 |
for(i=0; i<64; i++) { |
1526 |
int j = s->dsp.idct_permutation[ ff_zigzag_direct[i] ];
|
1527 |
int v = get_bits(&s->gb, 8); |
1528 |
if(v==0){ |
1529 |
av_log(s->avctx, AV_LOG_ERROR, "matrix damaged\n");
|
1530 |
return -1; |
1531 |
} |
1532 |
if(intra && i==0 && v!=8){ |
1533 |
av_log(s->avctx, AV_LOG_ERROR, "intra matrix invalid, ignoring\n");
|
1534 |
v= 8; // needed by pink.mpg / issue1046 |
1535 |
} |
1536 |
matrix0[j] = v; |
1537 |
if(matrix1)
|
1538 |
matrix1[j] = v; |
1539 |
} |
1540 |
return 0; |
1541 |
} |
1542 |
|
1543 |
static void mpeg_decode_quant_matrix_extension(MpegEncContext *s) |
1544 |
{ |
1545 |
av_dlog(s->avctx, "matrix extension\n");
|
1546 |
|
1547 |
if(get_bits1(&s->gb)) load_matrix(s, s->chroma_intra_matrix, s->intra_matrix, 1); |
1548 |
if(get_bits1(&s->gb)) load_matrix(s, s->chroma_inter_matrix, s->inter_matrix, 0); |
1549 |
if(get_bits1(&s->gb)) load_matrix(s, s->chroma_intra_matrix, NULL , 1); |
1550 |
if(get_bits1(&s->gb)) load_matrix(s, s->chroma_inter_matrix, NULL , 0); |
1551 |
} |
1552 |
|
1553 |
static void mpeg_decode_picture_coding_extension(Mpeg1Context *s1) |
1554 |
{ |
1555 |
MpegEncContext *s= &s1->mpeg_enc_ctx; |
1556 |
|
1557 |
s->full_pel[0] = s->full_pel[1] = 0; |
1558 |
s->mpeg_f_code[0][0] = get_bits(&s->gb, 4); |
1559 |
s->mpeg_f_code[0][1] = get_bits(&s->gb, 4); |
1560 |
s->mpeg_f_code[1][0] = get_bits(&s->gb, 4); |
1561 |
s->mpeg_f_code[1][1] = get_bits(&s->gb, 4); |
1562 |
if(!s->pict_type && s1->mpeg_enc_ctx_allocated){
|
1563 |
av_log(s->avctx, AV_LOG_ERROR, "Missing picture start code, guessing missing values\n");
|
1564 |
if(s->mpeg_f_code[1][0] == 15 && s->mpeg_f_code[1][1]==15){ |
1565 |
if(s->mpeg_f_code[0][0] == 15 && s->mpeg_f_code[0][1] == 15) |
1566 |
s->pict_type= FF_I_TYPE; |
1567 |
else
|
1568 |
s->pict_type= FF_P_TYPE; |
1569 |
}else
|
1570 |
s->pict_type= FF_B_TYPE; |
1571 |
s->current_picture.pict_type= s->pict_type; |
1572 |
s->current_picture.key_frame= s->pict_type == FF_I_TYPE; |
1573 |
} |
1574 |
s->intra_dc_precision = get_bits(&s->gb, 2);
|
1575 |
s->picture_structure = get_bits(&s->gb, 2);
|
1576 |
s->top_field_first = get_bits1(&s->gb); |
1577 |
s->frame_pred_frame_dct = get_bits1(&s->gb); |
1578 |
s->concealment_motion_vectors = get_bits1(&s->gb); |
1579 |
s->q_scale_type = get_bits1(&s->gb); |
1580 |
s->intra_vlc_format = get_bits1(&s->gb); |
1581 |
s->alternate_scan = get_bits1(&s->gb); |
1582 |
s->repeat_first_field = get_bits1(&s->gb); |
1583 |
s->chroma_420_type = get_bits1(&s->gb); |
1584 |
s->progressive_frame = get_bits1(&s->gb); |
1585 |
|
1586 |
if(s->progressive_sequence && !s->progressive_frame){
|
1587 |
s->progressive_frame= 1;
|
1588 |
av_log(s->avctx, AV_LOG_ERROR, "interlaced frame in progressive sequence, ignoring\n");
|
1589 |
} |
1590 |
|
1591 |
if(s->picture_structure==0 || (s->progressive_frame && s->picture_structure!=PICT_FRAME)){ |
1592 |
av_log(s->avctx, AV_LOG_ERROR, "picture_structure %d invalid, ignoring\n", s->picture_structure);
|
1593 |
s->picture_structure= PICT_FRAME; |
1594 |
} |
1595 |
|
1596 |
if(s->progressive_sequence && !s->frame_pred_frame_dct){
|
1597 |
av_log(s->avctx, AV_LOG_ERROR, "invalid frame_pred_frame_dct\n");
|
1598 |
} |
1599 |
|
1600 |
if(s->picture_structure == PICT_FRAME){
|
1601 |
s->first_field=0;
|
1602 |
s->v_edge_pos= 16*s->mb_height;
|
1603 |
}else{
|
1604 |
s->first_field ^= 1;
|
1605 |
s->v_edge_pos= 8*s->mb_height;
|
1606 |
memset(s->mbskip_table, 0, s->mb_stride*s->mb_height);
|
1607 |
} |
1608 |
|
1609 |
if(s->alternate_scan){
|
1610 |
ff_init_scantable(s->dsp.idct_permutation, &s->inter_scantable , ff_alternate_vertical_scan); |
1611 |
ff_init_scantable(s->dsp.idct_permutation, &s->intra_scantable , ff_alternate_vertical_scan); |
1612 |
}else{
|
1613 |
ff_init_scantable(s->dsp.idct_permutation, &s->inter_scantable , ff_zigzag_direct); |
1614 |
ff_init_scantable(s->dsp.idct_permutation, &s->intra_scantable , ff_zigzag_direct); |
1615 |
} |
1616 |
|
1617 |
/* composite display not parsed */
|
1618 |
av_dlog(s->avctx, "intra_dc_precision=%d\n", s->intra_dc_precision);
|
1619 |
av_dlog(s->avctx, "picture_structure=%d\n", s->picture_structure);
|
1620 |
av_dlog(s->avctx, "top field first=%d\n", s->top_field_first);
|
1621 |
av_dlog(s->avctx, "repeat first field=%d\n", s->repeat_first_field);
|
1622 |
av_dlog(s->avctx, "conceal=%d\n", s->concealment_motion_vectors);
|
1623 |
av_dlog(s->avctx, "intra_vlc_format=%d\n", s->intra_vlc_format);
|
1624 |
av_dlog(s->avctx, "alternate_scan=%d\n", s->alternate_scan);
|
1625 |
av_dlog(s->avctx, "frame_pred_frame_dct=%d\n", s->frame_pred_frame_dct);
|
1626 |
av_dlog(s->avctx, "progressive_frame=%d\n", s->progressive_frame);
|
1627 |
} |
1628 |
|
1629 |
static void exchange_uv(MpegEncContext *s){ |
1630 |
DCTELEM (*tmp)[64];
|
1631 |
|
1632 |
tmp = s->pblocks[4];
|
1633 |
s->pblocks[4] = s->pblocks[5]; |
1634 |
s->pblocks[5] = tmp;
|
1635 |
} |
1636 |
|
1637 |
static int mpeg_field_start(MpegEncContext *s, const uint8_t *buf, int buf_size){
    AVCodecContext *avctx= s->avctx;
    Mpeg1Context *s1 = (Mpeg1Context*)s;

    /* start frame decoding */
    if(s->first_field || s->picture_structure==PICT_FRAME){
        if(MPV_frame_start(s, avctx) < 0)
            return -1;

        ff_er_frame_start(s);

        /* first check if we must repeat the frame */
        s->current_picture_ptr->repeat_pict = 0;
        if (s->repeat_first_field) {
            if (s->progressive_sequence) {
                if (s->top_field_first)
                    s->current_picture_ptr->repeat_pict = 4;
                else
                    s->current_picture_ptr->repeat_pict = 2;
            } else if (s->progressive_frame) {
                s->current_picture_ptr->repeat_pict = 1;
            }
        }

        *s->current_picture_ptr->pan_scan= s1->pan_scan;

        if (HAVE_PTHREADS && avctx->active_thread_type&FF_THREAD_FRAME)
            ff_thread_finish_setup(avctx);
    }else{ //second field
        int i;

        if(!s->current_picture_ptr){
            av_log(s->avctx, AV_LOG_ERROR, "first field missing\n");
            return -1;
        }

        for(i=0; i<4; i++){
            s->current_picture.data[i] = s->current_picture_ptr->data[i];
            if(s->picture_structure == PICT_BOTTOM_FIELD){
                s->current_picture.data[i] += s->current_picture_ptr->linesize[i];
            }
        }
    }

    if (avctx->hwaccel) {
        if (avctx->hwaccel->start_frame(avctx, buf, buf_size) < 0)
            return -1;
    }

    // MPV_frame_start will call this function too,
    // but we need to call it on every field
    if(CONFIG_MPEG_XVMC_DECODER && s->avctx->xvmc_acceleration)
        if(ff_xvmc_field_start(s,avctx) < 0)
            return -1;

    return 0;
}

#define DECODE_SLICE_ERROR -1
#define DECODE_SLICE_OK 0

/**
 * decodes a slice. MpegEncContext.mb_y must be set to the MB row from the startcode
 * @return DECODE_SLICE_ERROR if the slice is damaged<br>
 *         DECODE_SLICE_OK if this slice is ok<br>
 */
static int mpeg_decode_slice(Mpeg1Context *s1, int mb_y,
                             const uint8_t **buf, int buf_size)
{
    MpegEncContext *s = &s1->mpeg_enc_ctx;
    AVCodecContext *avctx= s->avctx;
    const int field_pic= s->picture_structure != PICT_FRAME;
    const int lowres= s->avctx->lowres;

    s->resync_mb_x=
    s->resync_mb_y= -1;

    assert(mb_y < s->mb_height);

    init_get_bits(&s->gb, *buf, buf_size*8);

    ff_mpeg1_clean_buffers(s);
    s->interlaced_dct = 0;

    s->qscale = get_qscale(s);

    if(s->qscale == 0){
        av_log(s->avctx, AV_LOG_ERROR, "qscale == 0\n");
        return -1;
    }

    /* extra slice info */
    while (get_bits1(&s->gb) != 0) {
        skip_bits(&s->gb, 8);
    }

    s->mb_x=0;

    if(mb_y==0 && s->codec_tag == AV_RL32("SLIF")){
        skip_bits1(&s->gb);
    }else{
        for(;;) {
            int code = get_vlc2(&s->gb, mbincr_vlc.table, MBINCR_VLC_BITS, 2);
            if (code < 0){
                av_log(s->avctx, AV_LOG_ERROR, "first mb_incr damaged\n");
                return -1;
            }
            if (code >= 33) {
                if (code == 33) {
                    s->mb_x += 33;
                }
                /* otherwise, stuffing, nothing to do */
            } else {
                s->mb_x += code;
                break;
            }
        }
    }

    if(s->mb_x >= (unsigned)s->mb_width){
        av_log(s->avctx, AV_LOG_ERROR, "initial skip overflow\n");
        return -1;
    }

    if (avctx->hwaccel) {
        const uint8_t *buf_end, *buf_start = *buf - 4; /* include start_code */
        int start_code = -1;
        buf_end = ff_find_start_code(buf_start + 2, *buf + buf_size, &start_code);
        if (buf_end < *buf + buf_size)
            buf_end -= 4;
        s->mb_y = mb_y;
        if (avctx->hwaccel->decode_slice(avctx, buf_start, buf_end - buf_start) < 0)
            return DECODE_SLICE_ERROR;
        *buf = buf_end;
        return DECODE_SLICE_OK;
    }

    s->resync_mb_x= s->mb_x;
    s->resync_mb_y= s->mb_y= mb_y;
    s->mb_skip_run= 0;
    ff_init_block_index(s);

    if (s->mb_y==0 && s->mb_x==0 && (s->first_field || s->picture_structure==PICT_FRAME)) {
        if(s->avctx->debug&FF_DEBUG_PICT_INFO){
            av_log(s->avctx, AV_LOG_DEBUG, "qp:%d fc:%2d%2d%2d%2d %s %s %s %s %s dc:%d pstruct:%d fdct:%d cmv:%d qtype:%d ivlc:%d rff:%d %s\n",
                   s->qscale, s->mpeg_f_code[0][0],s->mpeg_f_code[0][1],s->mpeg_f_code[1][0],s->mpeg_f_code[1][1],
                   s->pict_type == FF_I_TYPE ? "I" : (s->pict_type == FF_P_TYPE ? "P" : (s->pict_type == FF_B_TYPE ? "B" : "S")),
                   s->progressive_sequence ? "ps" :"", s->progressive_frame ? "pf" : "", s->alternate_scan ? "alt" :"", s->top_field_first ? "top" :"",
                   s->intra_dc_precision, s->picture_structure, s->frame_pred_frame_dct, s->concealment_motion_vectors,
                   s->q_scale_type, s->intra_vlc_format, s->repeat_first_field, s->chroma_420_type ? "420" :"");
        }
    }

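    /* Main macroblock loop: decode macroblocks left to right, wrapping to the
     * next MB row, until an end-of-slice condition or the bottom of the
     * picture is reached. */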
    for(;;) {
        //If 1, we memcpy blocks in xvmcvideo.
        if(CONFIG_MPEG_XVMC_DECODER && s->avctx->xvmc_acceleration > 1)
            ff_xvmc_init_block(s);//set s->block

        if(mpeg_decode_mb(s, s->block) < 0)
            return -1;

        if(s->current_picture.motion_val[0] && !s->encoding){ //note motion_val is normally NULL unless we want to extract the MVs
            const int wrap = s->b8_stride;
            int xy = s->mb_x*2 + s->mb_y*2*wrap;
            int b8_xy= 4*(s->mb_x + s->mb_y*s->mb_stride);
            int motion_x, motion_y, dir, i;

            for(i=0; i<2; i++){
                for(dir=0; dir<2; dir++){
                    if (s->mb_intra || (dir==1 && s->pict_type != FF_B_TYPE)) {
                        motion_x = motion_y = 0;
                    }else if (s->mv_type == MV_TYPE_16X16 || (s->mv_type == MV_TYPE_FIELD && field_pic)){
                        motion_x = s->mv[dir][0][0];
                        motion_y = s->mv[dir][0][1];
                    } else /*if ((s->mv_type == MV_TYPE_FIELD) || (s->mv_type == MV_TYPE_16X8))*/ {
                        motion_x = s->mv[dir][i][0];
                        motion_y = s->mv[dir][i][1];
                    }

                    s->current_picture.motion_val[dir][xy    ][0] = motion_x;
                    s->current_picture.motion_val[dir][xy    ][1] = motion_y;
                    s->current_picture.motion_val[dir][xy + 1][0] = motion_x;
                    s->current_picture.motion_val[dir][xy + 1][1] = motion_y;
                    s->current_picture.ref_index [dir][b8_xy    ]=
                    s->current_picture.ref_index [dir][b8_xy + 1]= s->field_select[dir][i];
                    assert(s->field_select[dir][i]==0 || s->field_select[dir][i]==1);
                }
                xy += wrap;
                b8_xy +=2;
            }
        }

        s->dest[0] += 16 >> lowres;
        s->dest[1] +=(16 >> lowres) >> s->chroma_x_shift;
        s->dest[2] +=(16 >> lowres) >> s->chroma_x_shift;

        MPV_decode_mb(s, s->block);

        if (++s->mb_x >= s->mb_width) {
            const int mb_size= 16>>s->avctx->lowres;

            ff_draw_horiz_band(s, mb_size*(s->mb_y>>field_pic), mb_size);
            MPV_report_decode_progress(s);

            s->mb_x = 0;
            s->mb_y += 1<<field_pic;

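            /* Bottom of the picture reached, so the slice must end here.
             * Trailing bits are tolerated for streams that look like D-10
             * (4:2:2, intra-only, profile 0 / level 5), which pad their
             * slices; anything else is reported as an end mismatch. */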
            if(s->mb_y >= s->mb_height){
                int left= get_bits_left(&s->gb);
                int is_d10= s->chroma_format==2 && s->pict_type==FF_I_TYPE && avctx->profile==0 && avctx->level==5
                            && s->intra_dc_precision == 2 && s->q_scale_type == 1 && s->alternate_scan == 0
                            && s->progressive_frame == 0 /* vbv_delay == 0xBBB || 0xE10*/;

                if(left < 0 || (left && show_bits(&s->gb, FFMIN(left, 23)) && !is_d10)
                   || (avctx->error_recognition >= FF_ER_AGGRESSIVE && left>8)){
                    av_log(avctx, AV_LOG_ERROR, "end mismatch left=%d %0X\n", left, show_bits(&s->gb, FFMIN(left, 23)));
                    return -1;
                }else
                    goto eos;
            }

            ff_init_block_index(s);
        }

        /* skip mb handling */
        if (s->mb_skip_run == -1) {
            /* read increment again */
            s->mb_skip_run = 0;
            for(;;) {
                int code = get_vlc2(&s->gb, mbincr_vlc.table, MBINCR_VLC_BITS, 2);
                if (code < 0){
                    av_log(s->avctx, AV_LOG_ERROR, "mb incr damaged\n");
                    return -1;
                }
                if (code >= 33) {
                    if (code == 33) {
                        s->mb_skip_run += 33;
                    }else if(code == 35){
                        if(s->mb_skip_run != 0 || show_bits(&s->gb, 15) != 0){
                            av_log(s->avctx, AV_LOG_ERROR, "slice mismatch\n");
                            return -1;
                        }
                        goto eos; /* end of slice */
                    }
                    /* otherwise, stuffing, nothing to do */
                } else {
                    s->mb_skip_run += code;
                    break;
                }
            }
            if(s->mb_skip_run){
                int i;
                if(s->pict_type == FF_I_TYPE){
                    av_log(s->avctx, AV_LOG_ERROR, "skipped MB in I frame at %d %d\n", s->mb_x, s->mb_y);
                    return -1;
                }

                /* skip mb */
                s->mb_intra = 0;
                for(i=0;i<12;i++)
                    s->block_last_index[i] = -1;
                if(s->picture_structure == PICT_FRAME)
                    s->mv_type = MV_TYPE_16X16;
                else
                    s->mv_type = MV_TYPE_FIELD;
                if (s->pict_type == FF_P_TYPE) {
                    /* if P type, zero motion vector is implied */
                    s->mv_dir = MV_DIR_FORWARD;
                    s->mv[0][0][0] = s->mv[0][0][1] = 0;
                    s->last_mv[0][0][0] = s->last_mv[0][0][1] = 0;
                    s->last_mv[0][1][0] = s->last_mv[0][1][1] = 0;
                    s->field_select[0][0]= (s->picture_structure - 1) & 1;
                } else {
                    /* if B type, reuse previous vectors and directions */
                    s->mv[0][0][0] = s->last_mv[0][0][0];
                    s->mv[0][0][1] = s->last_mv[0][0][1];
                    s->mv[1][0][0] = s->last_mv[1][0][0];
                    s->mv[1][0][1] = s->last_mv[1][0][1];
                }
            }
        }
    }
eos: // end of slice
    *buf += (get_bits_count(&s->gb)-1)/8;
//printf("y %d %d %d %d\n", s->resync_mb_x, s->resync_mb_y, s->mb_x, s->mb_y);
    return 0;
}

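/* Worker for slice threading: decode all slices between this thread's
 * start_mb_y and end_mb_y from its own bitstream copy, reporting each slice
 * to the error resilience code as it goes. */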
static int slice_decode_thread(AVCodecContext *c, void *arg){
    MpegEncContext *s= *(void**)arg;
    const uint8_t *buf= s->gb.buffer;
    int mb_y= s->start_mb_y;
    const int field_pic= s->picture_structure != PICT_FRAME;

    s->error_count= (3*(s->end_mb_y - s->start_mb_y)*s->mb_width) >> field_pic;

    for(;;){
        uint32_t start_code;
        int ret;

        ret= mpeg_decode_slice((Mpeg1Context*)s, mb_y, &buf, s->gb.buffer_end - buf);
        emms_c();
        //av_log(c, AV_LOG_DEBUG, "ret:%d resync:%d/%d mb:%d/%d ts:%d/%d ec:%d\n",
        //       ret, s->resync_mb_x, s->resync_mb_y, s->mb_x, s->mb_y, s->start_mb_y, s->end_mb_y, s->error_count);
        if(ret < 0){
            if(s->resync_mb_x>=0 && s->resync_mb_y>=0)
                ff_er_add_slice(s, s->resync_mb_x, s->resync_mb_y, s->mb_x, s->mb_y, AC_ERROR|DC_ERROR|MV_ERROR);
        }else{
            ff_er_add_slice(s, s->resync_mb_x, s->resync_mb_y, s->mb_x-1, s->mb_y, AC_END|DC_END|MV_END);
        }

        if(s->mb_y == s->end_mb_y)
            return 0;

        start_code= -1;
        buf = ff_find_start_code(buf, s->gb.buffer_end, &start_code);
        mb_y= (start_code - SLICE_MIN_START_CODE) << field_pic;
        if (s->picture_structure == PICT_BOTTOM_FIELD)
            mb_y++;
        if(mb_y < 0 || mb_y >= s->end_mb_y)
            return -1;
    }

    return 0; //not reached
}

/**
 * Handle slice ends.
 * @return 1 if it seems to be the last slice
 */
static int slice_end(AVCodecContext *avctx, AVFrame *pict)
{
    Mpeg1Context *s1 = avctx->priv_data;
    MpegEncContext *s = &s1->mpeg_enc_ctx;

    if (!s1->mpeg_enc_ctx_allocated || !s->current_picture_ptr)
        return 0;

    if (s->avctx->hwaccel) {
        if (s->avctx->hwaccel->end_frame(s->avctx) < 0)
            av_log(avctx, AV_LOG_ERROR, "hardware accelerator failed to decode picture\n");
    }

    if(CONFIG_MPEG_XVMC_DECODER && s->avctx->xvmc_acceleration)
        ff_xvmc_field_end(s);

    /* end of slice reached */
    if (/*s->mb_y<<field_pic == s->mb_height &&*/ !s->first_field) {
        /* end of image */

        s->current_picture_ptr->qscale_type= FF_QSCALE_TYPE_MPEG2;

        ff_er_frame_end(s);

        MPV_frame_end(s);

        if (s->pict_type == FF_B_TYPE || s->low_delay) {
            *pict= *(AVFrame*)s->current_picture_ptr;
            ff_print_debug_info(s, pict);
        } else {
            if (avctx->active_thread_type&FF_THREAD_FRAME)
                s->picture_number++;
            /* latency of 1 frame for I- and P-frames */
            /* XXX: use another variable than picture_number */
            if (s->last_picture_ptr != NULL) {
                *pict= *(AVFrame*)s->last_picture_ptr;
                ff_print_debug_info(s, pict);
            }
        }

        return 1;
    } else {
        return 0;
    }
}

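/* Parse an MPEG-1 sequence header (dimensions, aspect ratio, frame rate,
 * bit rate, VBV buffer size, quantization matrices) and preset the MPEG-2
 * fields so the rest of the decoder can treat MPEG-1 as progressive,
 * frame-structured MPEG-2. */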
static int mpeg1_decode_sequence(AVCodecContext *avctx,
                                 const uint8_t *buf, int buf_size)
{
    Mpeg1Context *s1 = avctx->priv_data;
    MpegEncContext *s = &s1->mpeg_enc_ctx;
    int width, height;
    int i, v, j;

    init_get_bits(&s->gb, buf, buf_size*8);

    width = get_bits(&s->gb, 12);
    height = get_bits(&s->gb, 12);
    if (width <= 0 || height <= 0)
        return -1;
    s->aspect_ratio_info= get_bits(&s->gb, 4);
    if (s->aspect_ratio_info == 0) {
        av_log(avctx, AV_LOG_ERROR, "aspect ratio has forbidden 0 value\n");
        if (avctx->error_recognition >= FF_ER_COMPLIANT)
            return -1;
    }
    s->frame_rate_index = get_bits(&s->gb, 4);
    if (s->frame_rate_index == 0 || s->frame_rate_index > 13)
        return -1;
    s->bit_rate = get_bits(&s->gb, 18) * 400;
    if (get_bits1(&s->gb) == 0) /* marker */
        return -1;
    s->width = width;
    s->height = height;

    s->avctx->rc_buffer_size= get_bits(&s->gb, 10) * 1024*16;
    skip_bits(&s->gb, 1);

    /* get matrix */
    if (get_bits1(&s->gb)) {
        load_matrix(s, s->chroma_intra_matrix, s->intra_matrix, 1);
    } else {
        for(i=0;i<64;i++) {
            j = s->dsp.idct_permutation[i];
            v = ff_mpeg1_default_intra_matrix[i];
            s->intra_matrix[j] = v;
            s->chroma_intra_matrix[j] = v;
        }
    }
    if (get_bits1(&s->gb)) {
        load_matrix(s, s->chroma_inter_matrix, s->inter_matrix, 0);
    } else {
        for(i=0;i<64;i++) {
            int j= s->dsp.idct_permutation[i];
            v = ff_mpeg1_default_non_intra_matrix[i];
            s->inter_matrix[j] = v;
            s->chroma_inter_matrix[j] = v;
        }
    }

    if(show_bits(&s->gb, 23) != 0){
        av_log(s->avctx, AV_LOG_ERROR, "sequence header damaged\n");
        return -1;
    }

    /* we set MPEG-2 parameters so that it emulates MPEG-1 */
    s->progressive_sequence = 1;
    s->progressive_frame = 1;
    s->picture_structure = PICT_FRAME;
    s->frame_pred_frame_dct = 1;
    s->chroma_format = 1;
    s->codec_id= s->avctx->codec_id= CODEC_ID_MPEG1VIDEO;
    avctx->sub_id = 1; /* indicates MPEG-1 */
    s->out_format = FMT_MPEG1;
    s->swap_uv = 0;//AFAIK VCR2 does not have SEQ_HEADER
    if(s->flags & CODEC_FLAG_LOW_DELAY) s->low_delay=1;

    if(s->avctx->debug & FF_DEBUG_PICT_INFO)
        av_log(s->avctx, AV_LOG_DEBUG, "vbv buffer: %d, bitrate:%d\n",
               s->avctx->rc_buffer_size, s->bit_rate);

    return 0;
}

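/* Set up a decoding context for streams tagged 'VCR2', which carry no
 * sequence header: dimensions are taken from the container, the default
 * quantization matrices are loaded and the swapped U/V block order is
 * enabled. */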
static int vcr2_init_sequence(AVCodecContext *avctx)
{
    Mpeg1Context *s1 = avctx->priv_data;
    MpegEncContext *s = &s1->mpeg_enc_ctx;
    int i, v;

    /* start new MPEG-1 context decoding */
    s->out_format = FMT_MPEG1;
    if (s1->mpeg_enc_ctx_allocated) {
        MPV_common_end(s);
    }
    s->width  = avctx->coded_width;
    s->height = avctx->coded_height;
    avctx->has_b_frames= 0; //true?
    s->low_delay= 1;

    avctx->pix_fmt = mpeg_get_pixelformat(avctx);
    avctx->hwaccel = ff_find_hwaccel(avctx->codec->id, avctx->pix_fmt);

    if( avctx->pix_fmt == PIX_FMT_XVMC_MPEG2_IDCT || avctx->hwaccel )
        if( avctx->idct_algo == FF_IDCT_AUTO )
            avctx->idct_algo = FF_IDCT_SIMPLE;

    if (MPV_common_init(s) < 0)
        return -1;
    exchange_uv(s);//common init reset pblocks, so we swap them here
    s->swap_uv = 1;// in case of xvmc we need to swap uv for each MB
    s1->mpeg_enc_ctx_allocated = 1;

    for(i=0;i<64;i++) {
        int j= s->dsp.idct_permutation[i];
        v = ff_mpeg1_default_intra_matrix[i];
        s->intra_matrix[j] = v;
        s->chroma_intra_matrix[j] = v;

        v = ff_mpeg1_default_non_intra_matrix[i];
        s->inter_matrix[j] = v;
        s->chroma_inter_matrix[j] = v;
    }

    s->progressive_sequence = 1;
    s->progressive_frame = 1;
    s->picture_structure = PICT_FRAME;
    s->frame_pred_frame_dct = 1;
    s->chroma_format = 1;
    s->codec_id= s->avctx->codec_id= CODEC_ID_MPEG2VIDEO;
    avctx->sub_id = 2; /* indicates MPEG-2 */
    s1->save_width           = s->width;
    s1->save_height          = s->height;
    s1->save_progressive_seq = s->progressive_sequence;
    return 0;
}


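/* Parse a user-data chunk; only the DTG1 active format description is
 * extracted (into avctx->dtg_active_format). */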
static void mpeg_decode_user_data(AVCodecContext *avctx,
                                  const uint8_t *p, int buf_size)
{
    const uint8_t *buf_end = p+buf_size;

    /* we parse the DTG active format information */
    if (buf_end - p >= 5 &&
        p[0] == 'D' && p[1] == 'T' && p[2] == 'G' && p[3] == '1') {
        int flags = p[4];
        p += 5;
        if (flags & 0x80) {
            /* skip event id */
            p += 2;
        }
        if (flags & 0x40) {
            if (buf_end - p < 1)
                return;
            avctx->dtg_active_format = p[0] & 0x0f;
        }
    }
}

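/* Parse a GOP header: drop-frame flag, time code, closed_gop and broken_link,
 * logging them when FF_DEBUG_PICT_INFO is set. */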
static void mpeg_decode_gop(AVCodecContext *avctx,
                            const uint8_t *buf, int buf_size){
    Mpeg1Context *s1 = avctx->priv_data;
    MpegEncContext *s = &s1->mpeg_enc_ctx;

    int drop_frame_flag;
    int time_code_hours, time_code_minutes;
    int time_code_seconds, time_code_pictures;
    int broken_link;

    init_get_bits(&s->gb, buf, buf_size*8);

    drop_frame_flag = get_bits1(&s->gb);

    time_code_hours = get_bits(&s->gb, 5);
    time_code_minutes = get_bits(&s->gb, 6);
    skip_bits1(&s->gb);//marker bit
    time_code_seconds = get_bits(&s->gb, 6);
    time_code_pictures = get_bits(&s->gb, 6);

    s->closed_gop = get_bits1(&s->gb);
    /* broken_link indicates that after editing, the reference frames of the
       first B-frames following the GOP I-frame are missing (open GOP) */
    broken_link = get_bits1(&s->gb);

    if(s->avctx->debug & FF_DEBUG_PICT_INFO)
        av_log(s->avctx, AV_LOG_DEBUG, "GOP (%2d:%02d:%02d.[%02d]) closed_gop=%d broken_link=%d\n",
               time_code_hours, time_code_minutes, time_code_seconds,
               time_code_pictures, s->closed_gop, broken_link);
}
/**
 * Find the end of the current frame in the bitstream.
 * @return the position of the first byte of the next frame, or END_NOT_FOUND
 *         if no frame end was found in this buffer
 */
int ff_mpeg1_find_frame_end(ParseContext *pc, const uint8_t *buf, int buf_size, AVCodecParserContext *s)
{
    int i;
    uint32_t state= pc->state;

    /* EOF considered as end of frame */
    if (buf_size == 0)
        return 0;

/*
 0  frame start         -> 1/4
 1  first_SEQEXT        -> 0/2
 2  first field start   -> 3/0
 3  second_SEQEXT       -> 2/0
 4  searching end
*/

    for(i=0; i<buf_size; i++){
        assert(pc->frame_start_found>=0 && pc->frame_start_found<=4);
        if(pc->frame_start_found&1){
            if(state == EXT_START_CODE && (buf[i]&0xF0) != 0x80)
                pc->frame_start_found--;
            else if(state == EXT_START_CODE+2){
                if((buf[i]&3) == 3) pc->frame_start_found= 0;
                else                pc->frame_start_found= (pc->frame_start_found+1)&3;
            }
            state++;
        }else{
            i= ff_find_start_code(buf+i, buf+buf_size, &state) - buf - 1;
            if(pc->frame_start_found==0 && state >= SLICE_MIN_START_CODE && state <= SLICE_MAX_START_CODE){
                i++;
                pc->frame_start_found=4;
            }
            if(state == SEQ_END_CODE){
                pc->state=-1;
                return i+1;
            }
            if(pc->frame_start_found==2 && state == SEQ_START_CODE)
                pc->frame_start_found= 0;
            if(pc->frame_start_found<4 && state == EXT_START_CODE)
                pc->frame_start_found++;
            if(pc->frame_start_found == 4 && (state&0xFFFFFF00) == 0x100){
                if(state < SLICE_MIN_START_CODE || state > SLICE_MAX_START_CODE){
                    pc->frame_start_found=0;
                    pc->state=-1;
                    return i-3;
                }
            }
            if(pc->frame_start_found == 0 && s && state == PICTURE_START_CODE){
                ff_fetch_timestamp(s, i-3, 1);
            }
        }
    }
    pc->state= state;
    return END_NOT_FOUND;
}

static int decode_chunks(AVCodecContext *avctx,
                         AVFrame *picture, int *data_size,
                         const uint8_t *buf, int buf_size);

/* handle buffering and image synchronisation */
static int mpeg_decode_frame(AVCodecContext *avctx,
                             void *data, int *data_size,
                             AVPacket *avpkt)
{
    const uint8_t *buf = avpkt->data;
    int buf_size = avpkt->size;
    Mpeg1Context *s = avctx->priv_data;
    AVFrame *picture = data;
    MpegEncContext *s2 = &s->mpeg_enc_ctx;
    av_dlog(avctx, "fill_buffer\n");

    if (buf_size == 0 || (buf_size == 4 && AV_RB32(buf) == SEQ_END_CODE)) {
        /* special case for last picture */
        if (s2->low_delay==0 && s2->next_picture_ptr) {
            *picture= *(AVFrame*)s2->next_picture_ptr;
            s2->next_picture_ptr= NULL;

            *data_size = sizeof(AVFrame);
        }
        return buf_size;
    }

    if(s2->flags&CODEC_FLAG_TRUNCATED){
        int next= ff_mpeg1_find_frame_end(&s2->parse_context, buf, buf_size, NULL);

        if( ff_combine_frame(&s2->parse_context, next, (const uint8_t **)&buf, &buf_size) < 0 )
            return buf_size;
    }

#if 0
    if (s->repeat_field % 2 == 1) {
        s->repeat_field++;
        //fprintf(stderr,"\nRepeating last frame: %d -> %d! pict: %d %d", avctx->frame_number-1, avctx->frame_number,
        //        s2->picture_number, s->repeat_field);
        if (avctx->flags & CODEC_FLAG_REPEAT_FIELD) {
            *data_size = sizeof(AVPicture);
            goto the_end;
        }
    }
#endif

    if(s->mpeg_enc_ctx_allocated==0 && avctx->codec_tag == AV_RL32("VCR2"))
        vcr2_init_sequence(avctx);

    s->slice_count= 0;

    if(avctx->extradata && !avctx->frame_number)
        decode_chunks(avctx, picture, data_size, avctx->extradata, avctx->extradata_size);

    return decode_chunks(avctx, picture, data_size, buf, buf_size);
}

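/* Walk the buffer start code by start code and dispatch each chunk (sequence
 * header, picture header, extensions, user data, GOP, slices) to the matching
 * parser. Slices are decoded inline, queued for slice threads or counted for
 * VDPAU; the return value is the number of bytes consumed. */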
static int decode_chunks(AVCodecContext *avctx,
                         AVFrame *picture, int *data_size,
                         const uint8_t *buf, int buf_size)
{
    Mpeg1Context *s = avctx->priv_data;
    MpegEncContext *s2 = &s->mpeg_enc_ctx;
    const uint8_t *buf_ptr = buf;
    const uint8_t *buf_end = buf + buf_size;
    int ret, input_size;
    int last_code= 0;

    for(;;) {
        /* find next start code */
        uint32_t start_code = -1;
        buf_ptr = ff_find_start_code(buf_ptr, buf_end, &start_code);
        if (start_code > 0x1ff){
            if(s2->pict_type != FF_B_TYPE || avctx->skip_frame <= AVDISCARD_DEFAULT){
                if(HAVE_THREADS && avctx->active_thread_type&FF_THREAD_SLICE){
                    int i;

                    avctx->execute(avctx, slice_decode_thread, &s2->thread_context[0], NULL, s->slice_count, sizeof(void*));
                    for(i=0; i<s->slice_count; i++)
                        s2->error_count += s2->thread_context[i]->error_count;
                }

                if (CONFIG_MPEG_VDPAU_DECODER && uses_vdpau(avctx))
                    ff_vdpau_mpeg_picture_complete(s2, buf, buf_size, s->slice_count);

                if (slice_end(avctx, picture)) {
                    if(s2->last_picture_ptr || s2->low_delay) //FIXME merge with the stuff in mpeg_decode_slice
                        *data_size = sizeof(AVPicture);
                }
            }
            s2->pict_type= 0;
            return FFMAX(0, buf_ptr - buf - s2->parse_context.last_index);
        }

        input_size = buf_end - buf_ptr;

        if(avctx->debug & FF_DEBUG_STARTCODE){
            av_log(avctx, AV_LOG_DEBUG, "%3X at %td left %d\n", start_code, buf_ptr-buf, input_size);
        }

        /* prepare data for next start code */
        switch(start_code) {
        case SEQ_START_CODE:
            if(last_code == 0){
                mpeg1_decode_sequence(avctx, buf_ptr, input_size);
                s->sync=1;
            }else{
                av_log(avctx, AV_LOG_ERROR, "ignoring SEQ_START_CODE after %X\n", last_code);
            }
            break;

        case PICTURE_START_CODE:
            if (HAVE_THREADS && (avctx->active_thread_type&FF_THREAD_SLICE) && s->slice_count) {
                int i;

                avctx->execute(avctx, slice_decode_thread,
                               s2->thread_context, NULL,
                               s->slice_count, sizeof(void*));
                for (i = 0; i < s->slice_count; i++)
                    s2->error_count += s2->thread_context[i]->error_count;
                s->slice_count = 0;
            }
            if(last_code == 0 || last_code == SLICE_MIN_START_CODE){
                if(mpeg_decode_postinit(avctx) < 0){
                    av_log(avctx, AV_LOG_ERROR, "mpeg_decode_postinit() failure\n");
                    return -1;
                }

                /* we have a complete image: we try to decompress it */
                if(mpeg1_decode_picture(avctx, buf_ptr, input_size) < 0)
                    s2->pict_type=0;
                s2->first_slice = 1;
                last_code= PICTURE_START_CODE;
            }else{
                av_log(avctx, AV_LOG_ERROR, "ignoring pic after %X\n", last_code);
            }
            break;
        case EXT_START_CODE:
            init_get_bits(&s2->gb, buf_ptr, input_size*8);

            switch(get_bits(&s2->gb, 4)) {
            case 0x1:
                if(last_code == 0){
                    mpeg_decode_sequence_extension(s);
                }else{
                    av_log(avctx, AV_LOG_ERROR, "ignoring seq ext after %X\n", last_code);
                }
                break;
            case 0x2:
                mpeg_decode_sequence_display_extension(s);
                break;
            case 0x3:
                mpeg_decode_quant_matrix_extension(s2);
                break;
            case 0x7:
                mpeg_decode_picture_display_extension(s);
                break;
            case 0x8:
                if(last_code == PICTURE_START_CODE){
                    mpeg_decode_picture_coding_extension(s);
                }else{
                    av_log(avctx, AV_LOG_ERROR, "ignoring pic cod ext after %X\n", last_code);
                }
                break;
            }
            break;
        case USER_START_CODE:
            mpeg_decode_user_data(avctx, buf_ptr, input_size);
            break;
        case GOP_START_CODE:
            if(last_code == 0){
                s2->first_field=0;
                mpeg_decode_gop(avctx, buf_ptr, input_size);
                s->sync=1;
            }else{
                av_log(avctx, AV_LOG_ERROR, "ignoring GOP_START_CODE after %X\n", last_code);
            }
            break;
        default:
            if (start_code >= SLICE_MIN_START_CODE &&
                start_code <= SLICE_MAX_START_CODE && last_code!=0) {
                const int field_pic= s2->picture_structure != PICT_FRAME;
                int mb_y= (start_code - SLICE_MIN_START_CODE) << field_pic;
                last_code= SLICE_MIN_START_CODE;

                if(s2->picture_structure == PICT_BOTTOM_FIELD)
                    mb_y++;

                if (mb_y >= s2->mb_height){
                    av_log(s2->avctx, AV_LOG_ERROR, "slice below image (%d >= %d)\n", mb_y, s2->mb_height);
                    return -1;
                }

                if(s2->last_picture_ptr==NULL){
                    /* Skip B-frames if we do not have reference frames and gop is not closed */
                    if(s2->pict_type==FF_B_TYPE){
                        if(!s2->closed_gop)
                            break;
                    }
                }
                if(s2->pict_type==FF_I_TYPE)
                    s->sync=1;
                if(s2->next_picture_ptr==NULL){
                    /* Skip P-frames if we do not have a reference frame or we have an invalid header. */
                    if(s2->pict_type==FF_P_TYPE && !s->sync) break;
                }
#if FF_API_HURRY_UP
                /* Skip B-frames if we are in a hurry. */
                if(avctx->hurry_up && s2->pict_type==FF_B_TYPE) break;
#endif
                if(  (avctx->skip_frame >= AVDISCARD_NONREF && s2->pict_type==FF_B_TYPE)
                   ||(avctx->skip_frame >= AVDISCARD_NONKEY && s2->pict_type!=FF_I_TYPE)
                   || avctx->skip_frame >= AVDISCARD_ALL)
                    break;
#if FF_API_HURRY_UP
                /* Skip everything if we are in a hurry>=5. */
                if(avctx->hurry_up>=5) break;
#endif

                if (!s->mpeg_enc_ctx_allocated) break;

                if(s2->codec_id == CODEC_ID_MPEG2VIDEO){
                    if(mb_y < avctx->skip_top || mb_y >= s2->mb_height - avctx->skip_bottom)
                        break;
                }

                if(!s2->pict_type){
                    av_log(avctx, AV_LOG_ERROR, "Missing picture start code\n");
                    break;
                }

                if(s2->first_slice){
                    s2->first_slice=0;
                    if(mpeg_field_start(s2, buf, buf_size) < 0)
                        return -1;
                }
                if(!s2->current_picture_ptr){
                    av_log(avctx, AV_LOG_ERROR, "current_picture not initialized\n");
                    return -1;
                }

                if (uses_vdpau(avctx)) {
                    s->slice_count++;
                    break;
                }

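                /* With slice threading the slice is only queued here: thread
                 * contexts are filled until each covers roughly
                 * mb_height/thread_count MB rows, and avctx->execute() runs
                 * them once the picture is complete (next picture start code
                 * or end of buffer). */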
                if(HAVE_THREADS && avctx->active_thread_type&FF_THREAD_SLICE){
                    int threshold= (s2->mb_height*s->slice_count + avctx->thread_count/2) / avctx->thread_count;
                    if(threshold <= mb_y){
                        MpegEncContext *thread_context= s2->thread_context[s->slice_count];

                        thread_context->start_mb_y= mb_y;
                        thread_context->end_mb_y  = s2->mb_height;
                        if(s->slice_count){
                            s2->thread_context[s->slice_count-1]->end_mb_y= mb_y;
                            ff_update_duplicate_context(thread_context, s2);
                        }
                        init_get_bits(&thread_context->gb, buf_ptr, input_size*8);
                        s->slice_count++;
                    }
                    buf_ptr += 2; //FIXME add minimum number of bytes per slice
                }else{
                    ret = mpeg_decode_slice(s, mb_y, &buf_ptr, input_size);
                    emms_c();

                    if(ret < 0){
                        if(s2->resync_mb_x>=0 && s2->resync_mb_y>=0)
                            ff_er_add_slice(s2, s2->resync_mb_x, s2->resync_mb_y, s2->mb_x, s2->mb_y, AC_ERROR|DC_ERROR|MV_ERROR);
                    }else{
                        ff_er_add_slice(s2, s2->resync_mb_x, s2->resync_mb_y, s2->mb_x-1, s2->mb_y, AC_END|DC_END|MV_END);
                    }
                }
            }
            break;
        }
    }
}

static void flush(AVCodecContext *avctx){
    Mpeg1Context *s = avctx->priv_data;

    s->sync=0;

    ff_mpeg_flush(avctx);
}

static int mpeg_decode_end(AVCodecContext *avctx)
{
    Mpeg1Context *s = avctx->priv_data;

    if (s->mpeg_enc_ctx_allocated)
        MPV_common_end(&s->mpeg_enc_ctx);
    return 0;
}

static const AVProfile mpeg2_video_profiles[] = {
    { FF_PROFILE_MPEG2_422,          "4:2:2"              },
    { FF_PROFILE_MPEG2_HIGH,         "High"               },
    { FF_PROFILE_MPEG2_SS,           "Spatially Scalable" },
    { FF_PROFILE_MPEG2_SNR_SCALABLE, "SNR Scalable"       },
    { FF_PROFILE_MPEG2_MAIN,         "Main"               },
    { FF_PROFILE_MPEG2_SIMPLE,       "Simple"             },
    { FF_PROFILE_RESERVED,           "Reserved"           },
    { FF_PROFILE_RESERVED,           "Reserved"           },
};


AVCodec ff_mpeg1video_decoder = {
    "mpeg1video",
    AVMEDIA_TYPE_VIDEO,
    CODEC_ID_MPEG1VIDEO,
    sizeof(Mpeg1Context),
    mpeg_decode_init,
    NULL,
    mpeg_decode_end,
    mpeg_decode_frame,
    CODEC_CAP_DRAW_HORIZ_BAND | CODEC_CAP_DR1 | CODEC_CAP_TRUNCATED | CODEC_CAP_DELAY,
    .flush= flush,
    .max_lowres= 3,
    .long_name= NULL_IF_CONFIG_SMALL("MPEG-1 video"),
    .update_thread_context= ONLY_IF_THREADS_ENABLED(mpeg_decode_update_thread_context)
};

AVCodec ff_mpeg2video_decoder = {
    "mpeg2video",
    AVMEDIA_TYPE_VIDEO,
    CODEC_ID_MPEG2VIDEO,
    sizeof(Mpeg1Context),
    mpeg_decode_init,
    NULL,
    mpeg_decode_end,
    mpeg_decode_frame,
    CODEC_CAP_DRAW_HORIZ_BAND | CODEC_CAP_DR1 | CODEC_CAP_TRUNCATED | CODEC_CAP_DELAY,
    .flush= flush,
    .max_lowres= 3,
    .long_name= NULL_IF_CONFIG_SMALL("MPEG-2 video"),
    .profiles = NULL_IF_CONFIG_SMALL(mpeg2_video_profiles),
};

//legacy decoder
AVCodec ff_mpegvideo_decoder = {
    "mpegvideo",
    AVMEDIA_TYPE_VIDEO,
    CODEC_ID_MPEG2VIDEO,
    sizeof(Mpeg1Context),
    mpeg_decode_init,
    NULL,
    mpeg_decode_end,
    mpeg_decode_frame,
    CODEC_CAP_DRAW_HORIZ_BAND | CODEC_CAP_DR1 | CODEC_CAP_TRUNCATED | CODEC_CAP_DELAY,
    .flush= flush,
    .max_lowres= 3,
    .long_name= NULL_IF_CONFIG_SMALL("MPEG-1 video"),
};

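/* Hardware-accelerated decoder wrappers: the XvMC and VDPAU entries below
 * reuse the same decode callbacks and differ only in init, pixel format and
 * capability flags. */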
#if CONFIG_MPEG_XVMC_DECODER
static av_cold int mpeg_mc_decode_init(AVCodecContext *avctx){
    if( avctx->active_thread_type & FF_THREAD_SLICE )
        return -1;
    if( !(avctx->slice_flags & SLICE_FLAG_CODED_ORDER) )
        return -1;
    if( !(avctx->slice_flags & SLICE_FLAG_ALLOW_FIELD) ){
        av_dlog(avctx, "mpeg12.c: XvMC decoder will work better if SLICE_FLAG_ALLOW_FIELD is set\n");
    }
    mpeg_decode_init(avctx);

    avctx->pix_fmt = PIX_FMT_XVMC_MPEG2_IDCT;
    avctx->xvmc_acceleration = 2;//2 - the blocks are packed!

    return 0;
}

AVCodec ff_mpeg_xvmc_decoder = {
    "mpegvideo_xvmc",
    AVMEDIA_TYPE_VIDEO,
    CODEC_ID_MPEG2VIDEO_XVMC,
    sizeof(Mpeg1Context),
    mpeg_mc_decode_init,
    NULL,
    mpeg_decode_end,
    mpeg_decode_frame,
    CODEC_CAP_DRAW_HORIZ_BAND | CODEC_CAP_DR1 | CODEC_CAP_TRUNCATED | CODEC_CAP_HWACCEL | CODEC_CAP_DELAY,
    .flush= flush,
    .long_name = NULL_IF_CONFIG_SMALL("MPEG-1/2 video XvMC (X-Video Motion Compensation)"),
};
#endif

#if CONFIG_MPEG_VDPAU_DECODER
AVCodec ff_mpeg_vdpau_decoder = {
    "mpegvideo_vdpau",
    AVMEDIA_TYPE_VIDEO,
    CODEC_ID_MPEG2VIDEO,
    sizeof(Mpeg1Context),
    mpeg_decode_init,
    NULL,
    mpeg_decode_end,
    mpeg_decode_frame,
    CODEC_CAP_DR1 | CODEC_CAP_TRUNCATED | CODEC_CAP_HWACCEL_VDPAU | CODEC_CAP_DELAY,
    .flush= flush,
    .long_name = NULL_IF_CONFIG_SMALL("MPEG-1/2 video (VDPAU acceleration)"),
};
#endif

#if CONFIG_MPEG1_VDPAU_DECODER
AVCodec ff_mpeg1_vdpau_decoder = {
    "mpeg1video_vdpau",
    AVMEDIA_TYPE_VIDEO,
    CODEC_ID_MPEG1VIDEO,
    sizeof(Mpeg1Context),
    mpeg_decode_init,
    NULL,
    mpeg_decode_end,
    mpeg_decode_frame,
    CODEC_CAP_DR1 | CODEC_CAP_TRUNCATED | CODEC_CAP_HWACCEL_VDPAU | CODEC_CAP_DELAY,
    .flush= flush,
    .long_name = NULL_IF_CONFIG_SMALL("MPEG-1 video (VDPAU acceleration)"),
};
#endif