ffmpeg / libavcodec / mpegvideo.c @ 2f349de2
History | View | Annotate | Download (43.5 KB)
1 |
/*
|
---|---|
2 |
* The simplest mpeg encoder (well, it was the simplest!)
|
3 |
* Copyright (c) 2000,2001 Gerard Lantau.
|
4 |
*
|
5 |
* This program is free software; you can redistribute it and/or modify
|
6 |
* it under the terms of the GNU General Public License as published by
|
7 |
* the Free Software Foundation; either version 2 of the License, or
|
8 |
* (at your option) any later version.
|
9 |
*
|
10 |
* This program is distributed in the hope that it will be useful,
|
11 |
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
12 |
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
13 |
* GNU General Public License for more details.
|
14 |
*
|
15 |
* You should have received a copy of the GNU General Public License
|
16 |
* along with this program; if not, write to the Free Software
|
17 |
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
|
18 |
*/
|
19 |
#include <stdlib.h> |
20 |
#include <stdio.h> |
21 |
#include <math.h> |
22 |
#include <string.h> |
23 |
#include "avcodec.h" |
24 |
#include "dsputil.h" |
25 |
#include "mpegvideo.h" |
26 |
|
27 |
#ifdef USE_FASTMEMCPY
|
28 |
#include "fastmemcpy.h" |
29 |
#endif
|
30 |
|
31 |
static void encode_picture(MpegEncContext *s, int picture_number); |
32 |
static void rate_control_init(MpegEncContext *s); |
33 |
static int rate_estimate_qscale(MpegEncContext *s); |
34 |
static void dct_unquantize_mpeg1_c(MpegEncContext *s, |
35 |
DCTELEM *block, int n, int qscale); |
36 |
static void dct_unquantize_h263_c(MpegEncContext *s, |
37 |
DCTELEM *block, int n, int qscale); |
38 |
static void draw_edges_c(UINT8 *buf, int wrap, int width, int height, int w); |
39 |
static int dct_quantize_c(MpegEncContext *s, DCTELEM *block, int n, int qscale); |
40 |
|
41 |
int (*dct_quantize)(MpegEncContext *s, DCTELEM *block, int n, int qscale)= dct_quantize_c; |
42 |
void (*draw_edges)(UINT8 *buf, int wrap, int width, int height, int w)= draw_edges_c; |
43 |
|
44 |
#define EDGE_WIDTH 16 |
45 |
|
46 |
/* enable all paranoid tests for rounding, overflows, etc... */
|
47 |
//#define PARANOID
|
48 |
|
49 |
//#define DEBUG
|
50 |
|
51 |
/* for jpeg fast DCT */
|
52 |
#define CONST_BITS 14 |
53 |
|
54 |
static const unsigned short aanscales[64] = { |
55 |
/* precomputed values scaled up by 14 bits */
|
56 |
16384, 22725, 21407, 19266, 16384, 12873, 8867, 4520, |
57 |
22725, 31521, 29692, 26722, 22725, 17855, 12299, 6270, |
58 |
21407, 29692, 27969, 25172, 21407, 16819, 11585, 5906, |
59 |
19266, 26722, 25172, 22654, 19266, 15137, 10426, 5315, |
60 |
16384, 22725, 21407, 19266, 16384, 12873, 8867, 4520, |
61 |
12873, 17855, 16819, 15137, 12873, 10114, 6967, 3552, |
62 |
8867, 12299, 11585, 10426, 8867, 6967, 4799, 2446, |
63 |
4520, 6270, 5906, 5315, 4520, 3552, 2446, 1247 |
64 |
}; |
65 |
|
66 |
static UINT8 h263_chroma_roundtab[16] = { |
67 |
0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, |
68 |
}; |
69 |
|
70 |
/* default motion estimation */
|
71 |
int motion_estimation_method = ME_LOG;
|
72 |
|
73 |
extern UINT8 zigzag_end[64]; |
74 |
|
75 |
/* Build the inverse-quantization multiplier tables used by dct_quantize():
   qmat (int, QMAT_SHIFT-class precision) and qmat16 (16-bit, for the MMX path).
   quant_matrix holds the 8x8 quantizer matrix; qscale is the per-picture scale. */
static void convert_matrix(int *qmat, UINT16 *qmat16, const UINT16 *quant_matrix, int qscale)
{
    int i;

    if (av_fdct == jpeg_fdct_ifast) {
        /* The AAN fast DCT leaves its output scaled by aanscales[]/2^14,
           so that factor is folded into the quantizer reciprocals here. */
        for(i=0;i<64;i++) {
            /* 16 <= qscale * quant_matrix[i] <= 7905 */
            /* 19952 <= aanscales[i] * qscale * quant_matrix[i] <= 249205026 */
            /* (1<<36)/19952 >= (1<<36)/(aanscales[i] * qscale * quant_matrix[i]) >= (1<<36)/249205026 */
            /* 3444240 >= (1<<36)/(aanscales[i] * qscale * quant_matrix[i]) >= 275 */

            qmat[block_permute_op(i)] = (int)((UINT64_C(1) << (QMAT_SHIFT + 11)) /
                                        (aanscales[i] * qscale * quant_matrix[block_permute_op(i)]));
        }
    } else {
        for(i=0;i<64;i++) {
            /* We can safely suppose that 16 <= quant_matrix[i] <= 255
               So 16 <= qscale * quant_matrix[i] <= 7905
               so (1<<19) / 16 >= (1<<19) / (qscale * quant_matrix[i]) >= (1<<19) / 7905
               so 32768 >= (1<<19) / (qscale * quant_matrix[i]) >= 67
            */
            /* NOTE(review): qmat indexes quant_matrix with plain i while qmat16
               applies block_permute_op() — presumably because the MMX quantizer
               consumes coefficients in permuted order; confirm against dct_quantize
               users before changing. */
            qmat[i] = (1 << QMAT_SHIFT_MMX) / (qscale * quant_matrix[i]);
            qmat16[i] = (1 << QMAT_SHIFT_MMX) / (qscale * quant_matrix[block_permute_op(i)]);
        }
    }
}
101 |
|
102 |
/* init common structure for both encoder and decoder */
|
103 |
/* Initialize the state shared by encoder and decoder: picture buffers
   (with EDGE_WIDTH guard borders for motion compensation), and the
   prediction/skip side tables. Returns 0 on success, -1 on allocation
   failure (partially allocated buffers are released on the fail path).
   Assumes *s arrives zero-initialized apart from the fields read below
   (width/height, out_format, h263_pred, has_b_frames, encoding) —
   the fail path frees whatever pointers are non-NULL. */
int MPV_common_init(MpegEncContext *s)
{
    int c_size, i;
    UINT8 *pict;

    /* pick the scalar dequantizer matching the output syntax */
    if (s->out_format == FMT_H263)
        s->dct_unquantize = dct_unquantize_h263_c;
    else
        s->dct_unquantize = dct_unquantize_mpeg1_c;

#ifdef HAVE_MMX
    /* may override dct_unquantize / dct_quantize with MMX versions */
    MPV_common_init_mmx(s);
#endif
    /* dimensions in 16x16 macroblocks, rounded up */
    s->mb_width = (s->width + 15) / 16;
    s->mb_height = (s->height + 15) / 16;
    /* line stride includes an EDGE_WIDTH border on both sides */
    s->linesize = s->mb_width * 16 + 2 * EDGE_WIDTH;

    /* allocate last/next (and aux for B frames) pictures for Y, Cb, Cr;
       the returned pointers are advanced past the top/left border so
       picture[0,0] addresses the first visible pixel */
    for(i=0;i<3;i++) {
        int w, h, shift, pict_start;

        w = s->linesize;
        h = s->mb_height * 16 + 2 * EDGE_WIDTH;
        shift = (i == 0) ? 0 : 1; /* chroma planes are half size */
        c_size = (w >> shift) * (h >> shift);
        pict_start = (w >> shift) * (EDGE_WIDTH >> shift) + (EDGE_WIDTH >> shift);

        pict = av_mallocz(c_size);
        if (pict == NULL)
            goto fail;
        s->last_picture_base[i] = pict;
        s->last_picture[i] = pict + pict_start;

        pict = av_mallocz(c_size);
        if (pict == NULL)
            goto fail;
        s->next_picture_base[i] = pict;
        s->next_picture[i] = pict + pict_start;

        if (s->has_b_frames) {
            pict = av_mallocz(c_size);
            if (pict == NULL)
                goto fail;
            s->aux_picture_base[i] = pict;
            s->aux_picture[i] = pict + pict_start;
        }
    }

    if (s->out_format == FMT_H263) {
        int size;
        /* MV prediction: one MV pair per 8x8 block, plus a one-block border */
        size = (2 * s->mb_width + 2) * (2 * s->mb_height + 2);
        s->motion_val = malloc(size * 2 * sizeof(INT16));
        if (s->motion_val == NULL)
            goto fail;
        memset(s->motion_val, 0, size * 2 * sizeof(INT16));
    }

    if (s->h263_pred) {
        int y_size, c_size, i, size;

        /* dc values */

        /* luma grid is per 8x8 block (2x per MB), chroma per MB, each
           with a one-entry border; chroma planes follow luma in one buffer */
        y_size = (2 * s->mb_width + 2) * (2 * s->mb_height + 2);
        c_size = (s->mb_width + 2) * (s->mb_height + 2);
        size = y_size + 2 * c_size;
        s->dc_val[0] = malloc(size * sizeof(INT16));
        if (s->dc_val[0] == NULL)
            goto fail;
        s->dc_val[1] = s->dc_val[0] + y_size;
        s->dc_val[2] = s->dc_val[1] + c_size;
        /* 1024 is the "no prediction" reset value used elsewhere in this file */
        for(i=0;i<size;i++)
            s->dc_val[0][i] = 1024;

        /* ac values: 16 coefficients per block position */
        s->ac_val[0] = av_mallocz(size * sizeof(INT16) * 16);
        if (s->ac_val[0] == NULL)
            goto fail;
        s->ac_val[1] = s->ac_val[0] + y_size;
        s->ac_val[2] = s->ac_val[1] + c_size;

        /* cbp values */
        s->coded_block = av_mallocz(y_size);
        if (!s->coded_block)
            goto fail;

        /* which mb is a intra block; start as all-intra so predictors
           get reset on the first inter macroblock */
        s->mbintra_table = av_mallocz(s->mb_width * s->mb_height);
        if (!s->mbintra_table)
            goto fail;
        memset(s->mbintra_table, 1, s->mb_width * s->mb_height);
    }
    /* default structure is frame */
    s->picture_structure = PICT_FRAME;

    /* init macroblock skip table (decoder only) */
    if (!s->encoding) {
        s->mbskip_table = av_mallocz(s->mb_width * s->mb_height);
        if (!s->mbskip_table)
            goto fail;
    }

    s->context_initialized = 1;
    return 0;
 fail:
    /* free whatever got allocated before the failure */
    if (s->motion_val)
        free(s->motion_val);
    if (s->dc_val[0])
        free(s->dc_val[0]);
    if (s->ac_val[0])
        free(s->ac_val[0]);
    if (s->coded_block)
        free(s->coded_block);
    if (s->mbintra_table)
        free(s->mbintra_table);
    if (s->mbskip_table)
        free(s->mbskip_table);
    for(i=0;i<3;i++) {
        if (s->last_picture_base[i])
            free(s->last_picture_base[i]);
        if (s->next_picture_base[i])
            free(s->next_picture_base[i]);
        if (s->aux_picture_base[i])
            free(s->aux_picture_base[i]);
    }
    return -1;
}
229 |
|
230 |
/* init common structure for both encoder and decoder */
|
231 |
/* Release everything allocated by MPV_common_init(). The conditional
   frees mirror the conditional allocations in init (h263_pred buffers,
   mbskip_table, aux pictures for B frames). */
void MPV_common_end(MpegEncContext *s)
{
    int i;

    if (s->motion_val)
        free(s->motion_val);
    if (s->h263_pred) {
        /* dc_val[1..2] / ac_val[1..2] point into these two buffers */
        free(s->dc_val[0]);
        free(s->ac_val[0]);
        free(s->coded_block);
        free(s->mbintra_table);
    }
    if (s->mbskip_table)
        free(s->mbskip_table);
    for(i=0;i<3;i++) {
        /* free the *_base pointers: the picture pointers are offset past the edge border */
        free(s->last_picture_base[i]);
        free(s->next_picture_base[i]);
        if (s->has_b_frames)
            free(s->aux_picture_base[i]);
    }
    s->context_initialized = 0;
}
253 |
|
254 |
/* init video encoder */
|
255 |
/* Encoder-side initialization: copy settings from the AVCodecContext,
   select the output bitstream syntax from the codec id, then run the
   shared MPV_common_init(). Returns 0 on success, -1 on failure. */
int MPV_encode_init(AVCodecContext *avctx)
{
    MpegEncContext *s = avctx->priv_data;
    int i;

    s->bit_rate = avctx->bit_rate;
    s->frame_rate = avctx->frame_rate;
    s->width = avctx->width;
    s->height = avctx->height;
    s->gop_size = avctx->gop_size;
    s->rtp_mode = avctx->rtp_mode;
    s->rtp_payload_size = avctx->rtp_payload_size;
    s->avctx = avctx;

    /* gop_size <= 1 means "intra only"; 12 is then just a harmless default */
    if (s->gop_size <= 1) {
        s->intra_only = 1;
        s->gop_size = 12;
    } else {
        s->intra_only = 0;
    }
    s->full_search = motion_estimation_method;

    /* caller-supplied fixed quantizer vs. the internal rate control */
    s->fixed_qscale = (avctx->flags & CODEC_FLAG_QSCALE);

    switch(avctx->codec->id) {
    case CODEC_ID_MPEG1VIDEO:
        s->out_format = FMT_MPEG1;
        break;
    case CODEC_ID_MJPEG:
        s->out_format = FMT_MJPEG;
        s->intra_only = 1; /* force intra only for jpeg */
        if (mjpeg_init(s) < 0)
            return -1;
        break;
    case CODEC_ID_H263:
        /* format code 7 = no standard H.263 picture size matches */
        if (h263_get_picture_format(s->width, s->height) == 7){
            printf("Input picture size isn't suitable for h263 codec! try h263+\n");
            return -1;
        }
        s->out_format = FMT_H263;
        break;
    case CODEC_ID_H263P:
        s->out_format = FMT_H263;
        s->rtp_mode = 1;
        s->rtp_payload_size = 1200;
        s->h263_plus = 1;
        s->unrestricted_mv = 1;

        /* These are just to be sure */
        s->umvplus = 0;
        s->umvplus_dec = 0;
        break;
    case CODEC_ID_RV10:
        s->out_format = FMT_H263;
        s->h263_rv10 = 1;
        break;
    case CODEC_ID_MPEG4:
        s->out_format = FMT_H263;
        s->h263_pred = 1;
        s->unrestricted_mv = 1;
        break;
    case CODEC_ID_MSMPEG4:
        s->out_format = FMT_H263;
        s->h263_msmpeg4 = 1;
        s->h263_pred = 1;
        s->unrestricted_mv = 1;
        break;
    default:
        return -1;
    }

    if (s->out_format == FMT_H263)
        h263_encode_init_vlc(s);

    s->encoding = 1;

    /* init */
    if (MPV_common_init(s) < 0)
        return -1;

    /* init default q matrix */
    for(i=0;i<64;i++) {
        s->intra_matrix[i] = default_intra_matrix[i];
        s->non_intra_matrix[i] = default_non_intra_matrix[i];
    }

    /* rate control init */
    rate_control_init(s);

    s->picture_number = 0;
    s->fake_picture_number = 0;
    /* motion detector init */
    s->f_code = 1;

    return 0;
}
351 |
|
352 |
/* Tear down the encoder: dump coding statistics when built with STATS,
   release the shared encode/decode context, and shut down the MJPEG
   layer if it was in use. Always returns 0. */
int MPV_encode_end(AVCodecContext *avctx)
{
    MpegEncContext *ctx = avctx->priv_data;

#ifdef STATS
    print_stats();
#endif
    MPV_common_end(ctx);
    if (ctx->out_format == FMT_MJPEG)
        mjpeg_close(ctx);
    return 0;
}
364 |
|
365 |
/* draw the edges of width 'w' of an image of size width, height */
|
366 |
/* draw the edges of width 'w' of an image of size width, height
   by replicating the border pixels outward (used so motion vectors
   pointing outside the picture read valid data). buf points at the
   top-left visible pixel; the w-pixel border around it must be
   addressable (guaranteed by the EDGE_WIDTH padding from init). */
static void draw_edges_c(UINT8 *buf, int wrap, int width, int height, int w)
{
    UINT8 *ptr, *last_line;
    int i;

    last_line = buf + (height - 1) * wrap;
    for(i=0;i<w;i++) {
        /* top and bottom: replicate first/last rows */
        memcpy(buf - (i + 1) * wrap, buf, width);
        memcpy(last_line + (i + 1) * wrap, last_line, width);
    }
    /* left and right: replicate first/last pixel of each row */
    ptr = buf;
    for(i=0;i<height;i++) {
        memset(ptr - w, ptr[0], w);
        memset(ptr + width, ptr[width-1], w);
        ptr += wrap;
    }
    /* corners: fill each w x w corner with the nearest picture pixel */
    for(i=0;i<w;i++) {
        memset(buf - (i + 1) * wrap - w, buf[0], w); /* top left */
        memset(buf - (i + 1) * wrap + width, buf[width-1], w); /* top right */
        memset(last_line + (i + 1) * wrap - w, last_line[0], w); /* bottom left */
        memset(last_line + (i + 1) * wrap + width, last_line[width-1], w); /* bottom right */
    }
}
392 |
|
393 |
/* generic function for encode/decode called before a frame is coded/decoded */
|
394 |
/* generic function for encode/decode called before a frame is coded/decoded.
   Selects where the upcoming frame will be written: B frames decode into the
   aux buffers (they are never referenced later), while I/P frames rotate the
   reference pair — the old "next" becomes "last", and the freed buffer
   becomes both "next" and the current output. */
void MPV_frame_start(MpegEncContext *s)
{
    int plane;

    s->mb_skiped = 0;
    for (plane = 0; plane < 3; plane++) {
        if (s->pict_type == B_TYPE) {
            s->current_picture[plane] = s->aux_picture[plane];
        } else {
            /* swap next and last; reuse the retired buffer as output */
            UINT8 *prev = s->last_picture[plane];
            s->last_picture[plane] = s->next_picture[plane];
            s->next_picture[plane] = prev;
            s->current_picture[plane] = prev;
        }
    }
}
414 |
|
415 |
/* generic function for encode/decode called after a frame has been coded/decoded */
|
416 |
/* generic function for encode/decode called after a frame has been coded/decoded.
   Extends reference frames with replicated border pixels so unrestricted
   motion vectors can read outside the picture; B frames are never referenced
   so they are skipped. */
void MPV_frame_end(MpegEncContext *s)
{
    /* draw edge for correct motion prediction if outside */
    if (s->pict_type != B_TYPE && !s->intra_only) {
        if(s->avctx==NULL || s->avctx->codec->id!=CODEC_ID_MPEG4){
            /* pad out to the full macroblock grid */
            draw_edges(s->current_picture[0], s->linesize, s->mb_width*16, s->mb_height*16, EDGE_WIDTH);
            draw_edges(s->current_picture[1], s->linesize/2, s->mb_width*8, s->mb_height*8, EDGE_WIDTH/2);
            draw_edges(s->current_picture[2], s->linesize/2, s->mb_width*8, s->mb_height*8, EDGE_WIDTH/2);
        }else{
            /* OpenDivx, but i dunno how to distinguish it from mpeg4 */
            /* pads from the true picture size instead of the MB-aligned size */
            draw_edges(s->current_picture[0], s->linesize, s->width, s->height, EDGE_WIDTH);
            draw_edges(s->current_picture[1], s->linesize/2, s->width/2, s->height/2, EDGE_WIDTH/2);
            draw_edges(s->current_picture[2], s->linesize/2, s->width/2, s->height/2, EDGE_WIDTH/2);
        }
    }
    /* leave the FPU in a clean state after any MMX code */
    emms_c();
}
433 |
|
434 |
/* Encode one input picture (AVPicture in *data) into buf, returning the
   number of bytes written. Decides the picture type from the GOP position,
   copies/borrows the input planes, runs encode_picture(), and updates
   bit/quality statistics on the codec context. */
int MPV_encode_picture(AVCodecContext *avctx,
                       unsigned char *buf, int buf_size, void *data)
{
    MpegEncContext *s = avctx->priv_data;
    AVPicture *pict = data;
    int i, j;

    if (s->fixed_qscale)
        s->qscale = avctx->quality;

    init_put_bits(&s->pb, buf, buf_size, NULL, NULL);

    if (!s->intra_only) {
        /* first picture of GOP is intra */
        if ((s->picture_number % s->gop_size) == 0)
            s->pict_type = I_TYPE;
        else
            s->pict_type = P_TYPE;
    } else {
        s->pict_type = I_TYPE;
    }
    avctx->key_frame = (s->pict_type == I_TYPE);

    /* selects current_picture buffers / rotates references */
    MPV_frame_start(s);

    /* import the three input planes into current_picture */
    for(i=0;i<3;i++) {
        UINT8 *src = pict->data[i];
        UINT8 *dest = s->current_picture[i];
        int src_wrap = pict->linesize[i];
        int dest_wrap = s->linesize;
        int w = s->width;
        int h = s->height;

        if (i >= 1) {
            /* chroma planes are subsampled 2x in both directions */
            dest_wrap >>= 1;
            w >>= 1;
            h >>= 1;
        }

        if(s->intra_only && dest_wrap==src_wrap){
            /* intra-only never references previous frames, so the input
               buffer can be used directly instead of copying */
            s->current_picture[i] = pict->data[i];
        }else {
            for(j=0;j<h;j++) {
                memcpy(dest, src, w);
                dest += dest_wrap;
                src += src_wrap;
            }
        }
        s->new_picture[i] = s->current_picture[i];
    }

    encode_picture(s, s->picture_number);

    /* pads reference edges for the next frame's motion search */
    MPV_frame_end(s);
    s->picture_number++;

    if (s->out_format == FMT_MJPEG)
        mjpeg_picture_trailer(s);

    flush_put_bits(&s->pb);
    s->total_bits += (s->pb.buf_ptr - s->pb.buf) * 8;
    avctx->quality = s->qscale;
    return s->pb.buf_ptr - s->pb.buf;
}
498 |
|
499 |
/* Clamp a into the inclusive range [amin, amax]. */
static inline int clip(int a, int amin, int amax)
{
    return (a < amin) ? amin : ((a > amax) ? amax : a);
}
508 |
|
509 |
/* apply one mpeg motion vector to the three components */
|
510 |
/* apply one mpeg motion vector to the three components.
   motion_x/motion_y are in half-pel units for luma; chroma vectors are
   derived by halving (with H.263-style truncation when out_format is
   FMT_H263). field_based halves the vertical geometry and doubles the
   line stride for field pictures; dest_offset/src_offset shift the
   destination/reference (e.g. by one line for the bottom field).
   pix_op is indexed by dxy = (y half-pel << 1) | x half-pel. */
static inline void mpeg_motion(MpegEncContext *s,
                               UINT8 *dest_y, UINT8 *dest_cb, UINT8 *dest_cr,
                               int dest_offset,
                               UINT8 **ref_picture, int src_offset,
                               int field_based, op_pixels_func *pix_op,
                               int motion_x, int motion_y, int h)
{
    UINT8 *ptr;
    int dxy, offset, mx, my, src_x, src_y, height, linesize;

    dxy = ((motion_y & 1) << 1) | (motion_x & 1);
    src_x = s->mb_x * 16 + (motion_x >> 1);
    src_y = s->mb_y * (16 >> field_based) + (motion_y >> 1);

    /* WARNING: do not forget half pels */
    height = s->height >> field_based;
    /* clamp the source block into the edge-extended picture; at the far
       edge drop the half-pel flag so we don't read one pixel past it */
    src_x = clip(src_x, -16, s->width);
    if (src_x == s->width)
        dxy &= ~1;
    src_y = clip(src_y, -16, height);
    if (src_y == height)
        dxy &= ~2;
    linesize = s->linesize << field_based;
    ptr = ref_picture[0] + (src_y * linesize) + (src_x) + src_offset;
    dest_y += dest_offset;
    /* luma: two 8-wide ops cover the 16-wide macroblock */
    pix_op[dxy](dest_y, ptr, linesize, h);
    pix_op[dxy](dest_y + 8, ptr + 8, linesize, h);

    /* derive the chroma vector from the luma vector */
    if (s->out_format == FMT_H263) {
        /* H.263: any non-integer luma position maps to a half-pel chroma position */
        dxy = 0;
        if ((motion_x & 3) != 0)
            dxy |= 1;
        if ((motion_y & 3) != 0)
            dxy |= 2;
        mx = motion_x >> 2;
        my = motion_y >> 2;
    } else {
        /* MPEG: divide the vector by two (truncating toward zero) */
        mx = motion_x / 2;
        my = motion_y / 2;
        dxy = ((my & 1) << 1) | (mx & 1);
        mx >>= 1;
        my >>= 1;
    }

    src_x = s->mb_x * 8 + mx;
    src_y = s->mb_y * (8 >> field_based) + my;
    src_x = clip(src_x, -8, s->width >> 1);
    if (src_x == (s->width >> 1))
        dxy &= ~1;
    src_y = clip(src_y, -8, height >> 1);
    if (src_y == (height >> 1))
        dxy &= ~2;

    offset = (src_y * (linesize >> 1)) + src_x + (src_offset >> 1);
    ptr = ref_picture[1] + offset;
    pix_op[dxy](dest_cb + (dest_offset >> 1), ptr, linesize >> 1, h >> 1);
    ptr = ref_picture[2] + offset;
    pix_op[dxy](dest_cr + (dest_offset >> 1), ptr, linesize >> 1, h >> 1);
}
569 |
|
570 |
/* Perform motion compensation for the current macroblock using the
   vectors in s->mv[dir][...]. Dispatches on s->mv_type: one 16x16
   vector, four 8x8 vectors (with a single derived chroma vector), or
   field-based prediction. pix_op selects put vs. average and rounding. */
static inline void MPV_motion(MpegEncContext *s,
                              UINT8 *dest_y, UINT8 *dest_cb, UINT8 *dest_cr,
                              int dir, UINT8 **ref_picture,
                              op_pixels_func *pix_op)
{
    int dxy, offset, mx, my, src_x, src_y, motion_x, motion_y;
    int mb_x, mb_y, i;
    UINT8 *ptr, *dest;

    mb_x = s->mb_x;
    mb_y = s->mb_y;

    switch(s->mv_type) {
    case MV_TYPE_16X16:
        mpeg_motion(s, dest_y, dest_cb, dest_cr, 0,
                    ref_picture, 0,
                    0, pix_op,
                    s->mv[dir][0][0], s->mv[dir][0][1], 16);
        break;
    case MV_TYPE_8X8:
        /* one vector per 8x8 luma block; i & 1 selects the column,
           i >> 1 the row within the macroblock */
        for(i=0;i<4;i++) {
            motion_x = s->mv[dir][i][0];
            motion_y = s->mv[dir][i][1];

            dxy = ((motion_y & 1) << 1) | (motion_x & 1);
            src_x = mb_x * 16 + (motion_x >> 1) + (i & 1) * 8;
            src_y = mb_y * 16 + (motion_y >> 1) + ((i >> 1) & 1) * 8;

            /* WARNING: do not forget half pels */
            /* clamp into the edge-extended picture; drop the half-pel
               flag at the far edge */
            src_x = clip(src_x, -16, s->width);
            if (src_x == s->width)
                dxy &= ~1;
            src_y = clip(src_y, -16, s->height);
            if (src_y == s->height)
                dxy &= ~2;

            ptr = ref_picture[0] + (src_y * s->linesize) + (src_x);
            dest = dest_y + ((i & 1) * 8) + (i >> 1) * 8 * s->linesize;
            pix_op[dxy](dest, ptr, s->linesize, 8);
        }
        /* In case of 8X8, we construct a single chroma motion vector
           with a special rounding */
        mx = 0;
        my = 0;
        for(i=0;i<4;i++) {
            mx += s->mv[dir][i][0];
            my += s->mv[dir][i][1];
        }
        /* H.263 chroma rounding of the summed vector, sign handled symmetrically */
        if (mx >= 0)
            mx = (h263_chroma_roundtab[mx & 0xf] + ((mx >> 3) & ~1));
        else {
            mx = -mx;
            mx = -(h263_chroma_roundtab[mx & 0xf] + ((mx >> 3) & ~1));
        }
        if (my >= 0)
            my = (h263_chroma_roundtab[my & 0xf] + ((my >> 3) & ~1));
        else {
            my = -my;
            my = -(h263_chroma_roundtab[my & 0xf] + ((my >> 3) & ~1));
        }
        dxy = ((my & 1) << 1) | (mx & 1);
        mx >>= 1;
        my >>= 1;

        src_x = mb_x * 8 + mx;
        src_y = mb_y * 8 + my;
        src_x = clip(src_x, -8, s->width/2);
        if (src_x == s->width/2)
            dxy &= ~1;
        src_y = clip(src_y, -8, s->height/2);
        if (src_y == s->height/2)
            dxy &= ~2;

        offset = (src_y * (s->linesize >> 1)) + src_x;
        ptr = ref_picture[1] + offset;
        pix_op[dxy](dest_cb, ptr, s->linesize >> 1, 8);
        ptr = ref_picture[2] + offset;
        pix_op[dxy](dest_cr, ptr, s->linesize >> 1, 8);
        break;
    case MV_TYPE_FIELD:
        if (s->picture_structure == PICT_FRAME) {
            /* top field */
            mpeg_motion(s, dest_y, dest_cb, dest_cr, 0,
                        ref_picture, s->field_select[dir][0] ? s->linesize : 0,
                        1, pix_op,
                        s->mv[dir][0][0], s->mv[dir][0][1], 8);
            /* bottom field */
            mpeg_motion(s, dest_y, dest_cb, dest_cr, s->linesize,
                        ref_picture, s->field_select[dir][1] ? s->linesize : 0,
                        1, pix_op,
                        s->mv[dir][1][0], s->mv[dir][1][1], 8);
        } else {
            /* field-picture prediction not implemented */

        }
        break;
    }
}
668 |
|
669 |
|
670 |
/* put block[] to dest[] */
|
671 |
/* put block[] to dest[]: dequantize (unless MPEG-2, where the caller
   presumably dequantizes during parsing — confirm), inverse-DCT,
   and store the clamped result into the picture. */
static inline void put_dct(MpegEncContext *s,
                           DCTELEM *block, int i, UINT8 *dest, int line_size)
{
    if (!s->mpeg2)
        s->dct_unquantize(s, block, i, s->qscale);
    ff_idct (block);
    put_pixels_clamped(block, dest, line_size);
}
679 |
|
680 |
/* add block[] to dest[] */
|
681 |
static inline void add_dct(MpegEncContext *s, |
682 |
DCTELEM *block, int i, UINT8 *dest, int line_size) |
683 |
{ |
684 |
if (s->block_last_index[i] >= 0) { |
685 |
if (!s->mpeg2)
|
686 |
if(s->encoding || (!s->h263_msmpeg4))
|
687 |
s->dct_unquantize(s, block, i, s->qscale); |
688 |
ff_idct (block); |
689 |
add_pixels_clamped(block, dest, line_size); |
690 |
} |
691 |
} |
692 |
|
693 |
/* generic function called after a macroblock has been parsed by the
|
694 |
decoder or after it has been encoded by the encoder.
|
695 |
|
696 |
Important variables used:
|
697 |
s->mb_intra : true if intra macroblock
|
698 |
s->mv_dir : motion vector direction
|
699 |
s->mv_type : motion vector type
|
700 |
s->mv : motion vector
|
701 |
s->interlaced_dct : true if interlaced dct used (mpeg2)
|
702 |
*/
|
703 |
/* generic function called after a macroblock has been parsed by the
   decoder or after it has been encoded by the encoder.

   Updates the DC/AC/MV predictor tables for the macroblock just
   processed, then (unless intra-only) reconstructs its pixels into
   current_picture: motion compensation for inter blocks plus the
   DCT residue, or a plain inverse DCT for intra blocks.

   Important variables used:
   s->mb_intra : true if intra macroblock
   s->mv_dir : motion vector direction
   s->mv_type : motion vector type
   s->mv : motion vector
   s->interlaced_dct : true if interlaced dct used (mpeg2)
*/
void MPV_decode_mb(MpegEncContext *s, DCTELEM block[6][64])
{
    int mb_x, mb_y, motion_x, motion_y;
    int dct_linesize, dct_offset;
    op_pixels_func *op_pix;

    mb_x = s->mb_x;
    mb_y = s->mb_y;

#ifdef FF_POSTPROCESS
    /* export per-MB quantizer for the postprocessing filter */
    quant_store[mb_y][mb_x]=s->qscale;
    //printf("[%02d][%02d] %d\n",mb_x,mb_y,s->qscale);
#endif

    /* update DC predictors for P macroblocks */
    if (!s->mb_intra) {
        if (s->h263_pred) {
            /* only reset predictors if the MB was intra last time:
               avoids redundant work on runs of inter MBs */
            if(s->mbintra_table[mb_x + mb_y*s->mb_width])
            {
                int wrap, x, y, v;
                s->mbintra_table[mb_x + mb_y*s->mb_width]=0;

                /* luma predictor grid: 2 entries per MB + 1-entry border */
                wrap = 2 * s->mb_width + 2;
                v = 1024; /* "no prediction" DC reset value */
                x = 2 * mb_x + 1;
                y = 2 * mb_y + 1;

                /* reset DC prediction for the four 8x8 luma blocks */
                s->dc_val[0][(x) + (y) * wrap] = v;
                s->dc_val[0][(x + 1) + (y) * wrap] = v;
                s->dc_val[0][(x) + (y + 1) * wrap] = v;
                s->dc_val[0][(x + 1) + (y + 1) * wrap] = v;
                /* ac pred */
                memset(s->ac_val[0][(x) + (y) * wrap], 0, 16 * sizeof(INT16));
                memset(s->ac_val[0][(x + 1) + (y) * wrap], 0, 16 * sizeof(INT16));
                memset(s->ac_val[0][(x) + (y + 1) * wrap], 0, 16 * sizeof(INT16));
                memset(s->ac_val[0][(x + 1) + (y + 1) * wrap], 0, 16 * sizeof(INT16));
                if (s->h263_msmpeg4) {
                    s->coded_block[(x) + (y) * wrap] = 0;
                    s->coded_block[(x + 1) + (y) * wrap] = 0;
                    s->coded_block[(x) + (y + 1) * wrap] = 0;
                    s->coded_block[(x + 1) + (y + 1) * wrap] = 0;
                }
                /* chroma: one entry per MB + 1-entry border */
                wrap = s->mb_width + 2;
                x = mb_x + 1;
                y = mb_y + 1;
                s->dc_val[1][(x) + (y) * wrap] = v;
                s->dc_val[2][(x) + (y) * wrap] = v;
                /* ac pred */
                memset(s->ac_val[1][(x) + (y) * wrap], 0, 16 * sizeof(INT16));
                memset(s->ac_val[2][(x) + (y) * wrap], 0, 16 * sizeof(INT16));
            }
        } else {
            /* MPEG-style: reset the running DC predictors to mid-level */
            s->last_dc[0] = 128 << s->intra_dc_precision;
            s->last_dc[1] = 128 << s->intra_dc_precision;
            s->last_dc[2] = 128 << s->intra_dc_precision;
        }
    }
    else if (s->h263_pred)
        /* intra MB: remember it so the next inter MB resets predictors */
        s->mbintra_table[mb_x + mb_y*s->mb_width]=1;

    /* update motion predictor */
    if (s->out_format == FMT_H263) {
        int x, y, wrap;

        x = 2 * mb_x + 1;
        y = 2 * mb_y + 1;
        wrap = 2 * s->mb_width + 2;
        if (s->mb_intra) {
            /* intra MBs predict as zero motion */
            motion_x = 0;
            motion_y = 0;
            goto motion_init;
        } else if (s->mv_type == MV_TYPE_16X16) {
            motion_x = s->mv[0][0][0];
            motion_y = s->mv[0][0][1];
        motion_init:
            /* no update if 8X8 because it has been done during parsing */
            s->motion_val[(x) + (y) * wrap][0] = motion_x;
            s->motion_val[(x) + (y) * wrap][1] = motion_y;
            s->motion_val[(x + 1) + (y) * wrap][0] = motion_x;
            s->motion_val[(x + 1) + (y) * wrap][1] = motion_y;
            s->motion_val[(x) + (y + 1) * wrap][0] = motion_x;
            s->motion_val[(x) + (y + 1) * wrap][1] = motion_y;
            s->motion_val[(x + 1) + (y + 1) * wrap][0] = motion_x;
            s->motion_val[(x + 1) + (y + 1) * wrap][1] = motion_y;
        }
    }

    if (!s->intra_only) {
        UINT8 *dest_y, *dest_cb, *dest_cr;
        UINT8 *mbskip_ptr;

        /* avoid copy if macroblock skipped in last frame too */
        if (!s->encoding && s->pict_type != B_TYPE) {
            mbskip_ptr = &s->mbskip_table[s->mb_y * s->mb_width + s->mb_x];
            if (s->mb_skiped) {
                s->mb_skiped = 0;
                /* if previous was skipped too, then nothing to do ! */
                if (*mbskip_ptr != 0)
                    goto the_end;
                *mbskip_ptr = 1; /* indicate that this time we skiped it */
            } else {
                *mbskip_ptr = 0; /* not skipped */
            }
        }

        dest_y = s->current_picture[0] + (mb_y * 16 * s->linesize) + mb_x * 16;
        dest_cb = s->current_picture[1] + (mb_y * 8 * (s->linesize >> 1)) + mb_x * 8;
        dest_cr = s->current_picture[2] + (mb_y * 8 * (s->linesize >> 1)) + mb_x * 8;

        if (s->interlaced_dct) {
            /* interlaced DCT: blocks cover alternating lines of a field */
            dct_linesize = s->linesize * 2;
            dct_offset = s->linesize;
        } else {
            dct_linesize = s->linesize;
            dct_offset = s->linesize * 8;
        }

        if (!s->mb_intra) {
            /* motion handling */
            if (!s->no_rounding)
                op_pix = put_pixels_tab;
            else
                op_pix = put_no_rnd_pixels_tab;

            if (s->mv_dir & MV_DIR_FORWARD) {
                MPV_motion(s, dest_y, dest_cb, dest_cr, 0, s->last_picture, op_pix);
                /* bidirectional: the backward pass averages with the forward result */
                if (!s->no_rounding)
                    op_pix = avg_pixels_tab;
                else
                    op_pix = avg_no_rnd_pixels_tab;
            }
            if (s->mv_dir & MV_DIR_BACKWARD) {
                MPV_motion(s, dest_y, dest_cb, dest_cr, 1, s->next_picture, op_pix);
            }

            /* add dct residue */
            add_dct(s, block[0], 0, dest_y, dct_linesize);
            add_dct(s, block[1], 1, dest_y + 8, dct_linesize);
            add_dct(s, block[2], 2, dest_y + dct_offset, dct_linesize);
            add_dct(s, block[3], 3, dest_y + dct_offset + 8, dct_linesize);

            add_dct(s, block[4], 4, dest_cb, s->linesize >> 1);
            add_dct(s, block[5], 5, dest_cr, s->linesize >> 1);
        } else {
            /* dct only in intra block */
            put_dct(s, block[0], 0, dest_y, dct_linesize);
            put_dct(s, block[1], 1, dest_y + 8, dct_linesize);
            put_dct(s, block[2], 2, dest_y + dct_offset, dct_linesize);
            put_dct(s, block[3], 3, dest_y + dct_offset + 8, dct_linesize);

            put_dct(s, block[4], 4, dest_cb, s->linesize >> 1);
            put_dct(s, block[5], 5, dest_cr, s->linesize >> 1);
        }
    }
 the_end:
    /* leave the FPU in a clean state after any MMX code */
    emms_c();
}
861 |
|
862 |
static void encode_picture(MpegEncContext *s, int picture_number) |
863 |
{ |
864 |
int mb_x, mb_y, wrap, last_gob;
|
865 |
UINT8 *ptr; |
866 |
int i, motion_x, motion_y;
|
867 |
|
868 |
s->picture_number = picture_number; |
869 |
if (!s->fixed_qscale)
|
870 |
s->qscale = rate_estimate_qscale(s); |
871 |
|
872 |
/* precompute matrix */
|
873 |
if (s->out_format == FMT_MJPEG) {
|
874 |
/* for mjpeg, we do include qscale in the matrix */
|
875 |
s->intra_matrix[0] = default_intra_matrix[0]; |
876 |
for(i=1;i<64;i++) |
877 |
s->intra_matrix[i] = (default_intra_matrix[i] * s->qscale) >> 3;
|
878 |
convert_matrix(s->q_intra_matrix, s->q_intra_matrix16, s->intra_matrix, 8);
|
879 |
} else {
|
880 |
convert_matrix(s->q_intra_matrix, s->q_intra_matrix16, s->intra_matrix, s->qscale); |
881 |
convert_matrix(s->q_non_intra_matrix, s->q_non_intra_matrix16, s->non_intra_matrix, s->qscale); |
882 |
} |
883 |
|
884 |
switch(s->out_format) {
|
885 |
case FMT_MJPEG:
|
886 |
mjpeg_picture_header(s); |
887 |
break;
|
888 |
case FMT_H263:
|
889 |
if (s->h263_msmpeg4)
|
890 |
msmpeg4_encode_picture_header(s, picture_number); |
891 |
else if (s->h263_pred) |
892 |
mpeg4_encode_picture_header(s, picture_number); |
893 |
else if (s->h263_rv10) |
894 |
rv10_encode_picture_header(s, picture_number); |
895 |
else
|
896 |
h263_encode_picture_header(s, picture_number); |
897 |
break;
|
898 |
case FMT_MPEG1:
|
899 |
mpeg1_encode_picture_header(s, picture_number); |
900 |
break;
|
901 |
} |
902 |
|
903 |
/* init last dc values */
|
904 |
/* note: quant matrix value (8) is implied here */
|
905 |
s->last_dc[0] = 128; |
906 |
s->last_dc[1] = 128; |
907 |
s->last_dc[2] = 128; |
908 |
s->mb_incr = 1;
|
909 |
s->last_mv[0][0][0] = 0; |
910 |
s->last_mv[0][0][1] = 0; |
911 |
s->mv_type = MV_TYPE_16X16; |
912 |
s->mv_dir = MV_DIR_FORWARD; |
913 |
|
914 |
/* Get the GOB height based on picture height */
|
915 |
if (s->out_format == FMT_H263 && s->h263_plus) {
|
916 |
if (s->height <= 400) |
917 |
s->gob_index = 1;
|
918 |
else if (s->height <= 800) |
919 |
s->gob_index = 2;
|
920 |
else
|
921 |
s->gob_index = 4;
|
922 |
} |
923 |
|
924 |
for(mb_y=0; mb_y < s->mb_height; mb_y++) { |
925 |
/* Put GOB header based on RTP MTU */
|
926 |
if (!mb_y) {
|
927 |
s->ptr_lastgob = s->pb.buf_ptr; |
928 |
s->ptr_last_mb_line = s->pb.buf_ptr; |
929 |
} else if (s->out_format == FMT_H263 && s->h263_plus) { |
930 |
last_gob = h263_encode_gob_header(s, mb_y); |
931 |
if (last_gob) {
|
932 |
//fprintf(stderr,"\nLast GOB size: %d", last_gob);
|
933 |
s->first_gob_line = 1;
|
934 |
} else
|
935 |
s->first_gob_line = 0;
|
936 |
} |
937 |
for(mb_x=0; mb_x < s->mb_width; mb_x++) { |
938 |
|
939 |
s->mb_x = mb_x; |
940 |
s->mb_y = mb_y; |
941 |
|
942 |
/* compute motion vector and macro block type (intra or non intra) */
|
943 |
motion_x = 0;
|
944 |
motion_y = 0;
|
945 |
if (s->pict_type == P_TYPE) {
|
946 |
s->mb_intra = estimate_motion(s, mb_x, mb_y, |
947 |
&motion_x, |
948 |
&motion_y); |
949 |
} else {
|
950 |
s->mb_intra = 1;
|
951 |
} |
952 |
|
953 |
/* get the pixels */
|
954 |
wrap = s->linesize; |
955 |
ptr = s->new_picture[0] + (mb_y * 16 * wrap) + mb_x * 16; |
956 |
get_pixels(s->block[0], ptr, wrap);
|
957 |
get_pixels(s->block[1], ptr + 8, wrap); |
958 |
get_pixels(s->block[2], ptr + 8 * wrap, wrap); |
959 |
get_pixels(s->block[3], ptr + 8 * wrap + 8, wrap); |
960 |
wrap = s->linesize >> 1;
|
961 |
ptr = s->new_picture[1] + (mb_y * 8 * wrap) + mb_x * 8; |
962 |
get_pixels(s->block[4], ptr, wrap);
|
963 |
|
964 |
wrap = s->linesize >> 1;
|
965 |
ptr = s->new_picture[2] + (mb_y * 8 * wrap) + mb_x * 8; |
966 |
get_pixels(s->block[5], ptr, wrap);
|
967 |
|
968 |
/* subtract previous frame if non intra */
|
969 |
if (!s->mb_intra) {
|
970 |
int dxy, offset, mx, my;
|
971 |
|
972 |
dxy = ((motion_y & 1) << 1) | (motion_x & 1); |
973 |
ptr = s->last_picture[0] +
|
974 |
((mb_y * 16 + (motion_y >> 1)) * s->linesize) + |
975 |
(mb_x * 16 + (motion_x >> 1)); |
976 |
|
977 |
sub_pixels_2(s->block[0], ptr, s->linesize, dxy);
|
978 |
sub_pixels_2(s->block[1], ptr + 8, s->linesize, dxy); |
979 |
sub_pixels_2(s->block[2], ptr + s->linesize * 8, s->linesize, dxy); |
980 |
sub_pixels_2(s->block[3], ptr + 8 + s->linesize * 8, s->linesize ,dxy); |
981 |
|
982 |
if (s->out_format == FMT_H263) {
|
983 |
/* special rounding for h263 */
|
984 |
dxy = 0;
|
985 |
if ((motion_x & 3) != 0) |
986 |
dxy |= 1;
|
987 |
if ((motion_y & 3) != 0) |
988 |
dxy |= 2;
|
989 |
mx = motion_x >> 2;
|
990 |
my = motion_y >> 2;
|
991 |
} else {
|
992 |
mx = motion_x / 2;
|
993 |
my = motion_y / 2;
|
994 |
dxy = ((my & 1) << 1) | (mx & 1); |
995 |
mx >>= 1;
|
996 |
my >>= 1;
|
997 |
} |
998 |
offset = ((mb_y * 8 + my) * (s->linesize >> 1)) + (mb_x * 8 + mx); |
999 |
ptr = s->last_picture[1] + offset;
|
1000 |
sub_pixels_2(s->block[4], ptr, s->linesize >> 1, dxy); |
1001 |
ptr = s->last_picture[2] + offset;
|
1002 |
sub_pixels_2(s->block[5], ptr, s->linesize >> 1, dxy); |
1003 |
} |
1004 |
emms_c(); |
1005 |
|
1006 |
/* DCT & quantize */
|
1007 |
if (s->h263_msmpeg4) {
|
1008 |
msmpeg4_dc_scale(s); |
1009 |
} else if (s->h263_pred) { |
1010 |
h263_dc_scale(s); |
1011 |
} else {
|
1012 |
/* default quantization values */
|
1013 |
s->y_dc_scale = 8;
|
1014 |
s->c_dc_scale = 8;
|
1015 |
} |
1016 |
for(i=0;i<6;i++) { |
1017 |
s->block_last_index[i] = dct_quantize(s, s->block[i], i, s->qscale); |
1018 |
} |
1019 |
|
1020 |
/* huffman encode */
|
1021 |
switch(s->out_format) {
|
1022 |
case FMT_MPEG1:
|
1023 |
mpeg1_encode_mb(s, s->block, motion_x, motion_y); |
1024 |
break;
|
1025 |
case FMT_H263:
|
1026 |
if (s->h263_msmpeg4)
|
1027 |
msmpeg4_encode_mb(s, s->block, motion_x, motion_y); |
1028 |
else
|
1029 |
h263_encode_mb(s, s->block, motion_x, motion_y); |
1030 |
break;
|
1031 |
case FMT_MJPEG:
|
1032 |
mjpeg_encode_mb(s, s->block); |
1033 |
break;
|
1034 |
} |
1035 |
|
1036 |
/* decompress blocks so that we keep the state of the decoder */
|
1037 |
s->mv[0][0][0] = motion_x; |
1038 |
s->mv[0][0][1] = motion_y; |
1039 |
|
1040 |
MPV_decode_mb(s, s->block); |
1041 |
} |
1042 |
/* Obtain average MB line size for RTP */
|
1043 |
if (!mb_y)
|
1044 |
s->mb_line_avgsize = s->pb.buf_ptr - s->ptr_last_mb_line; |
1045 |
else
|
1046 |
s->mb_line_avgsize = (s->mb_line_avgsize + s->pb.buf_ptr - s->ptr_last_mb_line) >> 1;
|
1047 |
//fprintf(stderr, "\nMB line: %d\tSize: %u\tAvg. Size: %u", s->mb_y,
|
1048 |
// (s->pb.buf_ptr - s->ptr_last_mb_line), s->mb_line_avgsize);
|
1049 |
s->ptr_last_mb_line = s->pb.buf_ptr; |
1050 |
} |
1051 |
|
1052 |
if (s->h263_msmpeg4)
|
1053 |
msmpeg4_encode_ext_header(s); |
1054 |
|
1055 |
//if (s->gob_number)
|
1056 |
// fprintf(stderr,"\nNumber of GOB: %d", s->gob_number);
|
1057 |
} |
1058 |
|
1059 |
/* Forward DCT + quantization of one 8x8 block.
 * block : coefficients in natural order on entry; quantized zigzag-scanned
 *         values on exit.
 * n     : block index within the macroblock (0-3 luma, 4-5 chroma, judging
 *         by the y/c DC-scale selection below).
 * qscale: NOTE(review) this argument is unused here — the quantizer is
 *         presumably already factored into s->q_intra_matrix /
 *         s->q_non_intra_matrix; confirm against where those are built.
 * Returns the index (in zigzag order) of the last non-zero coefficient;
 * -1 for an all-zero inter block, >= 0 for intra (DC is always kept).
 */
static int dct_quantize_c(MpegEncContext *s,
                          DCTELEM *block, int n,
                          int qscale)
{
    int i, j, level, last_non_zero, q;
    const int *qmat;
    int minLevel, maxLevel;

    /* Pick the per-codec clamping range for quantized AC levels. */
    if(s->avctx!=NULL && s->avctx->codec->id==CODEC_ID_MPEG4){
        /* mpeg4 */
        minLevel= -2048;
        maxLevel= 2047;
    }else if(s->out_format==FMT_MPEG1){
        /* mpeg1 */
        minLevel= -255;
        maxLevel= 255;
    }else{
        /* h263 / msmpeg4 */
        minLevel= -128;
        maxLevel= 127;
    }

    av_fdct (block);

    /* we need this permutation so that we correct the IDCT
       permutation. will be moved into DCT code */
    block_permute(block);

    if (s->mb_intra) {
        /* Intra DC: divide by the (luma or chroma) DC scale, with
           rounding to nearest via the +q/2 term. */
        if (n < 4)
            q = s->y_dc_scale;
        else
            q = s->c_dc_scale;
        q = q << 3;

        /* note: block[0] is assumed to be positive */
        block[0] = (block[0] + (q >> 1)) / q;
        i = 1;
        last_non_zero = 0;
        /* H.263 quantizes intra AC with the non-intra matrix. */
        if (s->out_format == FMT_H263) {
            qmat = s->q_non_intra_matrix;
        } else {
            qmat = s->q_intra_matrix;
        }
    } else {
        i = 0;
        last_non_zero = -1;
        qmat = s->q_non_intra_matrix;
    }

    /* Quantize the remaining coefficients in zigzag order. */
    for(;i<64;i++) {
        j = zigzag_direct[i];
        level = block[j];
        level = level * qmat[j];
#ifdef PARANOID
        /* Cross-check the fixed-point result against a floating-point
           reference computed from the default matrices. */
        {
            static int count = 0;
            int level1, level2, qmat1;
            double val;
            if (qmat == s->q_non_intra_matrix) {
                qmat1 = default_non_intra_matrix[j] * s->qscale;
            } else {
                qmat1 = default_intra_matrix[j] * s->qscale;
            }
            if (av_fdct != jpeg_fdct_ifast)
                val = ((double)block[j] * 8.0) / (double)qmat1;
            else
                val = ((double)block[j] * 8.0 * 2048.0) /
                    ((double)qmat1 * aanscales[j]);
            level1 = (int)val;
            level2 = level / (1 << (QMAT_SHIFT - 3));
            if (level1 != level2) {
                fprintf(stderr, "%d: quant error qlevel=%d wanted=%d level=%d qmat1=%d qmat=%d wantedf=%0.6f\n",
                        count, level2, level1, block[j], qmat1, qmat[j],
                        val);
                count++;
            }
        }
#endif
        /* XXX: slight error for the low range. Test should be equivalent to
           (level <= -(1 << (QMAT_SHIFT - 3)) || level >= (1 <<
           (QMAT_SHIFT - 3)))
        */
        /* The shift pair sign-extends away all but the low QMAT_SHIFT-3
           bits; inequality means the level survives quantization. */
        if (((level << (31 - (QMAT_SHIFT - 3))) >> (31 - (QMAT_SHIFT - 3))) !=
            level) {
            level = level / (1 << (QMAT_SHIFT - 3));
            /* XXX: currently, this code is not optimal. the range should be:
               mpeg1: -255..255
               mpeg2: -2048..2047
               h263: -128..127
               mpeg4: -2048..2047
            */
            if (level > maxLevel)
                level = maxLevel;
            else if (level < minLevel)
                level = minLevel;

            block[j] = level;
            last_non_zero = i;
        } else {
            block[j] = 0;
        }
    }
    return last_non_zero;
}
1165 |
|
1166 |
/* MPEG-1 inverse quantization of one 8x8 coefficient block, in place.
 * n selects the block within the macroblock (0-3 luma, 4-5 chroma, judging
 * by the DC-scale choice); qscale is the current quantizer step.
 * The "(level - 1) | 1" step forces the reconstructed magnitude to be odd
 * (presumably MPEG-1 mismatch control — verify against the spec).
 */
static void dct_unquantize_mpeg1_c(MpegEncContext *s,
                                   DCTELEM *block, int n, int qscale)
{
    int i, level, nCoeffs;
    const UINT16 *quant_matrix;

    /* With alternate scan, block_last_index is not trustworthy here, so
       process all 64 coefficients. */
    if(s->alternate_scan) nCoeffs= 64;
    else nCoeffs= s->block_last_index[n]+1;

    if (s->mb_intra) {
        /* Intra DC is rescaled by the dedicated luma/chroma DC factor. */
        if (n < 4)
            block[0] = block[0] * s->y_dc_scale;
        else
            block[0] = block[0] * s->c_dc_scale;
        /* XXX: only mpeg1 */
        quant_matrix = s->intra_matrix;
        /* AC: level * qscale * matrix / 8, then force odd; negative
           levels are handled by negate / process / negate so the integer
           shift rounds toward zero symmetrically. */
        for(i=1;i<nCoeffs;i++) {
            int j= zigzag_direct[i];
            level = block[j];
            if (level) {
                if (level < 0) {
                    level = -level;
                    level = (int)(level * qscale * quant_matrix[j]) >> 3;
                    level = (level - 1) | 1;
                    level = -level;
                } else {
                    level = (int)(level * qscale * quant_matrix[j]) >> 3;
                    level = (level - 1) | 1;
                }
#ifdef PARANOID
                if (level < -2048 || level > 2047)
                    fprintf(stderr, "unquant error %d %d\n", i, level);
#endif
                block[j] = level;
            }
        }
    } else {
        /* Inter: (2*level + 1) * qscale * matrix / 16, again forced odd,
           with the same sign-symmetric handling. */
        i = 0;
        quant_matrix = s->non_intra_matrix;
        for(;i<nCoeffs;i++) {
            int j= zigzag_direct[i];
            level = block[j];
            if (level) {
                if (level < 0) {
                    level = -level;
                    level = (((level << 1) + 1) * qscale *
                             ((int) (quant_matrix[j]))) >> 4;
                    level = (level - 1) | 1;
                    level = -level;
                } else {
                    level = (((level << 1) + 1) * qscale *
                             ((int) (quant_matrix[j]))) >> 4;
                    level = (level - 1) | 1;
                }
#ifdef PARANOID
                if (level < -2048 || level > 2047)
                    fprintf(stderr, "unquant error %d %d\n", i, level);
#endif
                block[j] = level;
            }
        }
    }
}
1229 |
|
1230 |
static void dct_unquantize_h263_c(MpegEncContext *s, |
1231 |
DCTELEM *block, int n, int qscale) |
1232 |
{ |
1233 |
int i, level, qmul, qadd;
|
1234 |
int nCoeffs;
|
1235 |
|
1236 |
if (s->mb_intra) {
|
1237 |
if (n < 4) |
1238 |
block[0] = block[0] * s->y_dc_scale; |
1239 |
else
|
1240 |
block[0] = block[0] * s->c_dc_scale; |
1241 |
i = 1;
|
1242 |
nCoeffs= 64; //does not allways use zigzag table |
1243 |
} else {
|
1244 |
i = 0;
|
1245 |
nCoeffs= zigzag_end[ s->block_last_index[n] ]; |
1246 |
} |
1247 |
|
1248 |
qmul = s->qscale << 1;
|
1249 |
qadd = (s->qscale - 1) | 1; |
1250 |
|
1251 |
for(;i<nCoeffs;i++) {
|
1252 |
level = block[i]; |
1253 |
if (level) {
|
1254 |
if (level < 0) { |
1255 |
level = level * qmul - qadd; |
1256 |
} else {
|
1257 |
level = level * qmul + qadd; |
1258 |
} |
1259 |
#ifdef PARANOID
|
1260 |
if (level < -2048 || level > 2047) |
1261 |
fprintf(stderr, "unquant error %d %d\n", i, level);
|
1262 |
#endif
|
1263 |
block[i] = level; |
1264 |
} |
1265 |
} |
1266 |
} |
1267 |
|
1268 |
/* rate control */
|
1269 |
|
1270 |
/* an I frame is I_FRAME_SIZE_RATIO bigger than a P frame */
|
1271 |
#define I_FRAME_SIZE_RATIO 3.0 |
1272 |
#define QSCALE_K 20 |
1273 |
|
1274 |
/* Initialize the (very simple) rate controller: reset the bit-budget
 * accumulator and derive the per-frame bit targets for I and P frames
 * from the requested bitrate, frame rate and GOP size.
 */
static void rate_control_init(MpegEncContext *s)
{
    s->wanted_bits = 0;

    if (!s->intra_only) {
        /* Split the GOP's bit budget over its frames, counting the single
           I frame as I_FRAME_SIZE_RATIO P frames. */
        s->P_frame_bits = (int) ((float)(s->gop_size * s->bit_rate) /
            (float)((float)s->frame_rate / FRAME_RATE_BASE * (I_FRAME_SIZE_RATIO + s->gop_size - 1)));
        s->I_frame_bits = (int)(s->P_frame_bits * I_FRAME_SIZE_RATIO);
    } else {
        /* All-intra stream: every frame gets the plain per-frame budget. */
        s->I_frame_bits = ((INT64)s->bit_rate * FRAME_RATE_BASE) / s->frame_rate;
        s->P_frame_bits = s->I_frame_bits;
    }

#if defined(DEBUG)
    printf("I_frame_size=%d P_frame_size=%d\n",
           s->I_frame_bits, s->P_frame_bits);
#endif
}
1292 |
|
1293 |
|
1294 |
/*
|
1295 |
* This heuristic is rather poor, but at least we do not have to
|
1296 |
* change the qscale at every macroblock.
|
1297 |
*/
|
1298 |
/*
 * This heuristic is rather poor, but at least we do not have to
 * change the qscale at every macroblock.
 *
 * Picks one qscale for the whole picture from how far the actual bit
 * usage has drifted from the accumulated per-frame budget.
 * Returns the chosen quantizer, clamped to [3, 31].
 */
static int rate_estimate_qscale(MpegEncContext *s)
{
    INT64 diff, total_bits = s->total_bits;
    float q;
    int qscale, qmin;

    /* Grow the target budget by one frame's worth of bits. */
    s->wanted_bits += (s->pict_type == I_TYPE) ? s->I_frame_bits
                                               : s->P_frame_bits;

    /* Positive diff (under budget) lowers q; negative raises it. */
    diff = s->wanted_bits - total_bits;
    q = 31.0 - (float)diff / (QSCALE_K * s->mb_height * s->mb_width);

    /* adjust for I frame */
    if (s->pict_type == I_TYPE && !s->intra_only)
        q /= I_FRAME_SIZE_RATIO;

    /* using a too small Q scale leeds to problems in mpeg1 and h263
       because AC coefficients are clamped to 255 or 127 */
    qmin = 3;
    if (q < qmin)
        q = qmin;
    else if (q > 31)
        q = 31;

    qscale = (int)(q + 0.5);
#if defined(DEBUG)
    printf("%d: total=%0.0f br=%0.1f diff=%d qest=%0.1f\n",
           s->picture_number,
           (double)total_bits,
           (float)s->frame_rate / FRAME_RATE_BASE *
           total_bits / s->picture_number,
           diff, q);
#endif
    return qscale;
}
1334 |
|
1335 |
/* MPEG-1 video encoder registration. Fields appear to be: name, media
   type, codec id, private context size, then the init/encode/close
   callbacks — confirm order against the AVCodec declaration in avcodec.h.
   All encoders in this file share the generic MPV_* entry points. */
AVCodec mpeg1video_encoder = {
    "mpeg1video",
    CODEC_TYPE_VIDEO,
    CODEC_ID_MPEG1VIDEO,
    sizeof(MpegEncContext),
    MPV_encode_init,
    MPV_encode_picture,
    MPV_encode_end,
};
1344 |
|
1345 |
/* H.263 encoder registration — shares the generic MPV_* entry points. */
AVCodec h263_encoder = {
    "h263",
    CODEC_TYPE_VIDEO,
    CODEC_ID_H263,
    sizeof(MpegEncContext),
    MPV_encode_init,
    MPV_encode_picture,
    MPV_encode_end,
};
1354 |
|
1355 |
/* H.263+ encoder registration — shares the generic MPV_* entry points. */
AVCodec h263p_encoder = {
    "h263p",
    CODEC_TYPE_VIDEO,
    CODEC_ID_H263P,
    sizeof(MpegEncContext),
    MPV_encode_init,
    MPV_encode_picture,
    MPV_encode_end,
};
1364 |
|
1365 |
/* RealVideo 1.0 encoder registration — shares the generic MPV_* entry
   points. */
AVCodec rv10_encoder = {
    "rv10",
    CODEC_TYPE_VIDEO,
    CODEC_ID_RV10,
    sizeof(MpegEncContext),
    MPV_encode_init,
    MPV_encode_picture,
    MPV_encode_end,
};
1374 |
|
1375 |
/* Motion-JPEG encoder registration — shares the generic MPV_* entry
   points. */
AVCodec mjpeg_encoder = {
    "mjpeg",
    CODEC_TYPE_VIDEO,
    CODEC_ID_MJPEG,
    sizeof(MpegEncContext),
    MPV_encode_init,
    MPV_encode_picture,
    MPV_encode_end,
};
1384 |
|
1385 |
/* MPEG-4 encoder registration — shares the generic MPV_* entry points. */
AVCodec mpeg4_encoder = {
    "mpeg4",
    CODEC_TYPE_VIDEO,
    CODEC_ID_MPEG4,
    sizeof(MpegEncContext),
    MPV_encode_init,
    MPV_encode_picture,
    MPV_encode_end,
};
1394 |
|
1395 |
/* MS-MPEG4 encoder registration — shares the generic MPV_* entry points. */
AVCodec msmpeg4_encoder = {
    "msmpeg4",
    CODEC_TYPE_VIDEO,
    CODEC_ID_MSMPEG4,
    sizeof(MpegEncContext),
    MPV_encode_init,
    MPV_encode_picture,
    MPV_encode_end,
};