ffmpeg / libavcodec / mpegvideo.c @ 01dbbd0a
/*
 * The simplest mpeg encoder (well, it was the simplest!)
 * Copyright (c) 2000,2001 Gerard Lantau.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */
#include <stdlib.h>
#include <stdio.h>
#include <math.h>
#include <string.h>
#include "avcodec.h"
#include "dsputil.h"
#include "mpegvideo.h"

#ifdef USE_FASTMEMCPY
#include "fastmemcpy.h"
#endif

static void encode_picture(MpegEncContext *s, int picture_number);
static void rate_control_init(MpegEncContext *s);
static int rate_estimate_qscale(MpegEncContext *s);
static void dct_unquantize_mpeg1_c(MpegEncContext *s,
                                   DCTELEM *block, int n, int qscale);
static void dct_unquantize_h263_c(MpegEncContext *s,
                                  DCTELEM *block, int n, int qscale);
static int dct_quantize(MpegEncContext *s, DCTELEM *block, int n, int qscale);
static int dct_quantize_mmx(MpegEncContext *s,
                            DCTELEM *block, int n,
                            int qscale);
#define EDGE_WIDTH 16

/* enable all paranoid tests for rounding, overflows, etc... */
//#define PARANOID

//#define DEBUG

/* for jpeg fast DCT */
#define CONST_BITS 14

static const unsigned short aanscales[64] = {
    /* precomputed values scaled up by 14 bits */
    16384, 22725, 21407, 19266, 16384, 12873,  8867,  4520,
    22725, 31521, 29692, 26722, 22725, 17855, 12299,  6270,
    21407, 29692, 27969, 25172, 21407, 16819, 11585,  5906,
    19266, 26722, 25172, 22654, 19266, 15137, 10426,  5315,
    16384, 22725, 21407, 19266, 16384, 12873,  8867,  4520,
    12873, 17855, 16819, 15137, 12873, 10114,  6967,  3552,
     8867, 12299, 11585, 10426,  8867,  6967,  4799,  2446,
     4520,  6270,  5906,  5315,  4520,  3552,  2446,  1247
};

static UINT8 h263_chroma_roundtab[16] = {
    0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2,
};

/* default motion estimation */
int motion_estimation_method = ME_LOG;

/* XXX: should use variable shift ? */
#define QMAT_SHIFT_MMX 19
#define QMAT_SHIFT 25

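/* Precompute the reciprocal of the effective quantizer step for every
   coefficient, so that dct_quantize()/dct_quantize_mmx() can replace the
   per-coefficient division by a multiply and a shift.  When the AAN DCT
   (jpeg_fdct_ifast) is used, its aanscales[] output scaling is folded into
   the same table. */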
static void convert_matrix(int *qmat, const UINT16 *quant_matrix, int qscale)
{
    int i;

    if (av_fdct == jpeg_fdct_ifast) {
        for(i=0;i<64;i++) {
            /* 16 <= qscale * quant_matrix[i] <= 7905 */
            /* 19952 <= aanscales[i] * qscale * quant_matrix[i] <= 249205026 */

            qmat[i] = (int)((UINT64_C(1) << (QMAT_SHIFT + 11)) /
                            (aanscales[i] * qscale * quant_matrix[i]));
        }
    } else {
        for(i=0;i<64;i++) {
            /* We can safely suppose that 16 <= quant_matrix[i] <= 255
               So 16 <= qscale * quant_matrix[i] <= 7905
               so (1 << QMAT_SHIFT) / 16 >= qmat[i] >= (1 << QMAT_SHIFT) / 7905
            */
            qmat[i] = (1 << QMAT_SHIFT_MMX) / (qscale * quant_matrix[i]);
        }
    }
}

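/* Note on buffer layout: every picture plane is allocated with an
   EDGE_WIDTH border on all four sides (EDGE_WIDTH/2 for chroma); the
   *_picture[] pointers point just past the top-left border while the
   *_picture_base[] pointers keep the address returned by the allocator,
   so that motion vectors may reach slightly outside the visible area. */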
/* init common structure for both encoder and decoder */
int MPV_common_init(MpegEncContext *s)
{
    int c_size, i;
    UINT8 *pict;

    if (s->out_format == FMT_H263)
        s->dct_unquantize = dct_unquantize_h263_c;
    else
        s->dct_unquantize = dct_unquantize_mpeg1_c;

#ifdef HAVE_MMX
    MPV_common_init_mmx(s);
#endif
    s->mb_width = (s->width + 15) / 16;
    s->mb_height = (s->height + 15) / 16;
    s->linesize = s->mb_width * 16 + 2 * EDGE_WIDTH;

    for(i=0;i<3;i++) {
        int w, h, shift, pict_start;

        w = s->linesize;
        h = s->mb_height * 16 + 2 * EDGE_WIDTH;
        shift = (i == 0) ? 0 : 1;
        c_size = (w >> shift) * (h >> shift);
        pict_start = (w >> shift) * (EDGE_WIDTH >> shift) + (EDGE_WIDTH >> shift);

        pict = av_mallocz(c_size);
        if (pict == NULL)
            goto fail;
        s->last_picture_base[i] = pict;
        s->last_picture[i] = pict + pict_start;

        pict = av_mallocz(c_size);
        if (pict == NULL)
            goto fail;
        s->next_picture_base[i] = pict;
        s->next_picture[i] = pict + pict_start;

        if (s->has_b_frames) {
            pict = av_mallocz(c_size);
            if (pict == NULL)
                goto fail;
            s->aux_picture_base[i] = pict;
            s->aux_picture[i] = pict + pict_start;
        }
    }

    if (s->out_format == FMT_H263) {
        int size;
        /* MV prediction */
        size = (2 * s->mb_width + 2) * (2 * s->mb_height + 2);
        s->motion_val = malloc(size * 2 * sizeof(INT16));
        if (s->motion_val == NULL)
            goto fail;
        memset(s->motion_val, 0, size * 2 * sizeof(INT16));
    }

    if (s->h263_pred) {
        int y_size, c_size, i, size;

        /* dc values */

        y_size = (2 * s->mb_width + 2) * (2 * s->mb_height + 2);
        c_size = (s->mb_width + 2) * (s->mb_height + 2);
        size = y_size + 2 * c_size;
        s->dc_val[0] = malloc(size * sizeof(INT16));
        if (s->dc_val[0] == NULL)
            goto fail;
        s->dc_val[1] = s->dc_val[0] + y_size;
        s->dc_val[2] = s->dc_val[1] + c_size;
        for(i=0;i<size;i++)
            s->dc_val[0][i] = 1024;

        /* ac values */
        s->ac_val[0] = av_mallocz(size * sizeof(INT16) * 16);
        if (s->ac_val[0] == NULL)
            goto fail;
        s->ac_val[1] = s->ac_val[0] + y_size;
        s->ac_val[2] = s->ac_val[1] + c_size;

        /* cbp values */
        s->coded_block = av_mallocz(y_size);
        if (!s->coded_block)
            goto fail;
    }

    /* which mb is an intra block */
    s->mbintra_table = av_mallocz(s->mb_width * s->mb_height);
    if (!s->mbintra_table)
        goto fail;
    memset(s->mbintra_table, 1, s->mb_width * s->mb_height);
    /* default structure is frame */
    s->picture_structure = PICT_FRAME;

    /* init macroblock skip table */
    if (!s->encoding) {
        s->mbskip_table = av_mallocz(s->mb_width * s->mb_height);
        if (!s->mbskip_table)
            goto fail;
    }

    s->context_initialized = 1;
    return 0;
 fail:
    if (s->motion_val)
        free(s->motion_val);
    if (s->dc_val[0])
        free(s->dc_val[0]);
    if (s->ac_val[0])
        free(s->ac_val[0]);
    if (s->coded_block)
        free(s->coded_block);
    if (s->mbintra_table) {
        free(s->mbintra_table);
        s->mbintra_table = NULL;
    }
    if (s->mbskip_table)
        free(s->mbskip_table);
    for(i=0;i<3;i++) {
        if (s->last_picture_base[i])
            free(s->last_picture_base[i]);
        if (s->next_picture_base[i])
            free(s->next_picture_base[i]);
        if (s->aux_picture_base[i])
            free(s->aux_picture_base[i]);
    }
    return -1;
}

/* free the resources allocated by MPV_common_init (both encoder and decoder) */
void MPV_common_end(MpegEncContext *s)
{
    int i;

    if (s->motion_val)
        free(s->motion_val);
    if (s->h263_pred) {
        free(s->dc_val[0]);
        free(s->ac_val[0]);
        free(s->coded_block);
        free(s->mbintra_table);
        s->mbintra_table = NULL;
    }
    if (s->mbskip_table)
        free(s->mbskip_table);
    for(i=0;i<3;i++) {
        free(s->last_picture_base[i]);
        free(s->next_picture_base[i]);
        if (s->has_b_frames)
            free(s->aux_picture_base[i]);
    }
    s->context_initialized = 0;
}

/* init video encoder */
int MPV_encode_init(AVCodecContext *avctx)
{
    MpegEncContext *s = avctx->priv_data;
    int i;

    s->bit_rate = avctx->bit_rate;
    s->frame_rate = avctx->frame_rate;
    s->width = avctx->width;
    s->height = avctx->height;
    s->gop_size = avctx->gop_size;
    s->rtp_mode = avctx->rtp_mode;
    s->rtp_payload_size = avctx->rtp_payload_size;
    s->avctx = avctx;

    if (s->gop_size <= 1) {
        s->intra_only = 1;
        s->gop_size = 12;
    } else {
        s->intra_only = 0;
    }
    s->full_search = motion_estimation_method;

    s->fixed_qscale = (avctx->flags & CODEC_FLAG_QSCALE);

    switch(avctx->codec->id) {
    case CODEC_ID_MPEG1VIDEO:
        s->out_format = FMT_MPEG1;
        break;
    case CODEC_ID_MJPEG:
        s->out_format = FMT_MJPEG;
        s->intra_only = 1; /* force intra only for jpeg */
        if (mjpeg_init(s) < 0)
            return -1;
        break;
    case CODEC_ID_H263:
        if (h263_get_picture_format(s->width, s->height) == 7) {
            printf("Input picture size isn't suitable for h263 codec! try h263+\n");
            return -1;
        }
        s->out_format = FMT_H263;
        break;
    case CODEC_ID_H263P:
        s->out_format = FMT_H263;
        s->rtp_mode = 1;
        s->rtp_payload_size = 1200;
        s->h263_plus = 1;
        s->unrestricted_mv = 1;

        /* These are just to be sure */
        s->umvplus = 0;
        s->umvplus_dec = 0;
        break;
    case CODEC_ID_RV10:
        s->out_format = FMT_H263;
        s->h263_rv10 = 1;
        break;
    case CODEC_ID_MPEG4:
        s->out_format = FMT_H263;
        s->h263_pred = 1;
        s->unrestricted_mv = 1;
        break;
    case CODEC_ID_MSMPEG4:
        s->out_format = FMT_H263;
        s->h263_msmpeg4 = 1;
        s->h263_pred = 1;
        s->unrestricted_mv = 1;
        break;
    default:
        return -1;
    }

    if (s->out_format == FMT_H263)
        h263_encode_init_vlc(s);

    s->encoding = 1;

    /* init */
    if (MPV_common_init(s) < 0)
        return -1;

    /* init default q matrix */
    for(i=0;i<64;i++) {
        s->intra_matrix[i] = default_intra_matrix[i];
        s->non_intra_matrix[i] = default_non_intra_matrix[i];
    }

    /* rate control init */
    rate_control_init(s);

    s->picture_number = 0;
    s->fake_picture_number = 0;
    /* motion detector init */
    s->f_code = 1;

    return 0;
}

int MPV_encode_end(AVCodecContext *avctx)
{
    MpegEncContext *s = avctx->priv_data;

#ifdef STATS
    print_stats();
#endif
    MPV_common_end(s);
    if (s->out_format == FMT_MJPEG)
        mjpeg_close(s);
    return 0;
}

/* draw the edges of width 'w' of an image of size width, height */
static void draw_edges(UINT8 *buf, int wrap, int width, int height, int w)
{
    UINT8 *ptr, *last_line;
    int i;

    last_line = buf + (height - 1) * wrap;
    for(i=0;i<w;i++) {
        /* top and bottom */
        memcpy(buf - (i + 1) * wrap, buf, width);
        memcpy(last_line + (i + 1) * wrap, last_line, width);
    }
    /* left and right */
    ptr = buf;
    for(i=0;i<height;i++) {
        memset(ptr - w, ptr[0], w);
        memset(ptr + width, ptr[width-1], w);
        ptr += wrap;
    }
    /* corners */
    for(i=0;i<w;i++) {
        memset(buf - (i + 1) * wrap - w, buf[0], w); /* top left */
        memset(buf - (i + 1) * wrap + width, buf[width-1], w); /* top right */
        memset(last_line + (i + 1) * wrap - w, last_line[0], w); /* bottom left */
        memset(last_line + (i + 1) * wrap + width, last_line[width-1], w); /* bottom right */
    }
}

/* generic function for encode/decode called before a frame is coded/decoded */
void MPV_frame_start(MpegEncContext *s)
{
    int i;
    UINT8 *tmp;

    s->mb_skiped = 0;
    if (s->pict_type == B_TYPE) {
        for(i=0;i<3;i++) {
            s->current_picture[i] = s->aux_picture[i];
        }
    } else {
        for(i=0;i<3;i++) {
            /* swap next and last */
            tmp = s->last_picture[i];
            s->last_picture[i] = s->next_picture[i];
            s->next_picture[i] = tmp;
            s->current_picture[i] = tmp;
        }
    }
}

/* generic function for encode/decode called after a frame has been coded/decoded */
void MPV_frame_end(MpegEncContext *s)
{
    /* draw edge for correct motion prediction if outside */
    if (s->pict_type != B_TYPE) {
        if (s->avctx->codec->id != CODEC_ID_MPEG4) {
            draw_edges(s->current_picture[0], s->linesize, s->mb_width*16, s->mb_height*16, EDGE_WIDTH);
            draw_edges(s->current_picture[1], s->linesize/2, s->mb_width*8, s->mb_height*8, EDGE_WIDTH/2);
            draw_edges(s->current_picture[2], s->linesize/2, s->mb_width*8, s->mb_height*8, EDGE_WIDTH/2);
        } else {
            /* OpenDivX case, but there is no way to distinguish it from mpeg4 */
            draw_edges(s->current_picture[0], s->linesize, s->width, s->height, EDGE_WIDTH);
            draw_edges(s->current_picture[1], s->linesize/2, s->width/2, s->height/2, EDGE_WIDTH/2);
            draw_edges(s->current_picture[2], s->linesize/2, s->width/2, s->height/2, EDGE_WIDTH/2);
        }
    }
}

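/* Encode one input picture: the source planes are copied into the
   edge-padded current_picture buffers, the picture type is chosen from the
   position in the GOP (the first picture of every GOP is intra), the frame
   is encoded into the caller's buffer and the number of bytes written is
   returned. */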
int MPV_encode_picture(AVCodecContext *avctx,
                       unsigned char *buf, int buf_size, void *data)
{
    MpegEncContext *s = avctx->priv_data;
    AVPicture *pict = data;
    int i, j;

    if (s->fixed_qscale)
        s->qscale = avctx->quality;

    init_put_bits(&s->pb, buf, buf_size, NULL, NULL);

    if (!s->intra_only) {
        /* first picture of GOP is intra */
        if ((s->picture_number % s->gop_size) == 0)
            s->pict_type = I_TYPE;
        else
            s->pict_type = P_TYPE;
    } else {
        s->pict_type = I_TYPE;
    }
    avctx->key_frame = (s->pict_type == I_TYPE);

    MPV_frame_start(s);

    for(i=0;i<3;i++) {
        UINT8 *src = pict->data[i];
        UINT8 *dest = s->current_picture[i];
        int src_wrap = pict->linesize[i];
        int dest_wrap = s->linesize;
        int w = s->width;
        int h = s->height;

        if (i >= 1) {
            dest_wrap >>= 1;
            w >>= 1;
            h >>= 1;
        }

        for(j=0;j<h;j++) {
            memcpy(dest, src, w);
            dest += dest_wrap;
            src += src_wrap;
        }
        s->new_picture[i] = s->current_picture[i];
    }

    encode_picture(s, s->picture_number);

    MPV_frame_end(s);
    s->picture_number++;

    if (s->out_format == FMT_MJPEG)
        mjpeg_picture_trailer(s);

    flush_put_bits(&s->pb);
    s->total_bits += (s->pb.buf_ptr - s->pb.buf) * 8;
    avctx->quality = s->qscale;
    return s->pb.buf_ptr - s->pb.buf;
}

static inline int clip(int a, int amin, int amax)
{
    if (a < amin)
        return amin;
    else if (a > amax)
        return amax;
    else
        return a;
}

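/* Motion compensation helpers.  'dxy' packs the half-pel fraction of a
   motion vector into two bits (bit 0: horizontal half pel, bit 1: vertical
   half pel) and selects the matching entry of a pix_op[] table (plain copy
   or horizontal/vertical/diagonal half-pel interpolation). */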
/* apply one mpeg motion vector to the three components */
static inline void mpeg_motion(MpegEncContext *s,
                               UINT8 *dest_y, UINT8 *dest_cb, UINT8 *dest_cr,
                               int dest_offset,
                               UINT8 **ref_picture, int src_offset,
                               int field_based, op_pixels_func *pix_op,
                               int motion_x, int motion_y, int h)
{
    UINT8 *ptr;
    int dxy, offset, mx, my, src_x, src_y, height, linesize;

    dxy = ((motion_y & 1) << 1) | (motion_x & 1);
    src_x = s->mb_x * 16 + (motion_x >> 1);
    src_y = s->mb_y * (16 >> field_based) + (motion_y >> 1);

    /* WARNING: do not forget half pels */
    height = s->height >> field_based;
    src_x = clip(src_x, -16, s->width);
    if (src_x == s->width)
        dxy &= ~1;
    src_y = clip(src_y, -16, height);
    if (src_y == height)
        dxy &= ~2;
    linesize = s->linesize << field_based;
    ptr = ref_picture[0] + (src_y * linesize) + (src_x) + src_offset;
    dest_y += dest_offset;
    pix_op[dxy](dest_y, ptr, linesize, h);
    pix_op[dxy](dest_y + 8, ptr + 8, linesize, h);

    if (s->out_format == FMT_H263) {
        dxy = 0;
        if ((motion_x & 3) != 0)
            dxy |= 1;
        if ((motion_y & 3) != 0)
            dxy |= 2;
        mx = motion_x >> 2;
        my = motion_y >> 2;
    } else {
        mx = motion_x / 2;
        my = motion_y / 2;
        dxy = ((my & 1) << 1) | (mx & 1);
        mx >>= 1;
        my >>= 1;
    }

    src_x = s->mb_x * 8 + mx;
    src_y = s->mb_y * (8 >> field_based) + my;
    src_x = clip(src_x, -8, s->width >> 1);
    if (src_x == (s->width >> 1))
        dxy &= ~1;
    src_y = clip(src_y, -8, height >> 1);
    if (src_y == (height >> 1))
        dxy &= ~2;

    offset = (src_y * (linesize >> 1)) + src_x + (src_offset >> 1);
    ptr = ref_picture[1] + offset;
    pix_op[dxy](dest_cb + (dest_offset >> 1), ptr, linesize >> 1, h >> 1);
    ptr = ref_picture[2] + offset;
    pix_op[dxy](dest_cr + (dest_offset >> 1), ptr, linesize >> 1, h >> 1);
}

static inline void MPV_motion(MpegEncContext *s,
                              UINT8 *dest_y, UINT8 *dest_cb, UINT8 *dest_cr,
                              int dir, UINT8 **ref_picture,
                              op_pixels_func *pix_op)
{
    int dxy, offset, mx, my, src_x, src_y, motion_x, motion_y;
    int mb_x, mb_y, i;
    UINT8 *ptr, *dest;

    mb_x = s->mb_x;
    mb_y = s->mb_y;

    switch(s->mv_type) {
    case MV_TYPE_16X16:
        mpeg_motion(s, dest_y, dest_cb, dest_cr, 0,
                    ref_picture, 0,
                    0, pix_op,
                    s->mv[dir][0][0], s->mv[dir][0][1], 16);
        break;
    case MV_TYPE_8X8:
        for(i=0;i<4;i++) {
            motion_x = s->mv[dir][i][0];
            motion_y = s->mv[dir][i][1];

            dxy = ((motion_y & 1) << 1) | (motion_x & 1);
            src_x = mb_x * 16 + (motion_x >> 1) + (i & 1) * 8;
            src_y = mb_y * 16 + (motion_y >> 1) + ((i >> 1) & 1) * 8;

            /* WARNING: do not forget half pels */
            src_x = clip(src_x, -16, s->width);
            if (src_x == s->width)
                dxy &= ~1;
            src_y = clip(src_y, -16, s->height);
            if (src_y == s->height)
                dxy &= ~2;

            ptr = ref_picture[0] + (src_y * s->linesize) + (src_x);
            dest = dest_y + ((i & 1) * 8) + (i >> 1) * 8 * s->linesize;
            pix_op[dxy](dest, ptr, s->linesize, 8);
        }
        /* In case of 8X8, we construct a single chroma motion vector
           with a special rounding */
        mx = 0;
        my = 0;
        for(i=0;i<4;i++) {
            mx += s->mv[dir][i][0];
            my += s->mv[dir][i][1];
        }
        if (mx >= 0)
            mx = (h263_chroma_roundtab[mx & 0xf] + ((mx >> 3) & ~1));
        else {
            mx = -mx;
            mx = -(h263_chroma_roundtab[mx & 0xf] + ((mx >> 3) & ~1));
        }
        if (my >= 0)
            my = (h263_chroma_roundtab[my & 0xf] + ((my >> 3) & ~1));
        else {
            my = -my;
            my = -(h263_chroma_roundtab[my & 0xf] + ((my >> 3) & ~1));
        }
        dxy = ((my & 1) << 1) | (mx & 1);
        mx >>= 1;
        my >>= 1;

        src_x = mb_x * 8 + mx;
        src_y = mb_y * 8 + my;
        src_x = clip(src_x, -8, s->width/2);
        if (src_x == s->width/2)
            dxy &= ~1;
        src_y = clip(src_y, -8, s->height/2);
        if (src_y == s->height/2)
            dxy &= ~2;

        offset = (src_y * (s->linesize >> 1)) + src_x;
        ptr = ref_picture[1] + offset;
        pix_op[dxy](dest_cb, ptr, s->linesize >> 1, 8);
        ptr = ref_picture[2] + offset;
        pix_op[dxy](dest_cr, ptr, s->linesize >> 1, 8);
        break;
    case MV_TYPE_FIELD:
        if (s->picture_structure == PICT_FRAME) {
            /* top field */
            mpeg_motion(s, dest_y, dest_cb, dest_cr, 0,
                        ref_picture, s->field_select[dir][0] ? s->linesize : 0,
                        1, pix_op,
                        s->mv[dir][0][0], s->mv[dir][0][1], 8);
            /* bottom field */
            mpeg_motion(s, dest_y, dest_cb, dest_cr, s->linesize,
                        ref_picture, s->field_select[dir][1] ? s->linesize : 0,
                        1, pix_op,
                        s->mv[dir][1][0], s->mv[dir][1][1], 8);
        } else {
            /* field pictures: not handled here */
        }
        break;
    }
}

/* put block[] to dest[] */
static inline void put_dct(MpegEncContext *s,
                           DCTELEM *block, int i, UINT8 *dest, int line_size)
{
    if (!s->mpeg2)
        s->dct_unquantize(s, block, i, s->qscale);
    ff_idct(block);
    put_pixels_clamped(block, dest, line_size);
}

/* add block[] to dest[] */
static inline void add_dct(MpegEncContext *s,
                           DCTELEM *block, int i, UINT8 *dest, int line_size)
{
    if (s->block_last_index[i] >= 0) {
        if (!s->mpeg2)
            s->dct_unquantize(s, block, i, s->qscale);
        ff_idct(block);
        add_pixels_clamped(block, dest, line_size);
    }
}

/* generic function called after a macroblock has been parsed by the
   decoder or after it has been encoded by the encoder.

   Important variables used:
   s->mb_intra : true if intra macroblock
   s->mv_dir : motion vector direction
   s->mv_type : motion vector type
   s->mv : motion vector
   s->interlaced_dct : true if interlaced dct used (mpeg2)
 */
void MPV_decode_mb(MpegEncContext *s, DCTELEM block[6][64])
{
    int mb_x, mb_y, motion_x, motion_y;
    int dct_linesize, dct_offset;
    op_pixels_func *op_pix;

    mb_x = s->mb_x;
    mb_y = s->mb_y;

#ifdef FF_POSTPROCESS
    quant_store[mb_y][mb_x] = s->qscale;
    //printf("[%02d][%02d] %d\n", mb_x, mb_y, s->qscale);
#endif

    /* update DC predictors for P macroblocks */
    if (!s->mb_intra) {
        if (s->h263_pred) {
            if (s->mbintra_table[mb_x + mb_y*s->mb_width]) {
                int wrap, x, y, v;
                s->mbintra_table[mb_x + mb_y*s->mb_width] = 0;

                wrap = 2 * s->mb_width + 2;
                v = 1024;
                x = 2 * mb_x + 1;
                y = 2 * mb_y + 1;

                s->dc_val[0][(x) + (y) * wrap] = v;
                s->dc_val[0][(x + 1) + (y) * wrap] = v;
                s->dc_val[0][(x) + (y + 1) * wrap] = v;
                s->dc_val[0][(x + 1) + (y + 1) * wrap] = v;
                /* ac pred */
                memset(s->ac_val[0][(x) + (y) * wrap], 0, 16 * sizeof(INT16));
                memset(s->ac_val[0][(x + 1) + (y) * wrap], 0, 16 * sizeof(INT16));
                memset(s->ac_val[0][(x) + (y + 1) * wrap], 0, 16 * sizeof(INT16));
                memset(s->ac_val[0][(x + 1) + (y + 1) * wrap], 0, 16 * sizeof(INT16));
                if (s->h263_msmpeg4) {
                    s->coded_block[(x) + (y) * wrap] = 0;
                    s->coded_block[(x + 1) + (y) * wrap] = 0;
                    s->coded_block[(x) + (y + 1) * wrap] = 0;
                    s->coded_block[(x + 1) + (y + 1) * wrap] = 0;
                }
                /* chroma */
                wrap = s->mb_width + 2;
                x = mb_x + 1;
                y = mb_y + 1;
                s->dc_val[1][(x) + (y) * wrap] = v;
                s->dc_val[2][(x) + (y) * wrap] = v;
                /* ac pred */
                memset(s->ac_val[1][(x) + (y) * wrap], 0, 16 * sizeof(INT16));
                memset(s->ac_val[2][(x) + (y) * wrap], 0, 16 * sizeof(INT16));
            }
        } else {
            s->last_dc[0] = 128 << s->intra_dc_precision;
            s->last_dc[1] = 128 << s->intra_dc_precision;
            s->last_dc[2] = 128 << s->intra_dc_precision;
        }
    } else
        s->mbintra_table[mb_x + mb_y*s->mb_width] = 1;

    /* update motion predictor */
    if (s->out_format == FMT_H263) {
        int x, y, wrap;

        x = 2 * mb_x + 1;
        y = 2 * mb_y + 1;
        wrap = 2 * s->mb_width + 2;
        if (s->mb_intra) {
            motion_x = 0;
            motion_y = 0;
            goto motion_init;
        } else if (s->mv_type == MV_TYPE_16X16) {
            motion_x = s->mv[0][0][0];
            motion_y = s->mv[0][0][1];
        motion_init:
            /* no update if 8X8 because it has been done during parsing */
            s->motion_val[(x) + (y) * wrap][0] = motion_x;
            s->motion_val[(x) + (y) * wrap][1] = motion_y;
            s->motion_val[(x + 1) + (y) * wrap][0] = motion_x;
            s->motion_val[(x + 1) + (y) * wrap][1] = motion_y;
            s->motion_val[(x) + (y + 1) * wrap][0] = motion_x;
            s->motion_val[(x) + (y + 1) * wrap][1] = motion_y;
            s->motion_val[(x + 1) + (y + 1) * wrap][0] = motion_x;
            s->motion_val[(x + 1) + (y + 1) * wrap][1] = motion_y;
        }
    }

    if (!s->intra_only) {
        UINT8 *dest_y, *dest_cb, *dest_cr;
        UINT8 *mbskip_ptr;

        /* avoid copy if macroblock skipped in last frame too */
        if (!s->encoding && s->pict_type != B_TYPE) {
            mbskip_ptr = &s->mbskip_table[s->mb_y * s->mb_width + s->mb_x];
            if (s->mb_skiped) {
                s->mb_skiped = 0;
                /* if previous was skipped too, then nothing to do ! */
                if (*mbskip_ptr != 0)
                    goto the_end;
                *mbskip_ptr = 1; /* indicate that this time we skipped it */
            } else {
                *mbskip_ptr = 0; /* not skipped */
            }
        }

        dest_y = s->current_picture[0] + (mb_y * 16 * s->linesize) + mb_x * 16;
        dest_cb = s->current_picture[1] + (mb_y * 8 * (s->linesize >> 1)) + mb_x * 8;
        dest_cr = s->current_picture[2] + (mb_y * 8 * (s->linesize >> 1)) + mb_x * 8;

        if (s->interlaced_dct) {
            dct_linesize = s->linesize * 2;
            dct_offset = s->linesize;
        } else {
            dct_linesize = s->linesize;
            dct_offset = s->linesize * 8;
        }

        if (!s->mb_intra) {
            /* motion handling */
            if (!s->no_rounding)
                op_pix = put_pixels_tab;
            else
                op_pix = put_no_rnd_pixels_tab;

            if (s->mv_dir & MV_DIR_FORWARD) {
                MPV_motion(s, dest_y, dest_cb, dest_cr, 0, s->last_picture, op_pix);
                if (!s->no_rounding)
                    op_pix = avg_pixels_tab;
                else
                    op_pix = avg_no_rnd_pixels_tab;
            }
            if (s->mv_dir & MV_DIR_BACKWARD) {
                MPV_motion(s, dest_y, dest_cb, dest_cr, 1, s->next_picture, op_pix);
            }

            /* add dct residue */
            add_dct(s, block[0], 0, dest_y, dct_linesize);
            add_dct(s, block[1], 1, dest_y + 8, dct_linesize);
            add_dct(s, block[2], 2, dest_y + dct_offset, dct_linesize);
            add_dct(s, block[3], 3, dest_y + dct_offset + 8, dct_linesize);

            add_dct(s, block[4], 4, dest_cb, s->linesize >> 1);
            add_dct(s, block[5], 5, dest_cr, s->linesize >> 1);
        } else {
            /* dct only in intra block */
            put_dct(s, block[0], 0, dest_y, dct_linesize);
            put_dct(s, block[1], 1, dest_y + 8, dct_linesize);
            put_dct(s, block[2], 2, dest_y + dct_offset, dct_linesize);
            put_dct(s, block[3], 3, dest_y + dct_offset + 8, dct_linesize);

            put_dct(s, block[4], 4, dest_cb, s->linesize >> 1);
            put_dct(s, block[5], 5, dest_cr, s->linesize >> 1);
        }
    }
 the_end:
    emms_c();
}

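/* Encode one frame: pick the qscale through the rate control (unless a
   fixed qscale is forced), precompute the quantizer tables, write the
   picture header, then for each macroblock estimate motion (P frames),
   fetch the source pixels, subtract the motion-compensated prediction for
   inter blocks, run DCT + quantization and entropy coding, and finally
   reconstruct the macroblock with MPV_decode_mb() so that the encoder keeps
   the same reference pictures a decoder would have. */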
static void encode_picture(MpegEncContext *s, int picture_number)
{
    int mb_x, mb_y, wrap, last_gob;
    UINT8 *ptr;
    int i, motion_x, motion_y;

    s->picture_number = picture_number;
    if (!s->fixed_qscale)
        s->qscale = rate_estimate_qscale(s);

    /* precompute matrix */
    if (s->out_format == FMT_MJPEG) {
        /* for mjpeg, we do include qscale in the matrix */
        s->intra_matrix[0] = default_intra_matrix[0];
        for(i=1;i<64;i++)
            s->intra_matrix[i] = (default_intra_matrix[i] * s->qscale) >> 3;
        convert_matrix(s->q_intra_matrix, s->intra_matrix, 8);
    } else {
        convert_matrix(s->q_intra_matrix, s->intra_matrix, s->qscale);
        convert_matrix(s->q_non_intra_matrix, s->non_intra_matrix, s->qscale);
    }

    switch(s->out_format) {
    case FMT_MJPEG:
        mjpeg_picture_header(s);
        break;
    case FMT_H263:
        if (s->h263_msmpeg4)
            msmpeg4_encode_picture_header(s, picture_number);
        else if (s->h263_pred)
            mpeg4_encode_picture_header(s, picture_number);
        else if (s->h263_rv10)
            rv10_encode_picture_header(s, picture_number);
        else
            h263_encode_picture_header(s, picture_number);
        break;
    case FMT_MPEG1:
        mpeg1_encode_picture_header(s, picture_number);
        break;
    }

    /* init last dc values */
    /* note: quant matrix value (8) is implied here */
    s->last_dc[0] = 128;
    s->last_dc[1] = 128;
    s->last_dc[2] = 128;
    s->mb_incr = 1;
    s->last_mv[0][0][0] = 0;
    s->last_mv[0][0][1] = 0;
    s->mv_type = MV_TYPE_16X16;
    s->mv_dir = MV_DIR_FORWARD;

    /* Get the GOB height based on picture height */
    if (s->out_format == FMT_H263 && s->h263_plus) {
        if (s->height <= 400)
            s->gob_index = 1;
        else if (s->height <= 800)
            s->gob_index = 2;
        else
            s->gob_index = 4;
    }

    for(mb_y=0; mb_y < s->mb_height; mb_y++) {
        /* Put GOB header based on RTP MTU */
        if (!mb_y) {
            s->ptr_lastgob = s->pb.buf_ptr;
            s->ptr_last_mb_line = s->pb.buf_ptr;
        } else if (s->out_format == FMT_H263 && s->h263_plus) {
            last_gob = h263_encode_gob_header(s, mb_y);
            if (last_gob) {
                //fprintf(stderr, "\nLast GOB size: %d", last_gob);
                s->first_gob_line = 1;
            } else
                s->first_gob_line = 0;
        }
        for(mb_x=0; mb_x < s->mb_width; mb_x++) {

            s->mb_x = mb_x;
            s->mb_y = mb_y;

            /* compute motion vector and macro block type (intra or non intra) */
            motion_x = 0;
            motion_y = 0;
            if (s->pict_type == P_TYPE) {
                s->mb_intra = estimate_motion(s, mb_x, mb_y,
                                              &motion_x,
                                              &motion_y);
            } else {
                s->mb_intra = 1;
            }

            /* get the pixels */
            wrap = s->linesize;
            ptr = s->new_picture[0] + (mb_y * 16 * wrap) + mb_x * 16;
            get_pixels(s->block[0], ptr, wrap);
            get_pixels(s->block[1], ptr + 8, wrap);
            get_pixels(s->block[2], ptr + 8 * wrap, wrap);
            get_pixels(s->block[3], ptr + 8 * wrap + 8, wrap);
            wrap = s->linesize >> 1;
            ptr = s->new_picture[1] + (mb_y * 8 * wrap) + mb_x * 8;
            get_pixels(s->block[4], ptr, wrap);

            wrap = s->linesize >> 1;
            ptr = s->new_picture[2] + (mb_y * 8 * wrap) + mb_x * 8;
            get_pixels(s->block[5], ptr, wrap);

            /* subtract previous frame if non intra */
            if (!s->mb_intra) {
                int dxy, offset, mx, my;

                dxy = ((motion_y & 1) << 1) | (motion_x & 1);
                ptr = s->last_picture[0] +
                    ((mb_y * 16 + (motion_y >> 1)) * s->linesize) +
                    (mb_x * 16 + (motion_x >> 1));

                sub_pixels_2(s->block[0], ptr, s->linesize, dxy);
                sub_pixels_2(s->block[1], ptr + 8, s->linesize, dxy);
                sub_pixels_2(s->block[2], ptr + s->linesize * 8, s->linesize, dxy);
                sub_pixels_2(s->block[3], ptr + 8 + s->linesize * 8, s->linesize, dxy);

                if (s->out_format == FMT_H263) {
                    /* special rounding for h263 */
                    dxy = 0;
                    if ((motion_x & 3) != 0)
                        dxy |= 1;
                    if ((motion_y & 3) != 0)
                        dxy |= 2;
                    mx = motion_x >> 2;
                    my = motion_y >> 2;
                } else {
                    mx = motion_x / 2;
                    my = motion_y / 2;
                    dxy = ((my & 1) << 1) | (mx & 1);
                    mx >>= 1;
                    my >>= 1;
                }
                offset = ((mb_y * 8 + my) * (s->linesize >> 1)) + (mb_x * 8 + mx);
                ptr = s->last_picture[1] + offset;
                sub_pixels_2(s->block[4], ptr, s->linesize >> 1, dxy);
                ptr = s->last_picture[2] + offset;
                sub_pixels_2(s->block[5], ptr, s->linesize >> 1, dxy);
            }
            emms_c();

            /* DCT & quantize */
            if (s->h263_msmpeg4) {
                msmpeg4_dc_scale(s);
            } else if (s->h263_pred) {
                h263_dc_scale(s);
            } else {
                /* default quantization values */
                s->y_dc_scale = 8;
                s->c_dc_scale = 8;
            }

            for(i=0;i<6;i++) {
                int last_index;
                if (av_fdct == jpeg_fdct_ifast)
                    last_index = dct_quantize(s, s->block[i], i, s->qscale);
                else
                    last_index = dct_quantize_mmx(s, s->block[i], i, s->qscale);
                s->block_last_index[i] = last_index;
            }

            /* huffman encode */
            switch(s->out_format) {
            case FMT_MPEG1:
                mpeg1_encode_mb(s, s->block, motion_x, motion_y);
                break;
            case FMT_H263:
                if (s->h263_msmpeg4)
                    msmpeg4_encode_mb(s, s->block, motion_x, motion_y);
                else
                    h263_encode_mb(s, s->block, motion_x, motion_y);
                break;
            case FMT_MJPEG:
                mjpeg_encode_mb(s, s->block);
                break;
            }

            /* decompress blocks so that we keep the state of the decoder */
            s->mv[0][0][0] = motion_x;
            s->mv[0][0][1] = motion_y;

            MPV_decode_mb(s, s->block);
        }
        /* Obtain average MB line size for RTP */
        if (!mb_y)
            s->mb_line_avgsize = s->pb.buf_ptr - s->ptr_last_mb_line;
        else
            s->mb_line_avgsize = (s->mb_line_avgsize + s->pb.buf_ptr - s->ptr_last_mb_line) >> 1;
        //fprintf(stderr, "\nMB line: %d\tSize: %u\tAvg. Size: %u", s->mb_y,
        //        (s->pb.buf_ptr - s->ptr_last_mb_line), s->mb_line_avgsize);
        s->ptr_last_mb_line = s->pb.buf_ptr;
    }
    //if (s->gob_number)
    //    fprintf(stderr, "\nNumber of GOB: %d", s->gob_number);
}

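/* Quantize one 8x8 block in place.  The division by the quantizer step is
   done as a multiplication by the reciprocal table built in convert_matrix()
   followed by a shift; the sign-extension test on (level << k) >> k is a
   cheap way of detecting coefficients whose quantized value would be zero.
   dct_quantize_mmx() below follows the same scheme with the smaller
   QMAT_SHIFT_MMX scaling. */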
static int dct_quantize(MpegEncContext *s,
                        DCTELEM *block, int n,
                        int qscale)
{
    int i, j, level, last_non_zero, q;
    const int *qmat;

    av_fdct(block);

    /* we need this permutation so that we correct the IDCT
       permutation. will be moved into DCT code */
    block_permute(block);

    if (s->mb_intra) {
        if (n < 4)
            q = s->y_dc_scale;
        else
            q = s->c_dc_scale;
        q = q << 3;

        /* note: block[0] is assumed to be positive */
        block[0] = (block[0] + (q >> 1)) / q;
        i = 1;
        last_non_zero = 0;
        if (s->out_format == FMT_H263) {
            qmat = s->q_non_intra_matrix;
        } else {
            qmat = s->q_intra_matrix;
        }
    } else {
        i = 0;
        last_non_zero = -1;
        qmat = s->q_non_intra_matrix;
    }

    for(;i<64;i++) {
        j = zigzag_direct[i];
        level = block[j];
        level = level * qmat[j];
#ifdef PARANOID
        {
            static int count = 0;
            int level1, level2, qmat1;
            double val;
            if (qmat == s->q_non_intra_matrix) {
                qmat1 = default_non_intra_matrix[j] * s->qscale;
            } else {
                qmat1 = default_intra_matrix[j] * s->qscale;
            }
            if (av_fdct != jpeg_fdct_ifast)
                val = ((double)block[j] * 8.0) / (double)qmat1;
            else
                val = ((double)block[j] * 8.0 * 2048.0) /
                    ((double)qmat1 * aanscales[j]);
            level1 = (int)val;
            level2 = level / (1 << (QMAT_SHIFT - 3));
            if (level1 != level2) {
                fprintf(stderr, "%d: quant error qlevel=%d wanted=%d level=%d qmat1=%d qmat=%d wantedf=%0.6f\n",
                        count, level2, level1, block[j], qmat1, qmat[j],
                        val);
                count++;
            }
        }
#endif
        /* XXX: slight error for the low range. Test should be equivalent to
           (level <= -(1 << (QMAT_SHIFT - 3)) || level >= (1 <<
           (QMAT_SHIFT - 3)))
        */
        if (((level << (31 - (QMAT_SHIFT - 3))) >> (31 - (QMAT_SHIFT - 3))) !=
            level) {
            level = level / (1 << (QMAT_SHIFT - 3));
            /* XXX: currently, this code is not optimal. the range should be:
               mpeg1: -255..255
               mpeg2: -2048..2047
               h263:  -128..127
               mpeg4: -2048..2047
            */
            if (level > 127)
                level = 127;
            else if (level < -128)
                level = -128;
            block[j] = level;
            last_non_zero = i;
        } else {
            block[j] = 0;
        }
    }
    return last_non_zero;
}

static int dct_quantize_mmx(MpegEncContext *s,
                            DCTELEM *block, int n,
                            int qscale)
{
    int i, j, level, last_non_zero, q;
    const int *qmat;

    av_fdct(block);

    /* we need this permutation so that we correct the IDCT
       permutation. will be moved into DCT code */
    block_permute(block);

    if (s->mb_intra) {
        if (n < 4)
            q = s->y_dc_scale;
        else
            q = s->c_dc_scale;

        /* note: block[0] is assumed to be positive */
        block[0] = (block[0] + (q >> 1)) / q;
        i = 1;
        last_non_zero = 0;
        if (s->out_format == FMT_H263) {
            qmat = s->q_non_intra_matrix;
        } else {
            qmat = s->q_intra_matrix;
        }
    } else {
        i = 0;
        last_non_zero = -1;
        qmat = s->q_non_intra_matrix;
    }

    for(;i<64;i++) {
        j = zigzag_direct[i];
        level = block[j];
        level = level * qmat[j];
        /* XXX: slight error for the low range. Test should be equivalent to
           (level <= -(1 << (QMAT_SHIFT_MMX - 3)) || level >= (1 <<
           (QMAT_SHIFT_MMX - 3)))
        */
        if (((level << (31 - (QMAT_SHIFT_MMX - 3))) >> (31 - (QMAT_SHIFT_MMX - 3))) !=
            level) {
            level = level / (1 << (QMAT_SHIFT_MMX - 3));
            /* XXX: currently, this code is not optimal. the range should be:
               mpeg1: -255..255
               mpeg2: -2048..2047
               h263:  -128..127
               mpeg4: -2048..2047
            */
            if (level > 127)
                level = 127;
            else if (level < -128)
                level = -128;
            block[j] = level;
            last_non_zero = i;
        } else {
            block[j] = 0;
        }
    }
    return last_non_zero;
}

static void dct_unquantize_mpeg1_c(MpegEncContext *s, |
1207 |
DCTELEM *block, int n, int qscale) |
1208 |
{ |
1209 |
int i, level;
|
1210 |
const UINT16 *quant_matrix;
|
1211 |
|
1212 |
if (s->mb_intra) {
|
1213 |
if (n < 4) |
1214 |
block[0] = block[0] * s->y_dc_scale; |
1215 |
else
|
1216 |
block[0] = block[0] * s->c_dc_scale; |
1217 |
/* XXX: only mpeg1 */
|
1218 |
quant_matrix = s->intra_matrix; |
1219 |
for(i=1;i<64;i++) { |
1220 |
level = block[i]; |
1221 |
if (level) {
|
1222 |
if (level < 0) { |
1223 |
level = -level; |
1224 |
level = (int)(level * qscale * quant_matrix[i]) >> 3; |
1225 |
level = (level - 1) | 1; |
1226 |
level = -level; |
1227 |
} else {
|
1228 |
level = (int)(level * qscale * quant_matrix[i]) >> 3; |
1229 |
level = (level - 1) | 1; |
1230 |
} |
1231 |
#ifdef PARANOID
|
1232 |
if (level < -2048 || level > 2047) |
1233 |
fprintf(stderr, "unquant error %d %d\n", i, level);
|
1234 |
#endif
|
1235 |
block[i] = level; |
1236 |
} |
1237 |
} |
1238 |
} else {
|
1239 |
i = 0;
|
1240 |
quant_matrix = s->non_intra_matrix; |
1241 |
for(;i<64;i++) { |
1242 |
level = block[i]; |
1243 |
if (level) {
|
1244 |
if (level < 0) { |
1245 |
level = -level; |
1246 |
level = (((level << 1) + 1) * qscale * |
1247 |
((int) (quant_matrix[i]))) >> 4; |
1248 |
level = (level - 1) | 1; |
1249 |
level = -level; |
1250 |
} else {
|
1251 |
level = (((level << 1) + 1) * qscale * |
1252 |
((int) (quant_matrix[i]))) >> 4; |
1253 |
level = (level - 1) | 1; |
1254 |
} |
1255 |
#ifdef PARANOID
|
1256 |
if (level < -2048 || level > 2047) |
1257 |
fprintf(stderr, "unquant error %d %d\n", i, level);
|
1258 |
#endif
|
1259 |
block[i] = level; |
1260 |
} |
1261 |
} |
1262 |
} |
1263 |
} |
1264 |
|
static void dct_unquantize_h263_c(MpegEncContext *s,
                                  DCTELEM *block, int n, int qscale)
{
    int i, level, qmul, qadd;

    if (s->mb_intra) {
        if (n < 4)
            block[0] = block[0] * s->y_dc_scale;
        else
            block[0] = block[0] * s->c_dc_scale;
        i = 1;
    } else {
        i = 0;
    }

    qmul = s->qscale << 1;
    qadd = (s->qscale - 1) | 1;

    for(;i<64;i++) {
        level = block[i];
        if (level) {
            if (level < 0) {
                level = level * qmul - qadd;
            } else {
                level = level * qmul + qadd;
            }
#ifdef PARANOID
            if (level < -2048 || level > 2047)
                fprintf(stderr, "unquant error %d %d\n", i, level);
#endif
            block[i] = level;
        }
    }
}

/* rate control */

/* an I frame is assumed to be I_FRAME_SIZE_RATIO times bigger than a P frame */
#define I_FRAME_SIZE_RATIO 3.0
#define QSCALE_K 20

static void rate_control_init(MpegEncContext *s)
{
    s->wanted_bits = 0;

    if (s->intra_only) {
        s->I_frame_bits = ((INT64)s->bit_rate * FRAME_RATE_BASE) / s->frame_rate;
        s->P_frame_bits = s->I_frame_bits;
    } else {
        s->P_frame_bits = (int) ((float)(s->gop_size * s->bit_rate) /
                                 (float)((float)s->frame_rate / FRAME_RATE_BASE * (I_FRAME_SIZE_RATIO + s->gop_size - 1)));
        s->I_frame_bits = (int)(s->P_frame_bits * I_FRAME_SIZE_RATIO);
    }

#if defined(DEBUG)
    printf("I_frame_size=%d P_frame_size=%d\n",
           s->I_frame_bits, s->P_frame_bits);
#endif
}

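/* Worked example of the budget above (illustrative numbers, not taken from
   the source): at 25 fps, 1 Mbit/s and gop_size 12, P_frame_bits is about
   12e6 / (25 * (3.0 + 11)) ~= 34 kbits and I_frame_bits about 103 kbits, so
   one I frame plus eleven P frames add up to the 480 kbits available for 12
   frames at that bit rate. */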
/*
 * This heuristic is rather poor, but at least we do not have to
 * change the qscale at every macroblock.
 */
static int rate_estimate_qscale(MpegEncContext *s)
{
    INT64 diff, total_bits = s->total_bits;
    float q;
    int qscale, qmin;

    if (s->pict_type == I_TYPE) {
        s->wanted_bits += s->I_frame_bits;
    } else {
        s->wanted_bits += s->P_frame_bits;
    }
    diff = s->wanted_bits - total_bits;
    q = 31.0 - (float)diff / (QSCALE_K * s->mb_height * s->mb_width);
    /* adjust for I frame */
    if (s->pict_type == I_TYPE && !s->intra_only) {
        q /= I_FRAME_SIZE_RATIO;
    }

    /* using too small a qscale leads to problems in mpeg1 and h263
       because AC coefficients are clamped to 255 or 127 */
    qmin = 3;
    if (q < qmin)
        q = qmin;
    else if (q > 31)
        q = 31;
    qscale = (int)(q + 0.5);
#if defined(DEBUG)
    printf("%d: total=%0.0f br=%0.1f diff=%d qest=%0.1f\n",
           s->picture_number,
           (double)total_bits,
           (float)s->frame_rate / FRAME_RATE_BASE *
           total_bits / s->picture_number,
           diff, q);
#endif
    return qscale;
}

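/* Encoder entries exported to libavcodec: they all share the generic MPV_*
   entry points and differ only in the codec id; the per-codec behaviour is
   selected in MPV_encode_init(). */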
AVCodec mpeg1video_encoder = {
    "mpeg1video",
    CODEC_TYPE_VIDEO,
    CODEC_ID_MPEG1VIDEO,
    sizeof(MpegEncContext),
    MPV_encode_init,
    MPV_encode_picture,
    MPV_encode_end,
};

AVCodec h263_encoder = {
    "h263",
    CODEC_TYPE_VIDEO,
    CODEC_ID_H263,
    sizeof(MpegEncContext),
    MPV_encode_init,
    MPV_encode_picture,
    MPV_encode_end,
};

AVCodec h263p_encoder = {
    "h263p",
    CODEC_TYPE_VIDEO,
    CODEC_ID_H263P,
    sizeof(MpegEncContext),
    MPV_encode_init,
    MPV_encode_picture,
    MPV_encode_end,
};

AVCodec rv10_encoder = {
    "rv10",
    CODEC_TYPE_VIDEO,
    CODEC_ID_RV10,
    sizeof(MpegEncContext),
    MPV_encode_init,
    MPV_encode_picture,
    MPV_encode_end,
};

AVCodec mjpeg_encoder = {
    "mjpeg",
    CODEC_TYPE_VIDEO,
    CODEC_ID_MJPEG,
    sizeof(MpegEncContext),
    MPV_encode_init,
    MPV_encode_picture,
    MPV_encode_end,
};

AVCodec mpeg4_encoder = {
    "mpeg4",
    CODEC_TYPE_VIDEO,
    CODEC_ID_MPEG4,
    sizeof(MpegEncContext),
    MPV_encode_init,
    MPV_encode_picture,
    MPV_encode_end,
};

AVCodec msmpeg4_encoder = {
    "msmpeg4",
    CODEC_TYPE_VIDEO,
    CODEC_ID_MSMPEG4,
    sizeof(MpegEncContext),
    MPV_encode_init,
    MPV_encode_picture,
    MPV_encode_end,
};