/*
 * Duck/ON2 TrueMotion 2 Decoder
 * Copyright (c) 2005 Konstantin Shishkov
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 *
 */

/**
 * @file truemotion2.c
 * Duck TrueMotion2 decoder.
 */

#include "avcodec.h"
#include "common.h"
#include "bitstream.h"
#include "dsputil.h"

#define TM2_ESCAPE 0x80000000
#define TM2_DELTAS 64

/* Huffman-coded streams of different types of blocks */
enum TM2_STREAMS{ TM2_C_HI = 0, TM2_C_LO, TM2_L_HI, TM2_L_LO,
                  TM2_UPD, TM2_MOT, TM2_TYPE, TM2_NUM_STREAMS};

/* Block types */
enum TM2_BLOCKS{ TM2_HI_RES = 0, TM2_MED_RES, TM2_LOW_RES, TM2_NULL_RES,
                 TM2_UPDATE, TM2_STILL, TM2_MOTION};

typedef struct TM2Context{
    AVCodecContext *avctx;
    AVFrame pic;

    GetBitContext gb;
    DSPContext dsp;

    /* TM2 streams */
    int *tokens[TM2_NUM_STREAMS];
    int tok_lens[TM2_NUM_STREAMS];
    int tok_ptrs[TM2_NUM_STREAMS];
    int deltas[TM2_NUM_STREAMS][TM2_DELTAS];

    /* for block decoding */
    int D[4];
    int CD[4];
    int *last;
    int *clast;

    /* data for current and previous frame */
    int *Y1, *U1, *V1, *Y2, *U2, *V2;
    int cur;
} TM2Context;

/**
 * Huffman codes for each of the streams
 */
typedef struct TM2Codes{
    VLC vlc;        ///< table for the FFmpeg bitstream reader
    int bits;
    int *recode;    ///< table for converting from code indexes to values
    int length;
} TM2Codes;

/**
 * structure for gathering Huffman code information
 */
typedef struct TM2Huff{
    int val_bits;   ///< length of literals
    int max_bits;   ///< maximum code length
    int min_bits;   ///< minimum code length
    int nodes;      ///< total number of nodes in the tree
    int num;        ///< number of codes filled in so far
    int max_num;    ///< total number of codes
    int *nums;      ///< literals
    uint32_t *bits; ///< codes
    int *lens;      ///< code lengths
} TM2Huff;
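/*
 * NOTE: annotation summarising the code below. The Huffman tree is stored
 * recursively in the bitstream: one flag bit selects between a leaf (0),
 * followed by a literal of huff->val_bits bits, and an internal node (1),
 * whose two subtrees follow immediately. The code assigned to each literal
 * is the path taken from the root (left = 0, right = 1).
 */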
static int tm2_read_tree(TM2Context *ctx, uint32_t prefix, int length, TM2Huff *huff)
{
    if(length > huff->max_bits) {
        av_log(ctx->avctx, AV_LOG_ERROR, "Tree exceeded its given depth (%i)\n", huff->max_bits);
        return -1;
    }

    if(!get_bits1(&ctx->gb)) { /* literal */
        if (length == 0) {
            length = 1;
        }
        if(huff->num >= huff->max_num) {
            av_log(ctx->avctx, AV_LOG_DEBUG, "Too many literals\n");
            return -1;
        }
        huff->nums[huff->num] = get_bits_long(&ctx->gb, huff->val_bits);
        huff->bits[huff->num] = prefix;
        huff->lens[huff->num] = length;
        huff->num++;
        return 0;
    } else { /* non-terminal node */
        if(tm2_read_tree(ctx, prefix << 1, length + 1, huff) == -1)
            return -1;
        if(tm2_read_tree(ctx, (prefix << 1) | 1, length + 1, huff) == -1)
            return -1;
    }
    return 0;
}
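/*
 * NOTE: annotation summarising the code below. The tree parameters (literal
 * size, maximum/minimum code length, node count) are read first, then the
 * tree itself is parsed with tm2_read_tree() and converted into an FFmpeg
 * VLC table. A full binary tree with N nodes has (N + 1) / 2 leaves, hence
 * max_num below. Only the index -> literal mapping survives in code->recode;
 * the temporary TM2Huff arrays are freed before returning.
 */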
static int tm2_build_huff_table(TM2Context *ctx, TM2Codes *code)
{
    TM2Huff huff;
    int res = 0;

    huff.val_bits = get_bits(&ctx->gb, 5);
    huff.max_bits = get_bits(&ctx->gb, 5);
    huff.min_bits = get_bits(&ctx->gb, 5);
    huff.nodes = get_bits_long(&ctx->gb, 17);
    huff.num = 0;

    /* check for correct code parameters */
    if((huff.val_bits < 1) || (huff.val_bits > 32) ||
       (huff.max_bits < 0) || (huff.max_bits > 32)) {
        av_log(ctx->avctx, AV_LOG_ERROR, "Incorrect tree parameters - literal length: %i, max code length: %i\n",
               huff.val_bits, huff.max_bits);
        return -1;
    }
    if((huff.nodes < 0) || (huff.nodes > 0x10000)) {
        av_log(ctx->avctx, AV_LOG_ERROR, "Incorrect number of Huffman tree nodes: %i\n", huff.nodes);
        return -1;
    }
    /* one-node tree */
    if(huff.max_bits == 0)
        huff.max_bits = 1;

    /* allocate space for codes - it is exactly ceil(nodes / 2) entries */
    huff.max_num = (huff.nodes + 1) >> 1;
    huff.nums = av_mallocz(huff.max_num * sizeof(int));
    huff.bits = av_mallocz(huff.max_num * sizeof(uint32_t));
    huff.lens = av_mallocz(huff.max_num * sizeof(int));

    if(tm2_read_tree(ctx, 0, 0, &huff) == -1)
        res = -1;

    if(huff.num != huff.max_num) {
        av_log(ctx->avctx, AV_LOG_ERROR, "Got less codes than expected: %i of %i\n",
               huff.num, huff.max_num);
        res = -1;
    }

    /* convert codes to vlc_table */
    if(res != -1) {
        int i;

        res = init_vlc(&code->vlc, huff.max_bits, huff.max_num,
                       huff.lens, sizeof(int), sizeof(int),
                       huff.bits, sizeof(uint32_t), sizeof(uint32_t), 0);
        if(res < 0) {
            av_log(ctx->avctx, AV_LOG_ERROR, "Cannot build VLC table\n");
            res = -1;
        } else
            res = 0;
        if(res != -1) {
            code->bits = huff.max_bits;
            code->length = huff.max_num;
            code->recode = av_malloc(code->length * sizeof(int));
            for(i = 0; i < code->length; i++)
                code->recode[i] = huff.nums[i];
        }
    }
    /* free allocated memory */
    av_free(huff.nums);
    av_free(huff.bits);
    av_free(huff.lens);

    return res;
}

static void tm2_free_codes(TM2Codes *code)
{
    if(code->recode)
        av_free(code->recode);
    if(code->vlc.table)
        free_vlc(&code->vlc);
}

static inline int tm2_get_token(GetBitContext *gb, TM2Codes *code)
{
    int val;
    val = get_vlc2(gb, code->vlc.table, code->bits, 1);
    return code->recode[val];
}

static inline int tm2_read_header(TM2Context *ctx, uint8_t *buf)
{
    uint32_t magic;
    uint8_t *obuf;
    int length;

    obuf = buf;

    magic = AV_RL32(buf);
    buf += 4;

    if(magic == 0x00000100) { /* old header */
/*      av_log (ctx->avctx, AV_LOG_ERROR, "TM2 old header: not implemented (yet)\n"); */
        return 40;
    } else if(magic == 0x00000101) { /* new header */
        int w, h, size, flags, xr, yr;

        length = AV_RL32(buf);
        buf += 4;

        init_get_bits(&ctx->gb, buf, 32 * 8);
        size = get_bits_long(&ctx->gb, 31);
        h = get_bits(&ctx->gb, 15);
        w = get_bits(&ctx->gb, 15);
        flags = get_bits_long(&ctx->gb, 31);
        yr = get_bits(&ctx->gb, 9);
        xr = get_bits(&ctx->gb, 9);

        return 40;
    } else {
        av_log (ctx->avctx, AV_LOG_ERROR, "Not a TM2 header: 0x%08X\n", magic);
        return -1;
    }

    return (buf - obuf);
}
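/*
 * NOTE: annotation summarising the code below. A delta table is stored as a
 * 9-bit count d and a 5-bit width mb, followed by d values of mb bits each,
 * which are sign-extended from mb bits (e.g. with mb = 5 the raw value 0x1F
 * becomes -1). Remaining entries up to TM2_DELTAS are zeroed.
 */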
static int tm2_read_deltas(TM2Context *ctx, int stream_id) {
    int d, mb;
    int i, v;

    d = get_bits(&ctx->gb, 9);
    mb = get_bits(&ctx->gb, 5);

    if((d < 1) || (d > TM2_DELTAS) || (mb < 1) || (mb > 32)) {
        av_log(ctx->avctx, AV_LOG_ERROR, "Incorrect delta table: %i deltas x %i bits\n", d, mb);
        return -1;
    }

    for(i = 0; i < d; i++) {
        v = get_bits_long(&ctx->gb, mb);
        if(v & (1 << (mb - 1)))
            ctx->deltas[stream_id][i] = v - (1 << mb);
        else
            ctx->deltas[stream_id][i] = v;
    }
    for(; i < TM2_DELTAS; i++)
        ctx->deltas[stream_id][i] = 0;

    return 0;
}
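/*
 * NOTE: annotation summarising the parsing code below. A stream starts with
 * its length in 32-bit words and a token count whose lowest bit signals an
 * optional delta table. Two unused 32-bit fields follow (plus one extra word
 * when a TM2_ESCAPE marker is present), then the Huffman table and finally
 * the Huffman-coded tokens. If the coded-data length is zero, every token is
 * set to the table's first literal (codes.recode[0]). The return value is
 * the number of bytes to skip to reach the next stream.
 */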
static int tm2_read_stream(TM2Context *ctx, uint8_t *buf, int stream_id) {
    int i;
    int cur = 0;
    int skip = 0;
    int len, toks;
    TM2Codes codes;

    /* get stream length in dwords */
    len = AV_RB32(buf); buf += 4; cur += 4;
    skip = len * 4 + 4;

    if(len == 0)
        return 4;

    toks = AV_RB32(buf); buf += 4; cur += 4;
    if(toks & 1) {
        len = AV_RB32(buf); buf += 4; cur += 4;
        if(len == TM2_ESCAPE) {
            len = AV_RB32(buf); buf += 4; cur += 4;
        }
        if(len > 0) {
            init_get_bits(&ctx->gb, buf, (skip - cur) * 8);
            if(tm2_read_deltas(ctx, stream_id) == -1)
                return -1;
            buf += ((get_bits_count(&ctx->gb) + 31) >> 5) << 2;
            cur += ((get_bits_count(&ctx->gb) + 31) >> 5) << 2;
        }
    }
    /* skip unused fields */
    if(AV_RB32(buf) == TM2_ESCAPE) {
        buf += 4; cur += 4; /* some unknown length - could be escaped too */
    }
    buf += 4; cur += 4;
    buf += 4; cur += 4; /* unused by decoder */

    init_get_bits(&ctx->gb, buf, (skip - cur) * 8);
    if(tm2_build_huff_table(ctx, &codes) == -1)
        return -1;
    buf += ((get_bits_count(&ctx->gb) + 31) >> 5) << 2;
    cur += ((get_bits_count(&ctx->gb) + 31) >> 5) << 2;

    toks >>= 1;
    /* check if we have a sane number of tokens */
    if((toks < 0) || (toks > 0xFFFFFF)){
        av_log(ctx->avctx, AV_LOG_ERROR, "Incorrect number of tokens: %i\n", toks);
        tm2_free_codes(&codes);
        return -1;
    }
    ctx->tokens[stream_id] = av_realloc(ctx->tokens[stream_id], toks * sizeof(int));
    ctx->tok_lens[stream_id] = toks;
    len = AV_RB32(buf); buf += 4; cur += 4;
    if(len > 0) {
        init_get_bits(&ctx->gb, buf, (skip - cur) * 8);
        for(i = 0; i < toks; i++)
            ctx->tokens[stream_id][i] = tm2_get_token(&ctx->gb, &codes);
    } else {
        for(i = 0; i < toks; i++)
            ctx->tokens[stream_id][i] = codes.recode[0];
    }
    tm2_free_codes(&codes);

    return skip;
}
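/*
 * NOTE: annotation summarising the code below. GET_TOK() fetches the next
 * token of the given stream. For the delta-coded streams (TM2_C_HI through
 * TM2_MOT) the token is an index into that stream's delta table; TM2_TYPE
 * tokens are used directly. Reading past the end of a stream is reported
 * and yields 0.
 */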
static inline int GET_TOK(TM2Context *ctx,int type) {
    if(ctx->tok_ptrs[type] >= ctx->tok_lens[type]) {
        av_log(ctx->avctx, AV_LOG_ERROR, "Read token from stream %i out of bounds (%i>=%i)\n", type, ctx->tok_ptrs[type], ctx->tok_lens[type]);
        return 0;
    }
    if(type <= TM2_MOT)
        return ctx->deltas[type][ctx->tokens[type][ctx->tok_ptrs[type]++]];
    return ctx->tokens[type][ctx->tok_ptrs[type]++];
}
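/*
 * NOTE: annotation summarising the decoding model implemented below. Each
 * block covers a 4x4 luma area and a 2x2 area of each chroma plane, decoded
 * predictively: ctx->D[] and ctx->CD[] carry per-row delta accumulators for
 * luma and chroma, while ctx->last and ctx->clast hold previously decoded
 * values used for prediction across the block row. The macros set up
 * pointers into the current frame (Y/U/V) and, for inter block types, into
 * the previous frame (Yo/Uo/Vo).
 */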
/* block decoding routines */

/* common Y, U, V pointer initialisation */
#define TM2_INIT_POINTERS() \
    int *last, *clast; \
    int *Y, *U, *V;\
    int Ystride, Ustride, Vstride;\
\
    Ystride = ctx->avctx->width;\
    Vstride = (ctx->avctx->width + 1) >> 1;\
    Ustride = (ctx->avctx->width + 1) >> 1;\
    Y = (ctx->cur?ctx->Y2:ctx->Y1) + by * 4 * Ystride + bx * 4;\
    V = (ctx->cur?ctx->V2:ctx->V1) + by * 2 * Vstride + bx * 2;\
    U = (ctx->cur?ctx->U2:ctx->U1) + by * 2 * Ustride + bx * 2;\
    last = ctx->last + bx * 4;\
    clast = ctx->clast + bx * 4;

#define TM2_INIT_POINTERS_2() \
    int *Yo, *Uo, *Vo;\
    int oYstride, oUstride, oVstride;\
\
    TM2_INIT_POINTERS();\
    oYstride = Ystride;\
    oVstride = Vstride;\
    oUstride = Ustride;\
    Yo = (ctx->cur?ctx->Y1:ctx->Y2) + by * 4 * oYstride + bx * 4;\
    Vo = (ctx->cur?ctx->V1:ctx->V2) + by * 2 * oVstride + bx * 2;\
    Uo = (ctx->cur?ctx->U1:ctx->U2) + by * 2 * oUstride + bx * 2;

/* recalculate last and delta values for next blocks */
#define TM2_RECALC_BLOCK(CHR, stride, last, CD) {\
    CD[0] = (CHR[1] - 128) - last[1];\
    CD[1] = (int)CHR[stride + 1] - (int)CHR[1];\
    last[0] = (int)CHR[stride + 0] - 128;\
    last[1] = (int)CHR[stride + 1] - 128;}

/* common operations - add deltas to 4x4 block of luma or 2x2 blocks of chroma */
static inline void tm2_apply_deltas(TM2Context *ctx, int* Y, int stride, int *deltas, int *last)
{
    int ct, d;
    int i, j;

    for(j = 0; j < 4; j++){
        ct = ctx->D[j];
        for(i = 0; i < 4; i++){
            d = deltas[i + j * 4];
            ct += d;
            last[i] += ct;
            Y[i] = av_clip_uint8(last[i]);
        }
        Y += stride;
        ctx->D[j] = ct;
    }
}

static inline void tm2_high_chroma(int *data, int stride, int *last, int *CD, int *deltas)
{
    int i, j;
    for(j = 0; j < 2; j++){
        for(i = 0; i < 2; i++){
            CD[j] += deltas[i + j * 2];
            last[i] += CD[j];
            data[i] = last[i] + 128;
        }
        data += stride;
    }
}

static inline void tm2_low_chroma(int *data, int stride, int *clast, int *CD, int *deltas, int bx)
{
    int t;
    int l;
    int prev;

    if(bx > 0)
        prev = clast[-3];
    else
        prev = 0;
    t = (CD[0] + CD[1]) >> 1;
    l = (prev - CD[0] - CD[1] + clast[1]) >> 1;
    CD[1] = CD[0] + CD[1] - t;
    CD[0] = t;
    clast[0] = l;

    tm2_high_chroma(data, stride, clast, CD, deltas);
}

static inline void tm2_hi_res_block(TM2Context *ctx, AVFrame *pic, int bx, int by)
{
    int i;
    int deltas[16];
    TM2_INIT_POINTERS();

    /* hi-res chroma */
    for(i = 0; i < 4; i++) {
        deltas[i] = GET_TOK(ctx, TM2_C_HI);
        deltas[i + 4] = GET_TOK(ctx, TM2_C_HI);
    }
    tm2_high_chroma(U, Ustride, clast, ctx->CD, deltas);
    tm2_high_chroma(V, Vstride, clast + 2, ctx->CD + 2, deltas + 4);

    /* hi-res luma */
    for(i = 0; i < 16; i++)
        deltas[i] = GET_TOK(ctx, TM2_L_HI);

    tm2_apply_deltas(ctx, Y, Ystride, deltas, last);
}

static inline void tm2_med_res_block(TM2Context *ctx, AVFrame *pic, int bx, int by)
{
    int i;
    int deltas[16];
    TM2_INIT_POINTERS();

    /* low-res chroma */
    deltas[0] = GET_TOK(ctx, TM2_C_LO);
    deltas[1] = deltas[2] = deltas[3] = 0;
    tm2_low_chroma(U, Ustride, clast, ctx->CD, deltas, bx);

    deltas[0] = GET_TOK(ctx, TM2_C_LO);
    deltas[1] = deltas[2] = deltas[3] = 0;
    tm2_low_chroma(V, Vstride, clast + 2, ctx->CD + 2, deltas, bx);

    /* hi-res luma */
    for(i = 0; i < 16; i++)
        deltas[i] = GET_TOK(ctx, TM2_L_HI);

    tm2_apply_deltas(ctx, Y, Ystride, deltas, last);
}

static inline void tm2_low_res_block(TM2Context *ctx, AVFrame *pic, int bx, int by)
{
    int i;
    int t1, t2;
    int deltas[16];
    TM2_INIT_POINTERS();

    /* low-res chroma */
    deltas[0] = GET_TOK(ctx, TM2_C_LO);
    deltas[1] = deltas[2] = deltas[3] = 0;
    tm2_low_chroma(U, Ustride, clast, ctx->CD, deltas, bx);

    deltas[0] = GET_TOK(ctx, TM2_C_LO);
    deltas[1] = deltas[2] = deltas[3] = 0;
    tm2_low_chroma(V, Vstride, clast + 2, ctx->CD + 2, deltas, bx);

    /* low-res luma */
    for(i = 0; i < 16; i++)
        deltas[i] = 0;

    deltas[ 0] = GET_TOK(ctx, TM2_L_LO);
    deltas[ 2] = GET_TOK(ctx, TM2_L_LO);
    deltas[ 8] = GET_TOK(ctx, TM2_L_LO);
    deltas[10] = GET_TOK(ctx, TM2_L_LO);

    if(bx > 0)
        last[0] = (last[-1] - ctx->D[0] - ctx->D[1] - ctx->D[2] - ctx->D[3] + last[1]) >> 1;
    else
        last[0] = (last[1] - ctx->D[0] - ctx->D[1] - ctx->D[2] - ctx->D[3])>> 1;
    last[2] = (last[1] + last[3]) >> 1;

    t1 = ctx->D[0] + ctx->D[1];
    ctx->D[0] = t1 >> 1;
    ctx->D[1] = t1 - (t1 >> 1);
    t2 = ctx->D[2] + ctx->D[3];
    ctx->D[2] = t2 >> 1;
    ctx->D[3] = t2 - (t2 >> 1);

    tm2_apply_deltas(ctx, Y, Ystride, deltas, last);
}

static inline void tm2_null_res_block(TM2Context *ctx, AVFrame *pic, int bx, int by)
{
    int i;
    int ct;
    int left, right, diff;
    int deltas[16];
    TM2_INIT_POINTERS();

    /* null chroma */
    deltas[0] = deltas[1] = deltas[2] = deltas[3] = 0;
    tm2_low_chroma(U, Ustride, clast, ctx->CD, deltas, bx);

    deltas[0] = deltas[1] = deltas[2] = deltas[3] = 0;
    tm2_low_chroma(V, Vstride, clast + 2, ctx->CD + 2, deltas, bx);

    /* null luma */
    for(i = 0; i < 16; i++)
        deltas[i] = 0;

    ct = ctx->D[0] + ctx->D[1] + ctx->D[2] + ctx->D[3];

    if(bx > 0)
        left = last[-1] - ct;
    else
        left = 0;

    right = last[3];
    diff = right - left;
    last[0] = left + (diff >> 2);
    last[1] = left + (diff >> 1);
    last[2] = right - (diff >> 2);
    last[3] = right;
    {
        int tp = left;

        ctx->D[0] = (tp + (ct >> 2)) - left;
        left += ctx->D[0];
        ctx->D[1] = (tp + (ct >> 1)) - left;
        left += ctx->D[1];
        ctx->D[2] = ((tp + ct) - (ct >> 2)) - left;
        left += ctx->D[2];
        ctx->D[3] = (tp + ct) - left;
    }
    tm2_apply_deltas(ctx, Y, Ystride, deltas, last);
}

static inline void tm2_still_block(TM2Context *ctx, AVFrame *pic, int bx, int by)
{
    int i, j;
    TM2_INIT_POINTERS_2();

    /* update chroma */
    for(j = 0; j < 2; j++){
        for(i = 0; i < 2; i++){
            U[i] = Uo[i];
            V[i] = Vo[i];
        }
        U += Ustride; V += Vstride;
        Uo += oUstride; Vo += oVstride;
    }
    U -= Ustride * 2;
    V -= Vstride * 2;
    TM2_RECALC_BLOCK(U, Ustride, clast, ctx->CD);
    TM2_RECALC_BLOCK(V, Vstride, (clast + 2), (ctx->CD + 2));

    /* update deltas */
    ctx->D[0] = Yo[3] - last[3];
    ctx->D[1] = Yo[3 + oYstride] - Yo[3];
    ctx->D[2] = Yo[3 + oYstride * 2] - Yo[3 + oYstride];
    ctx->D[3] = Yo[3 + oYstride * 3] - Yo[3 + oYstride * 2];

    for(j = 0; j < 4; j++){
        for(i = 0; i < 4; i++){
            Y[i] = Yo[i];
            last[i] = Yo[i];
        }
        Y += Ystride;
        Yo += oYstride;
    }
}

static inline void tm2_update_block(TM2Context *ctx, AVFrame *pic, int bx, int by)
{
    int i, j;
    int d;
    TM2_INIT_POINTERS_2();

    /* update chroma */
    for(j = 0; j < 2; j++){
        for(i = 0; i < 2; i++){
            U[i] = Uo[i] + GET_TOK(ctx, TM2_UPD);
            V[i] = Vo[i] + GET_TOK(ctx, TM2_UPD);
        }
        U += Ustride; V += Vstride;
        Uo += oUstride; Vo += oVstride;
    }
    U -= Ustride * 2;
    V -= Vstride * 2;
    TM2_RECALC_BLOCK(U, Ustride, clast, ctx->CD);
    TM2_RECALC_BLOCK(V, Vstride, (clast + 2), (ctx->CD + 2));

    /* update deltas */
    ctx->D[0] = Yo[3] - last[3];
    ctx->D[1] = Yo[3 + oYstride] - Yo[3];
    ctx->D[2] = Yo[3 + oYstride * 2] - Yo[3 + oYstride];
    ctx->D[3] = Yo[3 + oYstride * 3] - Yo[3 + oYstride * 2];

    for(j = 0; j < 4; j++){
        d = last[3];
        for(i = 0; i < 4; i++){
            Y[i] = Yo[i] + GET_TOK(ctx, TM2_UPD);
            last[i] = Y[i];
        }
        ctx->D[j] = last[3] - d;
        Y += Ystride;
        Yo += oYstride;
    }
}

static inline void tm2_motion_block(TM2Context *ctx, AVFrame *pic, int bx, int by)
{
    int i, j;
    int mx, my;
    TM2_INIT_POINTERS_2();

    mx = GET_TOK(ctx, TM2_MOT);
    my = GET_TOK(ctx, TM2_MOT);

    Yo += my * oYstride + mx;
    Uo += (my >> 1) * oUstride + (mx >> 1);
    Vo += (my >> 1) * oVstride + (mx >> 1);

    /* copy chroma */
    for(j = 0; j < 2; j++){
        for(i = 0; i < 2; i++){
            U[i] = Uo[i];
            V[i] = Vo[i];
        }
        U += Ustride; V += Vstride;
        Uo += oUstride; Vo += oVstride;
    }
    U -= Ustride * 2;
    V -= Vstride * 2;
    TM2_RECALC_BLOCK(U, Ustride, clast, ctx->CD);
    TM2_RECALC_BLOCK(V, Vstride, (clast + 2), (ctx->CD + 2));

    /* copy luma */
    for(j = 0; j < 4; j++){
        for(i = 0; i < 4; i++){
            Y[i] = Yo[i];
        }
        Y += Ystride;
        Yo += oYstride;
    }
    /* calculate deltas */
    Y -= Ystride * 4;
    ctx->D[0] = Y[3] - last[3];
    ctx->D[1] = Y[3 + Ystride] - Y[3];
    ctx->D[2] = Y[3 + Ystride * 2] - Y[3 + Ystride];
    ctx->D[3] = Y[3 + Ystride * 3] - Y[3 + Ystride * 2];
    for(i = 0; i < 4; i++)
        last[i] = Y[i + Ystride * 3];
}
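/*
 * NOTE: annotation summarising the code below. One TM2_TYPE token is read
 * per 4x4 block and dispatched to one of the routines above; a frame counts
 * as a keyframe only if no inter block type (UPDATE, STILL, MOTION) occurs.
 * Afterwards the int planes of the current buffer are clipped to 8 bits and
 * copied into the AVFrame.
 */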
static int tm2_decode_blocks(TM2Context *ctx, AVFrame *p)
{
    int i, j;
    int bw, bh;
    int type;
    int keyframe = 1;
    uint8_t *Y, *U, *V;
    int *src;

    bw = ctx->avctx->width >> 2;
    bh = ctx->avctx->height >> 2;

    for(i = 0; i < TM2_NUM_STREAMS; i++)
        ctx->tok_ptrs[i] = 0;

    if (ctx->tok_lens[TM2_TYPE]<bw*bh){
        av_log(ctx->avctx,AV_LOG_ERROR,"Got %i tokens for %i blocks\n",ctx->tok_lens[TM2_TYPE],bw*bh);
        return -1;
    }

    memset(ctx->last, 0, 4 * bw * sizeof(int));
    memset(ctx->clast, 0, 4 * bw * sizeof(int));

    for(j = 0; j < bh; j++) {
        memset(ctx->D, 0, 4 * sizeof(int));
        memset(ctx->CD, 0, 4 * sizeof(int));
        for(i = 0; i < bw; i++) {
            type = GET_TOK(ctx, TM2_TYPE);
            switch(type) {
            case TM2_HI_RES:
                tm2_hi_res_block(ctx, p, i, j);
                break;
            case TM2_MED_RES:
                tm2_med_res_block(ctx, p, i, j);
                break;
            case TM2_LOW_RES:
                tm2_low_res_block(ctx, p, i, j);
                break;
            case TM2_NULL_RES:
                tm2_null_res_block(ctx, p, i, j);
                break;
            case TM2_UPDATE:
                tm2_update_block(ctx, p, i, j);
                keyframe = 0;
                break;
            case TM2_STILL:
                tm2_still_block(ctx, p, i, j);
                keyframe = 0;
                break;
            case TM2_MOTION:
                tm2_motion_block(ctx, p, i, j);
                keyframe = 0;
                break;
            default:
                av_log(ctx->avctx, AV_LOG_ERROR, "Skipping unknown block type %i\n", type);
            }
        }
    }

    /* copy data from our buffer to AVFrame */
    Y = p->data[0];
    src = (ctx->cur?ctx->Y2:ctx->Y1);
    for(j = 0; j < ctx->avctx->height; j++){
        for(i = 0; i < ctx->avctx->width; i++){
            Y[i] = av_clip_uint8(*src++);
        }
        Y += p->linesize[0];
    }
    U = p->data[2];
    src = (ctx->cur?ctx->U2:ctx->U1);
    for(j = 0; j < (ctx->avctx->height + 1) >> 1; j++){
        for(i = 0; i < (ctx->avctx->width + 1) >> 1; i++){
            U[i] = av_clip_uint8(*src++);
        }
        U += p->linesize[2];
    }
    V = p->data[1];
    src = (ctx->cur?ctx->V2:ctx->V1);
    for(j = 0; j < (ctx->avctx->height + 1) >> 1; j++){
        for(i = 0; i < (ctx->avctx->width + 1) >> 1; i++){
            V[i] = av_clip_uint8(*src++);
        }
        V += p->linesize[1];
    }

    return keyframe;
}
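/*
 * NOTE: annotation summarising the code below. A packet is byte-swapped to
 * native 32-bit words, a fixed-size (40-byte) header is skipped, and the
 * seven Huffman-coded streams are read in TM2_STREAMS order. Block decoding
 * then runs on the int planes and the two frame buffers are swapped via
 * l->cur.
 */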
static int decode_frame(AVCodecContext *avctx,
                        void *data, int *data_size,
                        uint8_t *buf, int buf_size)
{
    TM2Context * const l = avctx->priv_data;
    AVFrame * const p= (AVFrame*)&l->pic;
    int skip, t;

    p->reference = 1;
    p->buffer_hints = FF_BUFFER_HINTS_VALID | FF_BUFFER_HINTS_PRESERVE | FF_BUFFER_HINTS_REUSABLE;
    if(avctx->reget_buffer(avctx, p) < 0){
        av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
        return -1;
    }

    l->dsp.bswap_buf((uint32_t*)buf, (uint32_t*)buf, buf_size >> 2);
    skip = tm2_read_header(l, buf);

    if(skip == -1)
        return -1;

    t = tm2_read_stream(l, buf + skip, TM2_C_HI);
    if(t == -1)
        return -1;
    skip += t;
    t = tm2_read_stream(l, buf + skip, TM2_C_LO);
    if(t == -1)
        return -1;
    skip += t;
    t = tm2_read_stream(l, buf + skip, TM2_L_HI);
    if(t == -1)
        return -1;
    skip += t;
    t = tm2_read_stream(l, buf + skip, TM2_L_LO);
    if(t == -1)
        return -1;
    skip += t;
    t = tm2_read_stream(l, buf + skip, TM2_UPD);
    if(t == -1)
        return -1;
    skip += t;
    t = tm2_read_stream(l, buf + skip, TM2_MOT);
    if(t == -1)
        return -1;
    skip += t;
    t = tm2_read_stream(l, buf + skip, TM2_TYPE);
    if(t == -1)
        return -1;
    p->key_frame = tm2_decode_blocks(l, p);
    if(p->key_frame)
        p->pict_type = FF_I_TYPE;
    else
        p->pict_type = FF_P_TYPE;

    l->cur = !l->cur;
    *data_size = sizeof(AVFrame);
    *(AVFrame*)data = l->pic;

    return buf_size;
}

static int decode_init(AVCodecContext *avctx){
    TM2Context * const l = avctx->priv_data;
    int i;

    if (avcodec_check_dimensions(avctx, avctx->width, avctx->height) < 0) {
        return -1;
    }
    if((avctx->width & 3) || (avctx->height & 3)){
        av_log(avctx, AV_LOG_ERROR, "Width and height must be multiple of 4\n");
        return -1;
    }

    l->avctx = avctx;
    l->pic.data[0]=NULL;
    avctx->has_b_frames = 0;
    avctx->pix_fmt = PIX_FMT_YUV420P;

    dsputil_init(&l->dsp, avctx);

    l->last = av_malloc(4 * sizeof(int) * (avctx->width >> 2));
    l->clast = av_malloc(4 * sizeof(int) * (avctx->width >> 2));

    for(i = 0; i < TM2_NUM_STREAMS; i++) {
        l->tokens[i] = NULL;
        l->tok_lens[i] = 0;
    }

    l->Y1 = av_malloc(sizeof(int) * avctx->width * avctx->height);
    l->U1 = av_malloc(sizeof(int) * ((avctx->width + 1) >> 1) * ((avctx->height + 1) >> 1));
    l->V1 = av_malloc(sizeof(int) * ((avctx->width + 1) >> 1) * ((avctx->height + 1) >> 1));
    l->Y2 = av_malloc(sizeof(int) * avctx->width * avctx->height);
    l->U2 = av_malloc(sizeof(int) * ((avctx->width + 1) >> 1) * ((avctx->height + 1) >> 1));
    l->V2 = av_malloc(sizeof(int) * ((avctx->width + 1) >> 1) * ((avctx->height + 1) >> 1));
    l->cur = 0;

    return 0;
}

static int decode_end(AVCodecContext *avctx){
    TM2Context * const l = avctx->priv_data;
    int i;

    if(l->last)
        av_free(l->last);
    if(l->clast)
        av_free(l->clast);
    for(i = 0; i < TM2_NUM_STREAMS; i++)
        if(l->tokens[i])
            av_free(l->tokens[i]);
    if(l->Y1){
        av_free(l->Y1);
        av_free(l->U1);
        av_free(l->V1);
        av_free(l->Y2);
        av_free(l->U2);
        av_free(l->V2);
    }
    return 0;
}

AVCodec truemotion2_decoder = {
    "truemotion2",
    CODEC_TYPE_VIDEO,
    CODEC_ID_TRUEMOTION2,
    sizeof(TM2Context),
    decode_init,
    NULL,
    decode_end,
    decode_frame,
    CODEC_CAP_DR1,
};