/*
 * ADPCM codecs
 * Copyright (c) 2001-2003 The ffmpeg Project
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */
#include "avcodec.h"
#include "bitstream.h"

/**
 * @file adpcm.c
 * ADPCM codecs.
 * First version by Francois Revol (revol@free.fr)
 * Fringe ADPCM codecs (e.g., DK3, DK4, Westwood)
 *   by Mike Melanson (melanson@pcisys.net)
 * CD-ROM XA ADPCM codec by BERO
 * EA ADPCM decoder by Robin Kay (komadori@myrealbox.com)
 *
 * Features and limitations:
 *
 * Reference documents:
 * http://www.pcisys.net/~melanson/codecs/simpleaudio.html
 * http://www.geocities.com/SiliconValley/8682/aud3.txt
 * http://openquicktime.sourceforge.net/plugins.htm
 * XAnim sources (xa_codec.c) http://www.rasnaimaging.com/people/lapus/download.html
 * http://www.cs.ucla.edu/~leec/mediabench/applications.html
 * SoX source code http://home.sprynet.com/~cbagwell/sox.html
 *
 * CD-ROM XA:
 * http://ku-www.ss.titech.ac.jp/~yatsushi/xaadpcm.html
 * vagpack & depack http://homepages.compuserve.de/bITmASTER32/psx-index.html
 * readstr http://www.geocities.co.jp/Playtown/2004/
 */

#define BLKSIZE 1024

#define CLAMP_TO_SHORT(value) \
    if (value > 32767) \
        value = 32767; \
    else if (value < -32768) \
        value = -32768; \

/* step_table[] and index_table[] are from the ADPCM reference source */
/* This is the index table: */
static const int index_table[16] = {
    -1, -1, -1, -1, 2, 4, 6, 8,
    -1, -1, -1, -1, 2, 4, 6, 8,
};

/**
 * This is the step table. Note that many programs use slight deviations from
 * this table, but such deviations are negligible:
 */
static const int step_table[89] = {
    7, 8, 9, 10, 11, 12, 13, 14, 16, 17,
    19, 21, 23, 25, 28, 31, 34, 37, 41, 45,
    50, 55, 60, 66, 73, 80, 88, 97, 107, 118,
    130, 143, 157, 173, 190, 209, 230, 253, 279, 307,
    337, 371, 408, 449, 494, 544, 598, 658, 724, 796,
    876, 963, 1060, 1166, 1282, 1411, 1552, 1707, 1878, 2066,
    2272, 2499, 2749, 3024, 3327, 3660, 4026, 4428, 4871, 5358,
    5894, 6484, 7132, 7845, 8630, 9493, 10442, 11487, 12635, 13899,
    15289, 16818, 18500, 20350, 22385, 24623, 27086, 29794, 32767
};
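
/* Example of how these tables drive the IMA routines below: with step_index 20
 * the step is step_table[20] = 50; a nibble of 0x5 (sign bit clear, magnitude 5)
 * moves the predictor by ((2*5 + 1) * 50) >> 3 = 68 and advances the step index
 * by index_table[5] = 4. */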

/* These are for MS-ADPCM */
/* AdaptationTable[], AdaptCoeff1[], and AdaptCoeff2[] are from libsndfile */
static const int AdaptationTable[] = {
        230, 230, 230, 230, 307, 409, 512, 614,
        768, 614, 512, 409, 307, 230, 230, 230
};

static const int AdaptCoeff1[] = {
        256, 512, 0, 192, 240, 460, 392
};

static const int AdaptCoeff2[] = {
        0, -256, 0, 64, 0, -208, -232
};
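
/* In MS ADPCM the prediction is sample1*coeff1/256 + sample2*coeff2/256 plus a
 * scaled nibble, and idelta is rescaled by AdaptationTable[nibble]/256 with a
 * floor of 16 (see adpcm_ms_expand_nibble below). With coefficient set 0
 * (256, 0) the prediction is simply the previous sample. */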

/* These are for CD-ROM XA ADPCM */
static const int xa_adpcm_table[5][2] = {
   {   0,   0 },
   {  60,   0 },
   { 115, -52 },
   {  98, -55 },
   { 122, -60 }
};

static const int ea_adpcm_table[] = {
    0, 240, 460, 392, 0, 0, -208, -220, 0, 1,
    3, 4, 7, 8, 10, 11, 0, -1, -3, -4
};

static const int ct_adpcm_table[8] = {
    0x00E6, 0x00E6, 0x00E6, 0x00E6,
    0x0133, 0x0199, 0x0200, 0x0266
};

// padded to zero where table size is less than 16
static const int swf_index_tables[4][16] = {
    /*2*/ { -1, 2 },
    /*3*/ { -1, -1, 2, 4 },
    /*4*/ { -1, -1, -1, -1, 2, 4, 6, 8 },
    /*5*/ { -1, -1, -1, -1, -1, -1, -1, -1, 1, 2, 4, 6, 8, 10, 13, 16 }
};
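
/* The SWF decoder selects a row by code size: swf_index_tables[nb_bits-2]
 * covers the 2- to 5-bit variants, which is why the shorter rows rely on the
 * implicit zero padding up to 16 entries. */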

static const int yamaha_indexscale[] = {
    230, 230, 230, 230, 307, 409, 512, 614,
    230, 230, 230, 230, 307, 409, 512, 614
};

static const int yamaha_difflookup[] = {
    1, 3, 5, 7, 9, 11, 13, 15,
    -1, -3, -5, -7, -9, -11, -13, -15
};

/* end of tables */

typedef struct ADPCMChannelStatus {
    int predictor;
    short int step_index;
    int step;
    /* for encoding */
    int prev_sample;

    /* MS version */
    short sample1;
    short sample2;
    int coeff1;
    int coeff2;
    int idelta;
} ADPCMChannelStatus;

typedef struct ADPCMContext {
    int channel; /* for stereo MOVs, decode left, then decode right, then tell it's decoded */
    ADPCMChannelStatus status[2];
    short sample_buffer[32]; /* hold left samples while waiting for right samples */

    /* SWF only */
    int nb_bits;
    int nb_samples;
} ADPCMContext;

/* XXX: implement encoding */

#ifdef CONFIG_ENCODERS
static int adpcm_encode_init(AVCodecContext *avctx)
{
    if (avctx->channels > 2)
        return -1; /* only stereo or mono =) */
    switch(avctx->codec->id) {
    case CODEC_ID_ADPCM_IMA_QT:
        av_log(avctx, AV_LOG_ERROR, "ADPCM: codec adpcm_ima_qt unsupported for encoding !\n");
        avctx->frame_size = 64; /* XXX: could be a multiple of avctx->channels * 64 (left and right blocks are interleaved) */
        return -1;
        break;
    case CODEC_ID_ADPCM_IMA_WAV:
        avctx->frame_size = (BLKSIZE - 4 * avctx->channels) * 8 / (4 * avctx->channels) + 1; /* each 16-bit sample gives one nibble */
                                                                                             /* and we have 4 bytes per channel overhead */
        avctx->block_align = BLKSIZE;
        /* seems frame_size isn't taken into account... have to buffer the samples :-( */
        break;
    case CODEC_ID_ADPCM_MS:
        avctx->frame_size = (BLKSIZE - 7 * avctx->channels) * 2 / avctx->channels + 2; /* each 16-bit sample gives one nibble */
                                                                                       /* and we have 7 bytes per channel overhead */
        avctx->block_align = BLKSIZE;
        break;
    case CODEC_ID_ADPCM_YAMAHA:
        avctx->frame_size = BLKSIZE * avctx->channels;
        avctx->block_align = BLKSIZE;
        break;
    default:
        return -1;
        break;
    }

    avctx->coded_frame= avcodec_alloc_frame();
    avctx->coded_frame->key_frame= 1;

    return 0;
}

static int adpcm_encode_close(AVCodecContext *avctx)
{
    av_freep(&avctx->coded_frame);

    return 0;
}

207 |
{ |
208 |
int delta = sample - c->prev_sample;
|
209 |
int nibble = FFMIN(7, abs(delta)*4/step_table[c->step_index]) + (delta<0)*8; |
210 |
c->prev_sample = c->prev_sample + ((step_table[c->step_index] * yamaha_difflookup[nibble]) / 8);
|
211 |
CLAMP_TO_SHORT(c->prev_sample); |
212 |
c->step_index = av_clip(c->step_index + index_table[nibble], 0, 88); |
213 |
return nibble;
|
214 |
} |
215 |
|
216 |
static inline unsigned char adpcm_ms_compress_sample(ADPCMChannelStatus *c, short sample) |
217 |
{ |
218 |
int predictor, nibble, bias;
|
219 |
|
220 |
predictor = (((c->sample1) * (c->coeff1)) + ((c->sample2) * (c->coeff2))) / 256;
|
221 |
|
222 |
nibble= sample - predictor; |
223 |
if(nibble>=0) bias= c->idelta/2; |
224 |
else bias=-c->idelta/2; |
225 |
|
226 |
nibble= (nibble + bias) / c->idelta; |
227 |
nibble= av_clip(nibble, -8, 7)&0x0F; |
228 |
|
229 |
predictor += (signed)((nibble & 0x08)?(nibble - 0x10):(nibble)) * c->idelta; |
230 |
CLAMP_TO_SHORT(predictor); |
231 |
|
232 |
c->sample2 = c->sample1; |
233 |
c->sample1 = predictor; |
234 |
|
235 |
c->idelta = (AdaptationTable[(int)nibble] * c->idelta) >> 8; |
236 |
if (c->idelta < 16) c->idelta = 16; |
237 |
|
238 |
return nibble;
|
239 |
} |
240 |
|
241 |
static inline unsigned char adpcm_yamaha_compress_sample(ADPCMChannelStatus *c, short sample) |
242 |
{ |
243 |
int nibble, delta;
|
244 |
|
245 |
if(!c->step) {
|
246 |
c->predictor = 0;
|
247 |
c->step = 127;
|
248 |
} |
249 |
|
250 |
delta = sample - c->predictor; |
251 |
|
252 |
nibble = FFMIN(7, abs(delta)*4/c->step) + (delta<0)*8; |
253 |
|
254 |
c->predictor = c->predictor + ((c->step * yamaha_difflookup[nibble]) / 8);
|
255 |
CLAMP_TO_SHORT(c->predictor); |
256 |
c->step = (c->step * yamaha_indexscale[nibble]) >> 8;
|
257 |
c->step = av_clip(c->step, 127, 24567); |
258 |
|
259 |
return nibble;
|
260 |
} |
261 |
|
262 |
typedef struct TrellisPath { |
263 |
int nibble;
|
264 |
int prev;
|
265 |
} TrellisPath; |
266 |
|
267 |
typedef struct TrellisNode { |
268 |
uint32_t ssd; |
269 |
int path;
|
270 |
int sample1;
|
271 |
int sample2;
|
272 |
int step;
|
273 |
} TrellisNode; |
274 |
|
275 |
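/* Trellis-based search used when avctx->trellis is set: instead of greedily
 * picking one nibble per sample, keep the (1 << avctx->trellis) best decoder
 * states ranked by summed squared error, extend each of them with a few
 * candidate nibbles, and periodically freeze the common prefix of the
 * surviving paths into dst. Covers the IMA WAV, MS and Yamaha variants. */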
static void adpcm_compress_trellis(AVCodecContext *avctx, const short *samples,
                                   uint8_t *dst, ADPCMChannelStatus *c, int n)
{
#define FREEZE_INTERVAL 128
    //FIXME 6% faster if frontier is a compile-time constant
    const int frontier = 1 << avctx->trellis;
    const int stride = avctx->channels;
    const int version = avctx->codec->id;
    const int max_paths = frontier*FREEZE_INTERVAL;
    TrellisPath paths[max_paths], *p;
    TrellisNode node_buf[2][frontier];
    TrellisNode *nodep_buf[2][frontier];
    TrellisNode **nodes = nodep_buf[0]; // nodes[] is always sorted by .ssd
    TrellisNode **nodes_next = nodep_buf[1];
    int pathn = 0, froze = -1, i, j, k;

    assert(!(max_paths&(max_paths-1)));

    memset(nodep_buf, 0, sizeof(nodep_buf));
    nodes[0] = &node_buf[1][0];
    nodes[0]->ssd = 0;
    nodes[0]->path = 0;
    nodes[0]->step = c->step_index;
    nodes[0]->sample1 = c->sample1;
    nodes[0]->sample2 = c->sample2;
    if(version == CODEC_ID_ADPCM_IMA_WAV)
        nodes[0]->sample1 = c->prev_sample;
    if(version == CODEC_ID_ADPCM_MS)
        nodes[0]->step = c->idelta;
    if(version == CODEC_ID_ADPCM_YAMAHA) {
        if(c->step == 0) {
            nodes[0]->step = 127;
            nodes[0]->sample1 = 0;
        } else {
            nodes[0]->step = c->step;
            nodes[0]->sample1 = c->predictor;
        }
    }

    for(i=0; i<n; i++) {
        TrellisNode *t = node_buf[i&1];
        TrellisNode **u;
        int sample = samples[i*stride];
        memset(nodes_next, 0, frontier*sizeof(TrellisNode*));
        for(j=0; j<frontier && nodes[j]; j++) {
            // higher j have higher ssd already, so they're unlikely to use a suboptimal next sample too
            const int range = (j < frontier/2) ? 1 : 0;
            const int step = nodes[j]->step;
            int nidx;
            if(version == CODEC_ID_ADPCM_MS) {
                const int predictor = ((nodes[j]->sample1 * c->coeff1) + (nodes[j]->sample2 * c->coeff2)) / 256;
                const int div = (sample - predictor) / step;
                const int nmin = av_clip(div-range, -8, 6);
                const int nmax = av_clip(div+range, -7, 7);
                for(nidx=nmin; nidx<=nmax; nidx++) {
                    const int nibble = nidx & 0xf;
                    int dec_sample = predictor + nidx * step;
#define STORE_NODE(NAME, STEP_INDEX)\
                    int d;\
                    uint32_t ssd;\
                    CLAMP_TO_SHORT(dec_sample);\
                    d = sample - dec_sample;\
                    ssd = nodes[j]->ssd + d*d;\
                    if(nodes_next[frontier-1] && ssd >= nodes_next[frontier-1]->ssd)\
                        continue;\
                    /* Collapse any two states with the same previous sample value. \
                     * One could also distinguish states by step and by 2nd to last
                     * sample, but the effects of that are negligible. */\
                    for(k=0; k<frontier && nodes_next[k]; k++) {\
                        if(dec_sample == nodes_next[k]->sample1) {\
                            assert(ssd >= nodes_next[k]->ssd);\
                            goto next_##NAME;\
                        }\
                    }\
                    for(k=0; k<frontier; k++) {\
                        if(!nodes_next[k] || ssd < nodes_next[k]->ssd) {\
                            TrellisNode *u = nodes_next[frontier-1];\
                            if(!u) {\
                                assert(pathn < max_paths);\
                                u = t++;\
                                u->path = pathn++;\
                            }\
                            u->ssd = ssd;\
                            u->step = STEP_INDEX;\
                            u->sample2 = nodes[j]->sample1;\
                            u->sample1 = dec_sample;\
                            paths[u->path].nibble = nibble;\
                            paths[u->path].prev = nodes[j]->path;\
                            memmove(&nodes_next[k+1], &nodes_next[k], (frontier-k-1)*sizeof(TrellisNode*));\
                            nodes_next[k] = u;\
                            break;\
                        }\
                    }\
                    next_##NAME:;
                    STORE_NODE(ms, FFMAX(16, (AdaptationTable[nibble] * step) >> 8));
                }
            } else if(version == CODEC_ID_ADPCM_IMA_WAV) {
#define LOOP_NODES(NAME, STEP_TABLE, STEP_INDEX)\
                const int predictor = nodes[j]->sample1;\
                const int div = (sample - predictor) * 4 / STEP_TABLE;\
                int nmin = av_clip(div-range, -7, 6);\
                int nmax = av_clip(div+range, -6, 7);\
                if(nmin<=0) nmin--; /* distinguish -0 from +0 */\
                if(nmax<0) nmax--;\
                for(nidx=nmin; nidx<=nmax; nidx++) {\
                    const int nibble = nidx<0 ? 7-nidx : nidx;\
                    int dec_sample = predictor + (STEP_TABLE * yamaha_difflookup[nibble]) / 8;\
                    STORE_NODE(NAME, STEP_INDEX);\
                }
                LOOP_NODES(ima, step_table[step], av_clip(step + index_table[nibble], 0, 88));
            } else { //CODEC_ID_ADPCM_YAMAHA
                LOOP_NODES(yamaha, step, av_clip((step * yamaha_indexscale[nibble]) >> 8, 127, 24567));
#undef LOOP_NODES
#undef STORE_NODE
            }
        }

        u = nodes;
        nodes = nodes_next;
        nodes_next = u;

        // prevent overflow
        if(nodes[0]->ssd > (1<<28)) {
            for(j=1; j<frontier && nodes[j]; j++)
                nodes[j]->ssd -= nodes[0]->ssd;
            nodes[0]->ssd = 0;
        }

        // merge old paths to save memory
        if(i == froze + FREEZE_INTERVAL) {
            p = &paths[nodes[0]->path];
            for(k=i; k>froze; k--) {
                dst[k] = p->nibble;
                p = &paths[p->prev];
            }
            froze = i;
            pathn = 0;
            // other nodes might use paths that don't coincide with the frozen one.
            // checking which nodes do so is too slow, so just kill them all.
            // this also slightly improves quality, but I don't know why.
            memset(nodes+1, 0, (frontier-1)*sizeof(TrellisNode*));
        }
    }

    p = &paths[nodes[0]->path];
    for(i=n-1; i>froze; i--) {
        dst[i] = p->nibble;
        p = &paths[p->prev];
    }

    c->predictor = nodes[0]->sample1;
    c->sample1 = nodes[0]->sample1;
    c->sample2 = nodes[0]->sample2;
    c->step_index = nodes[0]->step;
    c->step = nodes[0]->step;
    c->idelta = nodes[0]->step;
}

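/* Encode one frame. IMA WAV and MS blocks start with a small per-channel
 * header (4 and 7 bytes respectively, written here from the channel state)
 * followed by packed 4-bit nibbles; Yamaha blocks carry nibbles only.
 * Returns the number of bytes written to frame. */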
static int adpcm_encode_frame(AVCodecContext *avctx,
                              unsigned char *frame, int buf_size, void *data)
{
    int n, i, st;
    short *samples;
    unsigned char *dst;
    ADPCMContext *c = avctx->priv_data;

    dst = frame;
    samples = (short *)data;
    st= avctx->channels == 2;
/*    n = (BLKSIZE - 4 * avctx->channels) / (2 * 8 * avctx->channels); */

    switch(avctx->codec->id) {
    case CODEC_ID_ADPCM_IMA_QT: /* XXX: can't test until we get .mov writer */
        break;
    case CODEC_ID_ADPCM_IMA_WAV:
        n = avctx->frame_size / 8;
        c->status[0].prev_sample = (signed short)samples[0]; /* XXX */
/*        c->status[0].step_index = 0; *//* XXX: not sure how to init the state machine */
        *dst++ = (c->status[0].prev_sample) & 0xFF; /* little endian */
        *dst++ = (c->status[0].prev_sample >> 8) & 0xFF;
        *dst++ = (unsigned char)c->status[0].step_index;
        *dst++ = 0; /* unknown */
        samples++;
        if (avctx->channels == 2) {
            c->status[1].prev_sample = (signed short)samples[1];
/*            c->status[1].step_index = 0; */
            *dst++ = (c->status[1].prev_sample) & 0xFF;
            *dst++ = (c->status[1].prev_sample >> 8) & 0xFF;
            *dst++ = (unsigned char)c->status[1].step_index;
            *dst++ = 0;
            samples++;
        }

        /* stereo: 4 bytes (8 samples) for left, 4 bytes for right, 4 bytes left, ... */
        if(avctx->trellis > 0) {
            uint8_t buf[2][n*8];
            adpcm_compress_trellis(avctx, samples, buf[0], &c->status[0], n*8);
            if(avctx->channels == 2)
                adpcm_compress_trellis(avctx, samples+1, buf[1], &c->status[1], n*8);
            for(i=0; i<n; i++) {
                *dst++ = buf[0][8*i+0] | (buf[0][8*i+1] << 4);
                *dst++ = buf[0][8*i+2] | (buf[0][8*i+3] << 4);
                *dst++ = buf[0][8*i+4] | (buf[0][8*i+5] << 4);
                *dst++ = buf[0][8*i+6] | (buf[0][8*i+7] << 4);
                if (avctx->channels == 2) {
                    *dst++ = buf[1][8*i+0] | (buf[1][8*i+1] << 4);
                    *dst++ = buf[1][8*i+2] | (buf[1][8*i+3] << 4);
                    *dst++ = buf[1][8*i+4] | (buf[1][8*i+5] << 4);
                    *dst++ = buf[1][8*i+6] | (buf[1][8*i+7] << 4);
                }
            }
        } else
        for (; n>0; n--) {
            *dst = adpcm_ima_compress_sample(&c->status[0], samples[0]) & 0x0F;
            *dst |= (adpcm_ima_compress_sample(&c->status[0], samples[avctx->channels]) << 4) & 0xF0;
            dst++;
            *dst = adpcm_ima_compress_sample(&c->status[0], samples[avctx->channels * 2]) & 0x0F;
            *dst |= (adpcm_ima_compress_sample(&c->status[0], samples[avctx->channels * 3]) << 4) & 0xF0;
            dst++;
            *dst = adpcm_ima_compress_sample(&c->status[0], samples[avctx->channels * 4]) & 0x0F;
            *dst |= (adpcm_ima_compress_sample(&c->status[0], samples[avctx->channels * 5]) << 4) & 0xF0;
            dst++;
            *dst = adpcm_ima_compress_sample(&c->status[0], samples[avctx->channels * 6]) & 0x0F;
            *dst |= (adpcm_ima_compress_sample(&c->status[0], samples[avctx->channels * 7]) << 4) & 0xF0;
            dst++;
            /* right channel */
            if (avctx->channels == 2) {
                *dst = adpcm_ima_compress_sample(&c->status[1], samples[1]);
                *dst |= adpcm_ima_compress_sample(&c->status[1], samples[3]) << 4;
                dst++;
                *dst = adpcm_ima_compress_sample(&c->status[1], samples[5]);
                *dst |= adpcm_ima_compress_sample(&c->status[1], samples[7]) << 4;
                dst++;
                *dst = adpcm_ima_compress_sample(&c->status[1], samples[9]);
                *dst |= adpcm_ima_compress_sample(&c->status[1], samples[11]) << 4;
                dst++;
                *dst = adpcm_ima_compress_sample(&c->status[1], samples[13]);
                *dst |= adpcm_ima_compress_sample(&c->status[1], samples[15]) << 4;
                dst++;
            }
            samples += 8 * avctx->channels;
        }
        break;
    case CODEC_ID_ADPCM_MS:
        for(i=0; i<avctx->channels; i++){
            int predictor=0;

            *dst++ = predictor;
            c->status[i].coeff1 = AdaptCoeff1[predictor];
            c->status[i].coeff2 = AdaptCoeff2[predictor];
        }
        for(i=0; i<avctx->channels; i++){
            if (c->status[i].idelta < 16)
                c->status[i].idelta = 16;

            *dst++ = c->status[i].idelta & 0xFF;
            *dst++ = c->status[i].idelta >> 8;
        }
        for(i=0; i<avctx->channels; i++){
            c->status[i].sample1= *samples++;

            *dst++ = c->status[i].sample1 & 0xFF;
            *dst++ = c->status[i].sample1 >> 8;
        }
        for(i=0; i<avctx->channels; i++){
            c->status[i].sample2= *samples++;

            *dst++ = c->status[i].sample2 & 0xFF;
            *dst++ = c->status[i].sample2 >> 8;
        }

        if(avctx->trellis > 0) {
            int n = avctx->block_align - 7*avctx->channels;
            uint8_t buf[2][n];
            if(avctx->channels == 1) {
                n *= 2;
                adpcm_compress_trellis(avctx, samples, buf[0], &c->status[0], n);
                for(i=0; i<n; i+=2)
                    *dst++ = (buf[0][i] << 4) | buf[0][i+1];
            } else {
                adpcm_compress_trellis(avctx, samples, buf[0], &c->status[0], n);
                adpcm_compress_trellis(avctx, samples+1, buf[1], &c->status[1], n);
                for(i=0; i<n; i++)
                    *dst++ = (buf[0][i] << 4) | buf[1][i];
            }
        } else
        for(i=7*avctx->channels; i<avctx->block_align; i++) {
            int nibble;
            nibble = adpcm_ms_compress_sample(&c->status[ 0], *samples++)<<4;
            nibble|= adpcm_ms_compress_sample(&c->status[st], *samples++);
            *dst++ = nibble;
        }
        break;
    case CODEC_ID_ADPCM_YAMAHA:
        n = avctx->frame_size / 2;
        if(avctx->trellis > 0) {
            uint8_t buf[2][n*2];
            n *= 2;
            if(avctx->channels == 1) {
                adpcm_compress_trellis(avctx, samples, buf[0], &c->status[0], n);
                for(i=0; i<n; i+=2)
                    *dst++ = buf[0][i] | (buf[0][i+1] << 4);
            } else {
                adpcm_compress_trellis(avctx, samples, buf[0], &c->status[0], n);
                adpcm_compress_trellis(avctx, samples+1, buf[1], &c->status[1], n);
                for(i=0; i<n; i++)
                    *dst++ = buf[0][i] | (buf[1][i] << 4);
            }
        } else
            for (; n>0; n--) {
                for(i = 0; i < avctx->channels; i++) {
                    int nibble;
                    nibble  = adpcm_yamaha_compress_sample(&c->status[i], samples[i]);
                    nibble |= adpcm_yamaha_compress_sample(&c->status[i], samples[i+avctx->channels]) << 4;
                    *dst++ = nibble;
                }
                samples += 2 * avctx->channels;
            }
        break;
    default:
        return -1;
    }
    return dst - frame;
}
#endif //CONFIG_ENCODERS

static int adpcm_decode_init(AVCodecContext * avctx)
{
    ADPCMContext *c = avctx->priv_data;

    if(avctx->channels > 2U){
        return -1;
    }

    c->channel = 0;
    c->status[0].predictor = c->status[1].predictor = 0;
    c->status[0].step_index = c->status[1].step_index = 0;
    c->status[0].step = c->status[1].step = 0;

    switch(avctx->codec->id) {
    case CODEC_ID_ADPCM_CT:
        c->status[0].step = c->status[1].step = 511;
        break;
    default:
        break;
    }
    return 0;
}

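/* Expand one IMA nibble. The shift parameter sets the scaling of the
 * (2*delta + 1) * step product: callers pass 3 for the standard IMA variants
 * and 4 for the 4XM flavour. */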
static inline short adpcm_ima_expand_nibble(ADPCMChannelStatus *c, char nibble, int shift)
{
    int step_index;
    int predictor;
    int sign, delta, diff, step;

    step = step_table[c->step_index];
    step_index = c->step_index + index_table[(unsigned)nibble];
    if (step_index < 0) step_index = 0;
    else if (step_index > 88) step_index = 88;

    sign = nibble & 8;
    delta = nibble & 7;
    /* perform direct multiplication instead of series of jumps proposed by
     * the reference ADPCM implementation since modern CPUs can do the mults
     * quickly enough */
    diff = ((2 * delta + 1) * step) >> shift;
    predictor = c->predictor;
    if (sign) predictor -= diff;
    else predictor += diff;

    CLAMP_TO_SHORT(predictor);
    c->predictor = predictor;
    c->step_index = step_index;

    return (short)predictor;
}

static inline short adpcm_ms_expand_nibble(ADPCMChannelStatus *c, char nibble)
{
    int predictor;

    predictor = (((c->sample1) * (c->coeff1)) + ((c->sample2) * (c->coeff2))) / 256;
    predictor += (signed)((nibble & 0x08)?(nibble - 0x10):(nibble)) * c->idelta;
    CLAMP_TO_SHORT(predictor);

    c->sample2 = c->sample1;
    c->sample1 = predictor;
    c->idelta = (AdaptationTable[(int)nibble] * c->idelta) >> 8;
    if (c->idelta < 16) c->idelta = 16;

    return (short)predictor;
}

static inline short adpcm_ct_expand_nibble(ADPCMChannelStatus *c, char nibble)
{
    int predictor;
    int sign, delta, diff;
    int new_step;

    sign = nibble & 8;
    delta = nibble & 7;
    /* perform direct multiplication instead of series of jumps proposed by
     * the reference ADPCM implementation since modern CPUs can do the mults
     * quickly enough */
    diff = ((2 * delta + 1) * c->step) >> 3;
    predictor = c->predictor;
    /* predictor update is not so trivial: predictor is multiplied by 254/256 before updating */
    if(sign)
        predictor = ((predictor * 254) >> 8) - diff;
    else
        predictor = ((predictor * 254) >> 8) + diff;
    /* calculate new step and clamp it to range 511..32767 */
    new_step = (ct_adpcm_table[nibble & 7] * c->step) >> 8;
    c->step = new_step;
    if(c->step < 511)
        c->step = 511;
    if(c->step > 32767)
        c->step = 32767;

    CLAMP_TO_SHORT(predictor);
    c->predictor = predictor;
    return (short)predictor;
}

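/* Sound Blaster Pro expansion shared by the 2-, 3- and 4-bit variants: size is
 * the code width, the top bit of the code is the sign, and c->step here is a
 * small shift count (0..3) rather than a step table index. */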
static inline short adpcm_sbpro_expand_nibble(ADPCMChannelStatus *c, char nibble, int size, int shift)
{
    int sign, delta, diff;

    sign = nibble & (1<<(size-1));
    delta = nibble & ((1<<(size-1))-1);
    diff = delta << (7 + c->step + shift);

    if (sign)
        c->predictor -= diff;
    else
        c->predictor += diff;

    /* clamp result */
    if (c->predictor > 16256)
        c->predictor = 16256;
    else if (c->predictor < -16384)
        c->predictor = -16384;

    /* calculate new step */
    if (delta >= (2*size - 3) && c->step < 3)
        c->step++;
    else if (delta == 0 && c->step > 0)
        c->step--;

    return (short) c->predictor;
}

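/* Yamaha expansion: the difference is step * (2*magnitude + 1) / 8 via
 * yamaha_difflookup[], and the step itself is rescaled by
 * yamaha_indexscale[nibble] / 256 and kept within 127..24567. */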
static inline short adpcm_yamaha_expand_nibble(ADPCMChannelStatus *c, unsigned char nibble)
{
    if(!c->step) {
        c->predictor = 0;
        c->step = 127;
    }

    c->predictor += (c->step * yamaha_difflookup[nibble]) / 8;
    CLAMP_TO_SHORT(c->predictor);
    c->step = (c->step * yamaha_indexscale[nibble]) >> 8;
    c->step = av_clip(c->step, 127, 24567);
    return c->predictor;
}

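/* Decode one 128-byte CD-ROM XA sound group as read below: bytes 4..11 hold
 * shift/filter headers for eight 28-sample sound units, the nibbles start at
 * offset 16, and each output sample is predicted from the previous two using
 * the xa_adpcm_table[] filter pair. */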
static void xa_decode(short *out, const unsigned char *in,
    ADPCMChannelStatus *left, ADPCMChannelStatus *right, int inc)
{
    int i, j;
    int shift,filter,f0,f1;
    int s_1,s_2;
    int d,s,t;

    for(i=0;i<4;i++) {

        shift  = 12 - (in[4+i*2] & 15);
        filter = in[4+i*2] >> 4;
        f0 = xa_adpcm_table[filter][0];
        f1 = xa_adpcm_table[filter][1];

        s_1 = left->sample1;
        s_2 = left->sample2;

        for(j=0;j<28;j++) {
            d  = in[16+i+j*4];

            t = (signed char)(d<<4)>>4;
            s = ( t<<shift ) + ((s_1*f0 + s_2*f1+32)>>6);
            CLAMP_TO_SHORT(s);
            *out = s;
            out += inc;
            s_2 = s_1;
            s_1 = s;
        }

        if (inc==2) { /* stereo */
            left->sample1 = s_1;
            left->sample2 = s_2;
            s_1 = right->sample1;
            s_2 = right->sample2;
            out = out + 1 - 28*2;
        }

        shift  = 12 - (in[5+i*2] & 15);
        filter = in[5+i*2] >> 4;

        f0 = xa_adpcm_table[filter][0];
        f1 = xa_adpcm_table[filter][1];

        for(j=0;j<28;j++) {
            d  = in[16+i+j*4];

            t = (signed char)d >> 4;
            s = ( t<<shift ) + ((s_1*f0 + s_2*f1+32)>>6);
            CLAMP_TO_SHORT(s);
            *out = s;
            out += inc;
            s_2 = s_1;
            s_1 = s;
        }

        if (inc==2) { /* stereo */
            right->sample1 = s_1;
            right->sample2 = s_2;
            out -= 1;
        } else {
            left->sample1 = s_1;
            left->sample2 = s_2;
        }
    }
}


/* DK3 ADPCM support macro */
#define DK3_GET_NEXT_NIBBLE() \
    if (decode_top_nibble_next) \
    { \
        nibble = (last_byte >> 4) & 0x0F; \
        decode_top_nibble_next = 0; \
    } \
    else \
    { \
        last_byte = *src++; \
        if (src >= buf + buf_size) break; \
        nibble = last_byte & 0x0F; \
        decode_top_nibble_next = 1; \
    }

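/* Decode one packet. data/data_size describe the caller's output buffer: on
 * entry *data_size is its capacity in bytes (checked against a worst-case
 * expansion of the input), on return it is the number of bytes of 16-bit
 * samples actually produced. The return value is the number of input bytes
 * consumed. */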
static int adpcm_decode_frame(AVCodecContext *avctx,
                              void *data, int *data_size,
                              uint8_t *buf, int buf_size)
{
    ADPCMContext *c = avctx->priv_data;
    ADPCMChannelStatus *cs;
    int n, m, channel, i;
    int block_predictor[2];
    short *samples;
    short *samples_end;
    uint8_t *src;
    int st; /* stereo */

    /* DK3 ADPCM accounting variables */
    unsigned char last_byte = 0;
    unsigned char nibble;
    int decode_top_nibble_next = 0;
    int diff_channel;

    /* EA ADPCM state variables */
    uint32_t samples_in_chunk;
    int32_t previous_left_sample, previous_right_sample;
    int32_t current_left_sample, current_right_sample;
    int32_t next_left_sample, next_right_sample;
    int32_t coeff1l, coeff2l, coeff1r, coeff2r;
    uint8_t shift_left, shift_right;
    int count1, count2;

    if (!buf_size)
        return 0;

    //should protect all 4-bit ADPCM variants
    //8 is needed for CODEC_ID_ADPCM_IMA_WAV with 2 channels
    //
    if(*data_size/4 < buf_size + 8)
        return -1;

    samples = data;
    samples_end= samples + *data_size/2;
    *data_size= 0;
    src = buf;

    st = avctx->channels == 2 ? 1 : 0;

    switch(avctx->codec->id) {
    case CODEC_ID_ADPCM_IMA_QT:
        n = (buf_size - 2);/* >> 2*avctx->channels;*/
        channel = c->channel;
        cs = &(c->status[channel]);
        /* (pppppp) (piiiiiii) */

        /* Bits 15-7 are the _top_ 9 bits of the 16-bit initial predictor value */
        cs->predictor = (*src++) << 8;
        cs->predictor |= (*src & 0x80);
        cs->predictor &= 0xFF80;

        /* sign extension */
        if(cs->predictor & 0x8000)
            cs->predictor -= 0x10000;

        CLAMP_TO_SHORT(cs->predictor);

        cs->step_index = (*src++) & 0x7F;

        if (cs->step_index > 88){
            av_log(avctx, AV_LOG_ERROR, "ERROR: step_index = %i\n", cs->step_index);
            cs->step_index = 88;
        }

        cs->step = step_table[cs->step_index];

        if (st && channel)
            samples++;

        for(m=32; n>0 && m>0; n--, m--) { /* in QuickTime, IMA is encoded by chunks of 34 bytes (=64 samples) */
            *samples = adpcm_ima_expand_nibble(cs, src[0] & 0x0F, 3);
            samples += avctx->channels;
            *samples = adpcm_ima_expand_nibble(cs, (src[0] >> 4) & 0x0F, 3);
            samples += avctx->channels;
            src ++;
        }

        if(st) { /* handle stereo interlacing */
            c->channel = (channel + 1) % 2; /* we get one packet for left, then one for right data */
            if(channel == 1) { /* wait for the other packet before outputting anything */
                return src - buf;
            }
        }
        break;
    case CODEC_ID_ADPCM_IMA_WAV:
        if (avctx->block_align != 0 && buf_size > avctx->block_align)
            buf_size = avctx->block_align;

//        samples_per_block= (block_align-4*channels)*8 / (bits_per_sample * channels) + 1;

        for(i=0; i<avctx->channels; i++){
            cs = &(c->status[i]);
            cs->predictor = (int16_t)(src[0] + (src[1]<<8));
            src+=2;

        // XXX: is this correct ??: *samples++ = cs->predictor;

            cs->step_index = *src++;
            if (cs->step_index > 88){
                av_log(avctx, AV_LOG_ERROR, "ERROR: step_index = %i\n", cs->step_index);
                cs->step_index = 88;
            }
            if (*src++) av_log(avctx, AV_LOG_ERROR, "unused byte should be null but is %d!!\n", src[-1]); /* unused */
        }

        while(src < buf + buf_size){
            for(m=0; m<4; m++){
                for(i=0; i<=st; i++)
                    *samples++ = adpcm_ima_expand_nibble(&c->status[i], src[4*i] & 0x0F, 3);
                for(i=0; i<=st; i++)
                    *samples++ = adpcm_ima_expand_nibble(&c->status[i], src[4*i] >> 4  , 3);
                src++;
            }
            src += 4*st;
        }
        break;
    case CODEC_ID_ADPCM_4XM:
        cs = &(c->status[0]);
        c->status[0].predictor= (int16_t)(src[0] + (src[1]<<8)); src+=2;
        if(st){
            c->status[1].predictor= (int16_t)(src[0] + (src[1]<<8)); src+=2;
        }
        c->status[0].step_index= (int16_t)(src[0] + (src[1]<<8)); src+=2;
        if(st){
            c->status[1].step_index= (int16_t)(src[0] + (src[1]<<8)); src+=2;
        }
        if (cs->step_index < 0) cs->step_index = 0;
        if (cs->step_index > 88) cs->step_index = 88;

        m= (buf_size - (src - buf))>>st;
        for(i=0; i<m; i++) {
            *samples++ = adpcm_ima_expand_nibble(&c->status[0], src[i] & 0x0F, 4);
            if (st)
                *samples++ = adpcm_ima_expand_nibble(&c->status[1], src[i+m] & 0x0F, 4);
            *samples++ = adpcm_ima_expand_nibble(&c->status[0], src[i] >> 4, 4);
            if (st)
                *samples++ = adpcm_ima_expand_nibble(&c->status[1], src[i+m] >> 4, 4);
        }

        src += m<<st;

        break;
    case CODEC_ID_ADPCM_MS:
        if (avctx->block_align != 0 && buf_size > avctx->block_align)
            buf_size = avctx->block_align;
        n = buf_size - 7 * avctx->channels;
        if (n < 0)
            return -1;
        block_predictor[0] = av_clip(*src++, 0, 7);
        block_predictor[1] = 0;
        if (st)
            block_predictor[1] = av_clip(*src++, 0, 7);
        c->status[0].idelta = (int16_t)((*src & 0xFF) | ((src[1] << 8) & 0xFF00));
        src+=2;
        if (st){
            c->status[1].idelta = (int16_t)((*src & 0xFF) | ((src[1] << 8) & 0xFF00));
            src+=2;
        }
        c->status[0].coeff1 = AdaptCoeff1[block_predictor[0]];
        c->status[0].coeff2 = AdaptCoeff2[block_predictor[0]];
        c->status[1].coeff1 = AdaptCoeff1[block_predictor[1]];
        c->status[1].coeff2 = AdaptCoeff2[block_predictor[1]];

        c->status[0].sample1 = ((*src & 0xFF) | ((src[1] << 8) & 0xFF00));
        src+=2;
        if (st) c->status[1].sample1 = ((*src & 0xFF) | ((src[1] << 8) & 0xFF00));
        if (st) src+=2;
        c->status[0].sample2 = ((*src & 0xFF) | ((src[1] << 8) & 0xFF00));
        src+=2;
        if (st) c->status[1].sample2 = ((*src & 0xFF) | ((src[1] << 8) & 0xFF00));
        if (st) src+=2;

        *samples++ = c->status[0].sample1;
        if (st) *samples++ = c->status[1].sample1;
        *samples++ = c->status[0].sample2;
        if (st) *samples++ = c->status[1].sample2;
        for(;n>0;n--) {
            *samples++ = adpcm_ms_expand_nibble(&c->status[0], (src[0] >> 4) & 0x0F);
            *samples++ = adpcm_ms_expand_nibble(&c->status[st], src[0] & 0x0F);
            src ++;
        }
        break;
    case CODEC_ID_ADPCM_IMA_DK4:
        if (avctx->block_align != 0 && buf_size > avctx->block_align)
            buf_size = avctx->block_align;

        c->status[0].predictor  = (int16_t)(src[0] | (src[1] << 8));
        c->status[0].step_index = src[2];
        src += 4;
        *samples++ = c->status[0].predictor;
        if (st) {
            c->status[1].predictor  = (int16_t)(src[0] | (src[1] << 8));
            c->status[1].step_index = src[2];
            src += 4;
            *samples++ = c->status[1].predictor;
        }
        while (src < buf + buf_size) {

            /* take care of the top nibble (always left or mono channel) */
            *samples++ = adpcm_ima_expand_nibble(&c->status[0],
                (src[0] >> 4) & 0x0F, 3);

            /* take care of the bottom nibble, which is right sample for
             * stereo, or another mono sample */
            if (st)
                *samples++ = adpcm_ima_expand_nibble(&c->status[1],
                    src[0] & 0x0F, 3);
            else
                *samples++ = adpcm_ima_expand_nibble(&c->status[0],
                    src[0] & 0x0F, 3);

            src++;
        }
        break;
    case CODEC_ID_ADPCM_IMA_DK3:
        if (avctx->block_align != 0 && buf_size > avctx->block_align)
            buf_size = avctx->block_align;

        if(buf_size + 16 > (samples_end - samples)*3/8)
            return -1;

        c->status[0].predictor  = (int16_t)(src[10] | (src[11] << 8));
        c->status[1].predictor  = (int16_t)(src[12] | (src[13] << 8));
        c->status[0].step_index = src[14];
        c->status[1].step_index = src[15];
        /* sign extend the predictors */
        src += 16;
        diff_channel = c->status[1].predictor;

        /* the DK3_GET_NEXT_NIBBLE macro issues the break statement when
         * the buffer is consumed */
        while (1) {

            /* for this algorithm, c->status[0] is the sum channel and
             * c->status[1] is the diff channel */

            /* process the first predictor of the sum channel */
            DK3_GET_NEXT_NIBBLE();
            adpcm_ima_expand_nibble(&c->status[0], nibble, 3);

            /* process the diff channel predictor */
            DK3_GET_NEXT_NIBBLE();
            adpcm_ima_expand_nibble(&c->status[1], nibble, 3);

            /* process the first pair of stereo PCM samples */
            diff_channel = (diff_channel + c->status[1].predictor) / 2;
            *samples++ = c->status[0].predictor + c->status[1].predictor;
            *samples++ = c->status[0].predictor - c->status[1].predictor;

            /* process the second predictor of the sum channel */
            DK3_GET_NEXT_NIBBLE();
            adpcm_ima_expand_nibble(&c->status[0], nibble, 3);

            /* process the second pair of stereo PCM samples */
            diff_channel = (diff_channel + c->status[1].predictor) / 2;
            *samples++ = c->status[0].predictor + c->status[1].predictor;
            *samples++ = c->status[0].predictor - c->status[1].predictor;
        }
        break;
    case CODEC_ID_ADPCM_IMA_WS:
        /* no per-block initialization; just start decoding the data */
        while (src < buf + buf_size) {

            if (st) {
                *samples++ = adpcm_ima_expand_nibble(&c->status[0],
                    (src[0] >> 4) & 0x0F, 3);
                *samples++ = adpcm_ima_expand_nibble(&c->status[1],
                    src[0] & 0x0F, 3);
            } else {
                *samples++ = adpcm_ima_expand_nibble(&c->status[0],
                    (src[0] >> 4) & 0x0F, 3);
                *samples++ = adpcm_ima_expand_nibble(&c->status[0],
                    src[0] & 0x0F, 3);
            }

            src++;
        }
        break;
    case CODEC_ID_ADPCM_XA:
        c->status[0].sample1 = c->status[0].sample2 =
        c->status[1].sample1 = c->status[1].sample2 = 0;
        while (buf_size >= 128) {
            xa_decode(samples, src, &c->status[0], &c->status[1],
                avctx->channels);
            src += 128;
            samples += 28 * 8;
            buf_size -= 128;
        }
        break;
    case CODEC_ID_ADPCM_EA:
        samples_in_chunk = AV_RL32(src);
        if (samples_in_chunk >= ((buf_size - 12) * 2)) {
            src += buf_size;
            break;
        }
        src += 4;
        current_left_sample = (int16_t)AV_RL16(src);
        src += 2;
        previous_left_sample = (int16_t)AV_RL16(src);
        src += 2;
        current_right_sample = (int16_t)AV_RL16(src);
        src += 2;
        previous_right_sample = (int16_t)AV_RL16(src);
        src += 2;

        for (count1 = 0; count1 < samples_in_chunk/28;count1++) {
            coeff1l = ea_adpcm_table[(*src >> 4) & 0x0F];
            coeff2l = ea_adpcm_table[((*src >> 4) & 0x0F) + 4];
            coeff1r = ea_adpcm_table[*src & 0x0F];
            coeff2r = ea_adpcm_table[(*src & 0x0F) + 4];
            src++;

            shift_left = ((*src >> 4) & 0x0F) + 8;
            shift_right = (*src & 0x0F) + 8;
            src++;

            for (count2 = 0; count2 < 28; count2++) {
                next_left_sample = (((*src & 0xF0) << 24) >> shift_left);
                next_right_sample = (((*src & 0x0F) << 28) >> shift_right);
                src++;

                next_left_sample = (next_left_sample +
                    (current_left_sample * coeff1l) +
                    (previous_left_sample * coeff2l) + 0x80) >> 8;
                next_right_sample = (next_right_sample +
                    (current_right_sample * coeff1r) +
                    (previous_right_sample * coeff2r) + 0x80) >> 8;
                CLAMP_TO_SHORT(next_left_sample);
                CLAMP_TO_SHORT(next_right_sample);

                previous_left_sample = current_left_sample;
                current_left_sample = next_left_sample;
                previous_right_sample = current_right_sample;
                current_right_sample = next_right_sample;
                *samples++ = (unsigned short)current_left_sample;
                *samples++ = (unsigned short)current_right_sample;
            }
        }
        break;
    case CODEC_ID_ADPCM_IMA_SMJPEG:
        c->status[0].predictor = *src;
        src += 2;
        c->status[0].step_index = *src++;
        src++; /* skip another byte before getting to the meat */
        while (src < buf + buf_size) {
            *samples++ = adpcm_ima_expand_nibble(&c->status[0],
                *src & 0x0F, 3);
            *samples++ = adpcm_ima_expand_nibble(&c->status[0],
                (*src >> 4) & 0x0F, 3);
            src++;
        }
        break;
    case CODEC_ID_ADPCM_CT:
        while (src < buf + buf_size) {
            if (st) {
                *samples++ = adpcm_ct_expand_nibble(&c->status[0],
                    (src[0] >> 4) & 0x0F);
                *samples++ = adpcm_ct_expand_nibble(&c->status[1],
                    src[0] & 0x0F);
            } else {
                *samples++ = adpcm_ct_expand_nibble(&c->status[0],
                    (src[0] >> 4) & 0x0F);
                *samples++ = adpcm_ct_expand_nibble(&c->status[0],
                    src[0] & 0x0F);
            }
            src++;
        }
        break;
    case CODEC_ID_ADPCM_SBPRO_4:
    case CODEC_ID_ADPCM_SBPRO_3:
    case CODEC_ID_ADPCM_SBPRO_2:
        if (!c->status[0].step_index) {
            /* the first byte is a raw sample */
            *samples++ = 128 * (*src++ - 0x80);
            if (st)
                *samples++ = 128 * (*src++ - 0x80);
            c->status[0].step_index = 1;
        }
        if (avctx->codec->id == CODEC_ID_ADPCM_SBPRO_4) {
            while (src < buf + buf_size) {
                *samples++ = adpcm_sbpro_expand_nibble(&c->status[0],
                    (src[0] >> 4) & 0x0F, 4, 0);
                *samples++ = adpcm_sbpro_expand_nibble(&c->status[st],
                    src[0] & 0x0F, 4, 0);
                src++;
            }
        } else if (avctx->codec->id == CODEC_ID_ADPCM_SBPRO_3) {
            while (src < buf + buf_size && samples + 2 < samples_end) {
                *samples++ = adpcm_sbpro_expand_nibble(&c->status[0],
                    (src[0] >> 5) & 0x07, 3, 0);
                *samples++ = adpcm_sbpro_expand_nibble(&c->status[0],
                    (src[0] >> 2) & 0x07, 3, 0);
                *samples++ = adpcm_sbpro_expand_nibble(&c->status[0],
                    src[0] & 0x03, 2, 0);
                src++;
            }
        } else {
            while (src < buf + buf_size && samples + 3 < samples_end) {
                *samples++ = adpcm_sbpro_expand_nibble(&c->status[0],
                    (src[0] >> 6) & 0x03, 2, 2);
                *samples++ = adpcm_sbpro_expand_nibble(&c->status[st],
                    (src[0] >> 4) & 0x03, 2, 2);
                *samples++ = adpcm_sbpro_expand_nibble(&c->status[0],
                    (src[0] >> 2) & 0x03, 2, 2);
                *samples++ = adpcm_sbpro_expand_nibble(&c->status[st],
                    src[0] & 0x03, 2, 2);
                src++;
            }
        }
        break;
    case CODEC_ID_ADPCM_SWF:
    {
        GetBitContext gb;
        const int *table;
        int k0, signmask;
        int size = buf_size*8;

        init_get_bits(&gb, buf, size);

        //FIXME the following return -1 may be removed only after
        //1. correctly splitting the stream into packets at demuxer or parser level
        //2. checking array bounds when writing
        //3. moving the global nb_bits header into extradata
        return -1;
        // first frame, read bits & initial values
        if (!c->nb_bits)
        {
            c->nb_bits = get_bits(&gb, 2)+2;
//            av_log(NULL,AV_LOG_INFO,"nb_bits: %d\n", c->nb_bits);
        }

        table = swf_index_tables[c->nb_bits-2];
        k0 = 1 << (c->nb_bits-2);
        signmask = 1 << (c->nb_bits-1);

        while (get_bits_count(&gb) <= size)
        {
            int i;

            c->nb_samples++;
            // wrap around at every 4096 samples...
            if ((c->nb_samples & 0xfff) == 1)
            {
                for (i = 0; i <= st; i++)
                {
                    *samples++ = c->status[i].predictor = get_sbits(&gb, 16);
                    c->status[i].step_index = get_bits(&gb, 6);
                }
            }

            // similar to IMA adpcm
            for (i = 0; i <= st; i++)
            {
                int delta = get_bits(&gb, c->nb_bits);
                int step = step_table[c->status[i].step_index];
                long vpdiff = 0; // vpdiff = (delta+0.5)*step/4
                int k = k0;

                do {
                    if (delta & k)
                        vpdiff += step;
                    step >>= 1;
                    k >>= 1;
                } while(k);
                vpdiff += step;

                if (delta & signmask)
                    c->status[i].predictor -= vpdiff;
                else
                    c->status[i].predictor += vpdiff;

                c->status[i].step_index += table[delta & (~signmask)];

                c->status[i].step_index = av_clip(c->status[i].step_index, 0, 88);
                c->status[i].predictor = av_clip(c->status[i].predictor, -32768, 32767);

                *samples++ = c->status[i].predictor;
            }
        }

//        src += get_bits_count(&gb)*8;
        src += size;

        break;
    }
    case CODEC_ID_ADPCM_YAMAHA:
        while (src < buf + buf_size) {
            if (st) {
                *samples++ = adpcm_yamaha_expand_nibble(&c->status[0],
                        src[0] & 0x0F);
                *samples++ = adpcm_yamaha_expand_nibble(&c->status[1],
                        (src[0] >> 4) & 0x0F);
            } else {
                *samples++ = adpcm_yamaha_expand_nibble(&c->status[0],
                        src[0] & 0x0F);
                *samples++ = adpcm_yamaha_expand_nibble(&c->status[0],
                        (src[0] >> 4) & 0x0F);
            }
            src++;
        }
        break;
    default:
        return -1;
    }
    *data_size = (uint8_t *)samples - (uint8_t *)data;
    return src - buf;
}



#ifdef CONFIG_ENCODERS
#define ADPCM_ENCODER(id,name)                  \
AVCodec name ## _encoder = {                    \
    #name,                                      \
    CODEC_TYPE_AUDIO,                           \
    id,                                         \
    sizeof(ADPCMContext),                       \
    adpcm_encode_init,                          \
    adpcm_encode_frame,                         \
    adpcm_encode_close,                         \
    NULL,                                       \
};
#else
#define ADPCM_ENCODER(id,name)
#endif

#ifdef CONFIG_DECODERS
#define ADPCM_DECODER(id,name)                  \
AVCodec name ## _decoder = {                    \
    #name,                                      \
    CODEC_TYPE_AUDIO,                           \
    id,                                         \
    sizeof(ADPCMContext),                       \
    adpcm_decode_init,                          \
    NULL,                                       \
    NULL,                                       \
    adpcm_decode_frame,                         \
};
#else
#define ADPCM_DECODER(id,name)
#endif

#define ADPCM_CODEC(id, name)                   \
ADPCM_ENCODER(id,name) ADPCM_DECODER(id,name)

ADPCM_CODEC(CODEC_ID_ADPCM_IMA_QT, adpcm_ima_qt);
ADPCM_CODEC(CODEC_ID_ADPCM_IMA_WAV, adpcm_ima_wav);
ADPCM_CODEC(CODEC_ID_ADPCM_IMA_DK3, adpcm_ima_dk3);
ADPCM_CODEC(CODEC_ID_ADPCM_IMA_DK4, adpcm_ima_dk4);
ADPCM_CODEC(CODEC_ID_ADPCM_IMA_WS, adpcm_ima_ws);
ADPCM_CODEC(CODEC_ID_ADPCM_IMA_SMJPEG, adpcm_ima_smjpeg);
ADPCM_CODEC(CODEC_ID_ADPCM_MS, adpcm_ms);
ADPCM_CODEC(CODEC_ID_ADPCM_4XM, adpcm_4xm);
ADPCM_CODEC(CODEC_ID_ADPCM_XA, adpcm_xa);
ADPCM_CODEC(CODEC_ID_ADPCM_EA, adpcm_ea);
ADPCM_CODEC(CODEC_ID_ADPCM_CT, adpcm_ct);
ADPCM_CODEC(CODEC_ID_ADPCM_SWF, adpcm_swf);
ADPCM_CODEC(CODEC_ID_ADPCM_YAMAHA, adpcm_yamaha);
ADPCM_CODEC(CODEC_ID_ADPCM_SBPRO_4, adpcm_sbpro_4);
ADPCM_CODEC(CODEC_ID_ADPCM_SBPRO_3, adpcm_sbpro_3);
ADPCM_CODEC(CODEC_ID_ADPCM_SBPRO_2, adpcm_sbpro_2);

#undef ADPCM_CODEC