Revision 21e681ba libavcodec/g722.c

View differences:

--- a/libavcodec/g722.c
+++ b/libavcodec/g722.c
@@ -1,5 +1,5 @@
 /*
- * G.722 ADPCM audio decoder
+ * G.722 ADPCM audio encoder/decoder
  *
  * Copyright (c) CMU 1993 Computer Science, Speech Group
  *                        Chengxiang Lu and Alex Hauptmann
@@ -219,6 +219,7 @@
     return 0;
 }
 
+#if CONFIG_ADPCM_G722_DECODER
 static const int16_t low_inv_quant5[32] = {
      -35,   -35, -2919, -2195, -1765, -1458, -1219, -1023,
     -858,  -714,  -587,  -473,  -370,  -276,  -190,  -110,
@@ -301,4 +302,84 @@
     .long_name      = NULL_IF_CONFIG_SMALL("G.722 ADPCM"),
     .max_lowres     = 1,
 };
+#endif
+
+#if CONFIG_ADPCM_G722_ENCODER
+static const int16_t low_quant[33] = {
+      35,   72,  110,  150,  190,  233,  276,  323,
+     370,  422,  473,  530,  587,  650,  714,  786,
+     858,  940, 1023, 1121, 1219, 1339, 1458, 1612,
+    1765, 1980, 2195, 2557, 2919
+};
+
+static inline void filter_samples(G722Context *c, const int16_t *samples,
+                                  int *xlow, int *xhigh)
+{
+    int xout1, xout2;
+    c->prev_samples[c->prev_samples_pos++] = samples[0];
+    c->prev_samples[c->prev_samples_pos++] = samples[1];
+    apply_qmf(c->prev_samples + c->prev_samples_pos - 24, &xout1, &xout2);
+    *xlow  = xout1 + xout2 >> 13;
+    *xhigh = xout1 - xout2 >> 13;
+    if (c->prev_samples_pos >= PREV_SAMPLES_BUF_SIZE) {
+        memmove(c->prev_samples,
+                c->prev_samples + c->prev_samples_pos - 22,
+                22 * sizeof(c->prev_samples[0]));
+        c->prev_samples_pos = 22;
+    }
+}
+
+static inline int encode_high(const struct G722Band *state, int xhigh)
+{
+    int diff = av_clip_int16(xhigh - state->s_predictor);
+    int pred = 141 * state->scale_factor >> 8;
+           /* = diff >= 0 ? (diff < pred) + 2 : diff >= -pred */
+    return ((diff ^ (diff >> (sizeof(diff)*8-1))) < pred) + 2*(diff >= 0);
+}
+
+static inline int encode_low(const struct G722Band* state, int xlow)
+{
+    int diff  = av_clip_int16(xlow - state->s_predictor);
+           /* = diff >= 0 ? diff : -(diff + 1) */
+    int limit = diff ^ (diff >> (sizeof(diff)*8-1));
+    int i = 0;
+    limit = limit + 1 << 10;
+    if (limit > low_quant[8] * state->scale_factor)
+        i = 9;
+    while (i < 29 && limit > low_quant[i] * state->scale_factor)
+        i++;
+    return (diff < 0 ? (i < 2 ? 63 : 33) : 61) - i;
+}
+
+static int g722_encode_frame(AVCodecContext *avctx,
+                             uint8_t *dst, int buf_size, void *data)
+{
+    G722Context *c = avctx->priv_data;
+    const int16_t *samples = data;
+    int i;
+
+    for (i = 0; i < buf_size >> 1; i++) {
+        int xlow, xhigh, ihigh, ilow;
+        filter_samples(c, &samples[2*i], &xlow, &xhigh);
+        ihigh = encode_high(&c->band[1], xhigh);
+        ilow  = encode_low(&c->band[0], xlow);
+        update_high_predictor(&c->band[1], c->band[1].scale_factor *
+                              high_inv_quant[ihigh] >> 10, ihigh);
+        update_low_predictor(&c->band[0], ilow >> 2);
+        *dst++ = ihigh << 6 | ilow;
+    }
+    return i;
+}
+
+AVCodec adpcm_g722_encoder = {
+    .name           = "g722",
+    .type           = AVMEDIA_TYPE_AUDIO,
+    .id             = CODEC_ID_ADPCM_G722,
+    .priv_data_size = sizeof(G722Context),
+    .init           = g722_init,
+    .encode         = g722_encode_frame,
+    .long_name      = NULL_IF_CONFIG_SMALL("G.722 ADPCM"),
+    .sample_fmts    = (enum SampleFormat[]){SAMPLE_FMT_S16,SAMPLE_FMT_NONE},
+};
+#endif
 
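The in-source comments on encode_high() and encode_low() ("= diff >= 0 ? ... : -(diff + 1)") both rely on the same branch-free magnitude trick, diff ^ (diff >> (sizeof(diff)*8-1)). The check below is illustrative only and not part of this revision; it assumes the sign-propagating (arithmetic) right shift of a two's-complement int that FFmpeg builds on, and the hypothetical helper name ones_complement_abs() is mine.

#include <assert.h>

/* Branch-free magnitude as used by encode_low()/encode_high():
 * with a sign-propagating right shift, diff >> (bits-1) is all zeros for
 * diff >= 0 and all ones for diff < 0, so the XOR returns diff unchanged
 * for non-negative values and -(diff + 1) for negative ones. */
static int ones_complement_abs(int diff)
{
    return diff ^ (diff >> (sizeof(diff) * 8 - 1));
}

int main(void)
{
    assert(ones_complement_abs(5)  == 5);   /* non-negative: passed through */
    assert(ones_complement_abs(0)  == 0);
    assert(ones_complement_abs(-1) == 0);   /* -(-1 + 1) */
    assert(ones_complement_abs(-6) == 5);   /* -(-6 + 1) */
    return 0;
}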
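The new AVCodec table entry makes the encoder reachable through the normal lookup by CODEC_ID_ADPCM_G722. The sketch below shows one way it could be driven through the legacy libavcodec calls of this era (avcodec_alloc_context(), avcodec_open(), avcodec_encode_audio()); it is not part of the commit, the 16 kHz mono parameters are illustrative, and the wrapper name encode_g722_block() is hypothetical. The size argument passed to avcodec_encode_audio() is treated as the input sample count, which is inferred from the buf_size >> 1 loop bound in g722_encode_frame() above rather than from API documentation.

#include <libavcodec/avcodec.h>
#include <libavutil/mem.h>

/* Hypothetical helper: push one block of 16-bit samples through the new
 * G.722 encoder using the legacy (pre-AVPacket) audio encode API.
 * `out` is assumed to hold at least nb_samples / 2 bytes, since the
 * encoder emits one byte per pair of input samples.
 * Returns the number of bytes written, or < 0 on error. */
static int encode_g722_block(const short *samples, int nb_samples, uint8_t *out)
{
    AVCodec *codec;
    AVCodecContext *ctx;
    int ret;

    avcodec_register_all();
    codec = avcodec_find_encoder(CODEC_ID_ADPCM_G722);
    if (!codec)
        return -1;

    ctx = avcodec_alloc_context();
    ctx->sample_rate = 16000;          /* G.722 operates on 16 kHz input */
    ctx->channels    = 1;
    ctx->sample_fmt  = SAMPLE_FMT_S16; /* matches .sample_fmts above */
    if (avcodec_open(ctx, codec) < 0) {
        av_free(ctx);
        return -1;
    }

    ret = avcodec_encode_audio(ctx, out, nb_samples, samples);

    avcodec_close(ctx);
    av_free(ctx);
    return ret;
}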