Revision 911e21a3

View differences:

libavcodec/ac3dec.c
994 994
        } else {
995 995
            gain *= s->dynamic_range[0];
996 996
        }
997
        for(i=0; i<256; i++) {
998
            s->transform_coeffs[ch][i] = s->fixed_coeffs[ch][i] * gain;
999
        }
997
        s->dsp.int32_to_float_fmul_scalar(s->transform_coeffs[ch], s->fixed_coeffs[ch], gain, 256);
1000 998
    }
1001 999

  
1002 1000
    /* downmix and MDCT. order depends on whether block switching is used for
libavcodec/ac3dec.h
158 158
    float mul_bias;                         ///< scaling for float_to_int16 conversion
159 159
///@}
160 160

  
161
    int fixed_coeffs[AC3_MAX_CHANNELS][AC3_MAX_COEFS];  ///> fixed-point transform coefficients
161
    DECLARE_ALIGNED_16(int, fixed_coeffs[AC3_MAX_CHANNELS][AC3_MAX_COEFS]);  ///> fixed-point transform coefficients
162 162

  
163 163
///@defgroup arrays aligned arrays
164 164
    DECLARE_ALIGNED_16(float, transform_coeffs[AC3_MAX_CHANNELS][AC3_MAX_COEFS]);   ///< transform coefficients
libavcodec/dsputil.c
3948 3948
    }
3949 3949
}
3950 3950

  
3951
/**
 * Convert int32 samples to floats, scaling each by a constant.
 * C reference implementation: dst[i] = src[i] * mul for i in [0, len).
 */
static void int32_to_float_fmul_scalar_c(float *dst, const int *src, float mul, int len){
    const int *end = src + len;
    while (src < end)
        *dst++ = *src++ * mul;
}
3956

  
3951 3957
static av_always_inline int float_to_int16_one(const float *src){
3952 3958
    int_fast32_t tmp = *(const int32_t*)src;
3953 3959
    if(tmp & 0xf0000){
......
4489 4495
    c->vector_fmul_reverse = vector_fmul_reverse_c;
4490 4496
    c->vector_fmul_add_add = ff_vector_fmul_add_add_c;
4491 4497
    c->vector_fmul_window = ff_vector_fmul_window_c;
4498
    c->int32_to_float_fmul_scalar = int32_to_float_fmul_scalar_c;
4492 4499
    c->float_to_int16 = ff_float_to_int16_c;
4493 4500
    c->float_to_int16_interleave = ff_float_to_int16_interleave_c;
4494 4501
    c->add_int16 = add_int16_c;
libavcodec/dsputil.h
370 370
    void (*vector_fmul_add_add)(float *dst, const float *src0, const float *src1, const float *src2, int src3, int len, int step);
371 371
    /* assume len is a multiple of 4, and arrays are 16-byte aligned */
372 372
    void (*vector_fmul_window)(float *dst, const float *src0, const float *src1, const float *win, float add_bias, int len);
373
    /* assume len is a multiple of 8, and arrays are 16-byte aligned */
374
    void (*int32_to_float_fmul_scalar)(float *dst, const int *src, float mul, int len);
373 375

  
374 376
    /* C version: convert floats from the range [384.0,386.0] to ints in [-32768,32767]
375 377
     * simd versions: convert floats from [-32768.0,32767.0] without rescaling and arrays are 16byte aligned */
libavcodec/i386/dsputil_mmx.c
2192 2192
        ff_vector_fmul_window_c(dst, src0, src1, win, add_bias, len);
2193 2193
}
2194 2194

  
2195
/**
 * Convert len int32 samples to floats, scaling each by mul (SSE version).
 * Equivalent to the C version: dst[i] = src[i] * mul for i in [0, len).
 * Contract (see dsputil.h): len is a multiple of 8 and the arrays are
 * 16-byte aligned — the aligned movaps stores fault otherwise.
 * Processes 8 ints (32 bytes) per loop iteration.
 * NOTE(review): cvtpi2ps is an MMX-state instruction; presumably callers
 * issue emms/femms elsewhere — confirm against project conventions.
 * NOTE(review): no xmm or "memory" clobbers are declared on the asm —
 * verify this matches the project's inline-asm policy.
 */
static void int32_to_float_fmul_scalar_sse(float *dst, const int *src, float mul, int len)
{
    x86_reg i = -4*len;                   /* byte offset from the array ends; counts up to 0 */
    asm volatile(
        "movss  %3, %%xmm4 \n"            /* xmm4[0] = mul */
        "shufps $0, %%xmm4, %%xmm4 \n"    /* broadcast mul to all 4 lanes */
        "1: \n"
        "cvtpi2ps   (%2,%0), %%xmm0 \n"   /* 2 int32 -> low 2 floats of xmm0 */
        "cvtpi2ps  8(%2,%0), %%xmm1 \n"
        "cvtpi2ps 16(%2,%0), %%xmm2 \n"
        "cvtpi2ps 24(%2,%0), %%xmm3 \n"
        "movlhps  %%xmm1,    %%xmm0 \n"   /* pack xmm1 into high half of xmm0 -> 4 floats */
        "movlhps  %%xmm3,    %%xmm2 \n"
        "mulps    %%xmm4,    %%xmm0 \n"   /* scale by mul */
        "mulps    %%xmm4,    %%xmm2 \n"
        "movaps   %%xmm0,   (%1,%0) \n"   /* aligned store of 4 floats */
        "movaps   %%xmm2, 16(%1,%0) \n"
        "add $32, %0 \n"                  /* advance 8 elements */
        "jl 1b \n"                        /* loop until offset reaches 0 */
        :"+r"(i)                          /* read/write loop counter */
        :"r"(dst+len), "r"(src+len), "xm"(mul) /* bases point one-past-end; i is negative */
    );
}
2218

  
2219
/**
 * Convert len int32 samples to floats, scaling each by mul (SSE2 version).
 * Equivalent to the C version: dst[i] = src[i] * mul for i in [0, len).
 * Contract (see dsputil.h): len is a multiple of 8 and the arrays are
 * 16-byte aligned — the aligned movaps stores fault otherwise.
 * cvtdq2ps converts 4 packed int32 at once, so only two converts are
 * needed per 8-element (32-byte) iteration, unlike the SSE variant.
 * NOTE(review): no xmm or "memory" clobbers are declared on the asm —
 * verify this matches the project's inline-asm policy.
 */
static void int32_to_float_fmul_scalar_sse2(float *dst, const int *src, float mul, int len)
{
    x86_reg i = -4*len;                   /* byte offset from the array ends; counts up to 0 */
    asm volatile(
        "movss  %3, %%xmm4 \n"            /* xmm4[0] = mul */
        "shufps $0, %%xmm4, %%xmm4 \n"    /* broadcast mul to all 4 lanes */
        "1: \n"
        "cvtdq2ps   (%2,%0), %%xmm0 \n"   /* 4 int32 -> 4 floats */
        "cvtdq2ps 16(%2,%0), %%xmm1 \n"
        "mulps    %%xmm4,    %%xmm0 \n"   /* scale by mul */
        "mulps    %%xmm4,    %%xmm1 \n"
        "movaps   %%xmm0,   (%1,%0) \n"   /* aligned store of 4 floats */
        "movaps   %%xmm1, 16(%1,%0) \n"
        "add $32, %0 \n"                  /* advance 8 elements */
        "jl 1b \n"                        /* loop until offset reaches 0 */
        :"+r"(i)                          /* read/write loop counter */
        :"r"(dst+len), "r"(src+len), "xm"(mul) /* bases point one-past-end; i is negative */
    );
}
2238

  
2195 2239
static void float_to_int16_3dnow(int16_t *dst, const float *src, long len){
2196 2240
    // not bit-exact: pf2id uses different rounding than C and SSE
2197 2241
    asm volatile(
......
2786 2830
            c->vector_fmul_reverse = vector_fmul_reverse_sse;
2787 2831
            c->vector_fmul_add_add = vector_fmul_add_add_sse;
2788 2832
            c->vector_fmul_window = vector_fmul_window_sse;
2833
            c->int32_to_float_fmul_scalar = int32_to_float_fmul_scalar_sse;
2789 2834
            c->float_to_int16 = float_to_int16_sse;
2790 2835
            c->float_to_int16_interleave = float_to_int16_interleave_sse;
2791 2836
        }
2792 2837
        if(mm_flags & MM_3DNOW)
2793 2838
            c->vector_fmul_add_add = vector_fmul_add_add_3dnow; // faster than sse
2794 2839
        if(mm_flags & MM_SSE2){
2840
            c->int32_to_float_fmul_scalar = int32_to_float_fmul_scalar_sse2;
2795 2841
            c->float_to_int16 = float_to_int16_sse2;
2796 2842
            c->float_to_int16_interleave = float_to_int16_interleave_sse2;
2797 2843
            c->add_int16 = add_int16_sse2;

Also available in: Unified diff