ffmpeg / libavcodec / arm / aac.h @ 2912e87a

/*
 * Copyright (c) 2010 Mans Rullgard <mans@mansr.com>
 *
 * This file is part of Libav.
 *
 * Libav is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * Libav is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with Libav; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#ifndef AVCODEC_ARM_AAC_H
#define AVCODEC_ARM_AAC_H

#include "config.h"

#if HAVE_NEON && HAVE_INLINE_ASM

/*
 * Look up two codebook entries selected by the two low 4-bit fields of
 * idx, multiply both by *scale and store them at dst.  Returns dst
 * advanced past the two floats written.
 */
#define VMUL2 VMUL2
static inline float *VMUL2(float *dst, const float *v, unsigned idx,
                           const float *scale)
{
    unsigned v0, v1;
    __asm__ volatile ("ubfx     %0,  %4,  #0, #4      \n\t"
                      "ubfx     %1,  %4,  #4, #4      \n\t"
                      "ldr      %0,  [%3, %0, lsl #2] \n\t"
                      "ldr      %1,  [%3, %1, lsl #2] \n\t"
                      "vld1.32  {d1[]},   [%5,:32]    \n\t"
                      "vmov     d0,  %0,  %1          \n\t"
                      "vmul.f32 d0,  d0,  d1          \n\t"
                      "vst1.32  {d0},     [%2,:64]!   \n\t"
                      : "=&r"(v0), "=&r"(v1), "+r"(dst)
                      : "r"(v), "r"(idx), "r"(scale)
                      : "d0", "d1");
    return dst;
}

/*
 * Four-entry variant of VMUL2: the four low 2-bit fields of idx select
 * the codebook entries, all four are multiplied by *scale and stored at
 * dst.  Returns dst advanced past the four floats written.
 */
#define VMUL4 VMUL4
static inline float *VMUL4(float *dst, const float *v, unsigned idx,
                           const float *scale)
{
    unsigned v0, v1, v2, v3;
    __asm__ volatile ("ubfx     %0,  %6,  #0, #2      \n\t"
                      "ubfx     %1,  %6,  #2, #2      \n\t"
                      "ldr      %0,  [%5, %0, lsl #2] \n\t"
                      "ubfx     %2,  %6,  #4, #2      \n\t"
                      "ldr      %1,  [%5, %1, lsl #2] \n\t"
                      "ubfx     %3,  %6,  #6, #2      \n\t"
                      "ldr      %2,  [%5, %2, lsl #2] \n\t"
                      "vmov     d0,  %0,  %1          \n\t"
                      "ldr      %3,  [%5, %3, lsl #2] \n\t"
                      "vld1.32  {d2[],d3[]},[%7,:32]  \n\t"
                      "vmov     d1,  %2,  %3          \n\t"
                      "vmul.f32 q0,  q0,  q1          \n\t"
                      "vst1.32  {q0},     [%4,:128]!  \n\t"
                      : "=&r"(v0), "=&r"(v1), "=&r"(v2), "=&r"(v3), "+r"(dst)
                      : "r"(v), "r"(idx), "r"(scale)
                      : "d0", "d1", "d2", "d3");
    return dst;
}

/*
 * Signed variant of VMUL2: the two low bits of 'sign' are shifted into
 * the IEEE sign-bit position and XORed into the loaded values before
 * scaling (bit 1 flips the first output, bit 0 the second).
 */
#define VMUL2S VMUL2S
static inline float *VMUL2S(float *dst, const float *v, unsigned idx,
                            unsigned sign, const float *scale)
{
    unsigned v0, v1, v2, v3;
    __asm__ volatile ("ubfx     %0,  %6,  #0, #4      \n\t"
                      "ubfx     %1,  %6,  #4, #4      \n\t"
                      "ldr      %0,  [%5, %0, lsl #2] \n\t"
                      "lsl      %2,  %8,  #30         \n\t"
                      "ldr      %1,  [%5, %1, lsl #2] \n\t"
                      "lsl      %3,  %8,  #31         \n\t"
                      "vmov     d0,  %0,  %1          \n\t"
                      "bic      %2,  %2,  #1<<30      \n\t"
                      "vld1.32  {d1[]},   [%7,:32]    \n\t"
                      "vmov     d2,  %2,  %3          \n\t"
                      "veor     d0,  d0,  d2          \n\t"
                      "vmul.f32 d0,  d0,  d1          \n\t"
                      "vst1.32  {d0},     [%4,:64]!   \n\t"
                      : "=&r"(v0), "=&r"(v1), "=&r"(v2), "=&r"(v3), "+r"(dst)
                      : "r"(v), "r"(idx), "r"(scale), "r"(sign)
                      : "d0", "d1", "d2");
    return dst;
}

/*
 * Signed variant of VMUL4: sign bits are taken MSB-first from 'sign' and
 * XORed into the four outputs; a sign bit is consumed (shifted out of
 * 'sign') only for values flagged as non-zero in idx >> 12.
 */
#define VMUL4S VMUL4S
static inline float *VMUL4S(float *dst, const float *v, unsigned idx,
                            unsigned sign, const float *scale)
{
    unsigned v0, v1, v2, v3, nz;
    __asm__ volatile ("vld1.32  {d2[],d3[]},[%9,:32]  \n\t"
                      "ubfx     %0,  %8,  #0, #2      \n\t"
                      "ubfx     %1,  %8,  #2, #2      \n\t"
                      "ldr      %0,  [%7, %0, lsl #2] \n\t"
                      "ubfx     %2,  %8,  #4, #2      \n\t"
                      "ldr      %1,  [%7, %1, lsl #2] \n\t"
                      "ubfx     %3,  %8,  #6, #2      \n\t"
                      "ldr      %2,  [%7, %2, lsl #2] \n\t"
                      "vmov     d0,  %0,  %1          \n\t"
                      "ldr      %3,  [%7, %3, lsl #2] \n\t"
                      "lsr      %6,  %8,  #12         \n\t"
                      "rbit     %6,  %6               \n\t"
                      "vmov     d1,  %2,  %3          \n\t"
                      "lsls     %6,  %6,  #1          \n\t"
                      "and      %0,  %5,  #1<<31      \n\t"
                      "lslcs    %5,  %5,  #1          \n\t"
                      "lsls     %6,  %6,  #1          \n\t"
                      "and      %1,  %5,  #1<<31      \n\t"
                      "lslcs    %5,  %5,  #1          \n\t"
                      "lsls     %6,  %6,  #1          \n\t"
                      "and      %2,  %5,  #1<<31      \n\t"
                      "lslcs    %5,  %5,  #1          \n\t"
                      "vmov     d4,  %0,  %1          \n\t"
                      "and      %3,  %5,  #1<<31      \n\t"
                      "vmov     d5,  %2,  %3          \n\t"
                      "veor     q0,  q0,  q2          \n\t"
                      "vmul.f32 q0,  q0,  q1          \n\t"
                      "vst1.32  {q0},     [%4,:128]!  \n\t"
                      : "=&r"(v0), "=&r"(v1), "=&r"(v2), "=&r"(v3), "+r"(dst),
                        "+r"(sign), "=r"(nz)
                      : "r"(v), "r"(idx), "r"(scale)
                      : "d0", "d1", "d2", "d3", "d4", "d5");
    return dst;
}

#endif /* HAVE_NEON && HAVE_INLINE_ASM */

#endif /* AVCODEC_ARM_AAC_H */
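For reference, the two-element routines above compute roughly the following in plain C. This is a sketch derived from reading the assembly; the vmul2_ref/vmul2s_ref names and the int/float union are illustrative only and not part of this header.

#include <stdint.h>

union intfloat32 { uint32_t i; float f; };

/* VMUL2: the two low 4-bit fields of idx pick codebook entries,
 * both are scaled by *scale and written to dst. */
static inline float *vmul2_ref(float *dst, const float *v, unsigned idx,
                               const float *scale)
{
    *dst++ = v[idx      & 15] * *scale;
    *dst++ = v[idx >> 4 & 15] * *scale;
    return dst;
}

/* VMUL2S: as VMUL2, but bits 1 and 0 of 'sign' flip the sign of the
 * first and second value respectively by XORing the IEEE sign bit,
 * mirroring the veor in the assembly above. */
static inline float *vmul2s_ref(float *dst, const float *v, unsigned idx,
                                unsigned sign, const float *scale)
{
    union intfloat32 a = { .f = v[idx      & 15] };
    union intfloat32 b = { .f = v[idx >> 4 & 15] };
    a.i ^= (sign >> 1) << 31;
    b.i ^=  sign       << 31;
    *dst++ = a.f * *scale;
    *dst++ = b.f * *scale;
    return dst;
}

VMUL4 and VMUL4S extend the same pattern to four 2-bit indices, with VMUL4S consuming its sign bits MSB-first and only for values flagged as non-zero in idx >> 12.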