ffmpeg / libavcodec / arm / dsputil_init_neon.c @ 6eabb0d3
/*
 * ARM NEON optimised DSP functions
 * Copyright (c) 2008 Mans Rullgard <mans@mansr.com>
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include <stdint.h>

#include "libavcodec/avcodec.h"
#include "libavcodec/dsputil.h"
#include "dsputil_arm.h"

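/* Prototypes for the NEON routines; the implementations live in the
 * corresponding ARM assembly sources in this directory. */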
void ff_simple_idct_neon(DCTELEM *data);
void ff_simple_idct_put_neon(uint8_t *dest, int line_size, DCTELEM *data);
void ff_simple_idct_add_neon(uint8_t *dest, int line_size, DCTELEM *data);

void ff_vp3_idct_neon(DCTELEM *data);
void ff_vp3_idct_put_neon(uint8_t *dest, int line_size, DCTELEM *data);
void ff_vp3_idct_add_neon(uint8_t *dest, int line_size, DCTELEM *data);
void ff_vp3_idct_dc_add_neon(uint8_t *dest, int line_size, const DCTELEM *data);

void ff_clear_block_neon(DCTELEM *block);
void ff_clear_blocks_neon(DCTELEM *blocks);

void ff_put_pixels16_neon(uint8_t *, const uint8_t *, int, int);
void ff_put_pixels16_x2_neon(uint8_t *, const uint8_t *, int, int);
void ff_put_pixels16_y2_neon(uint8_t *, const uint8_t *, int, int);
void ff_put_pixels16_xy2_neon(uint8_t *, const uint8_t *, int, int);
void ff_put_pixels8_neon(uint8_t *, const uint8_t *, int, int);
void ff_put_pixels8_x2_neon(uint8_t *, const uint8_t *, int, int);
void ff_put_pixels8_y2_neon(uint8_t *, const uint8_t *, int, int);
void ff_put_pixels8_xy2_neon(uint8_t *, const uint8_t *, int, int);
void ff_put_pixels16_x2_no_rnd_neon(uint8_t *, const uint8_t *, int, int);
void ff_put_pixels16_y2_no_rnd_neon(uint8_t *, const uint8_t *, int, int);
void ff_put_pixels16_xy2_no_rnd_neon(uint8_t *, const uint8_t *, int, int);
void ff_put_pixels8_x2_no_rnd_neon(uint8_t *, const uint8_t *, int, int);
void ff_put_pixels8_y2_no_rnd_neon(uint8_t *, const uint8_t *, int, int);
void ff_put_pixels8_xy2_no_rnd_neon(uint8_t *, const uint8_t *, int, int);

void ff_avg_pixels16_neon(uint8_t *, const uint8_t *, int, int);
void ff_avg_pixels8_neon(uint8_t *, const uint8_t *, int, int);

void ff_add_pixels_clamped_neon(const DCTELEM *, uint8_t *, int);
void ff_put_pixels_clamped_neon(const DCTELEM *, uint8_t *, int);
void ff_put_signed_pixels_clamped_neon(const DCTELEM *, uint8_t *, int);

void ff_put_h264_qpel16_mc00_neon(uint8_t *, uint8_t *, int);
void ff_put_h264_qpel16_mc10_neon(uint8_t *, uint8_t *, int);
void ff_put_h264_qpel16_mc20_neon(uint8_t *, uint8_t *, int);
void ff_put_h264_qpel16_mc30_neon(uint8_t *, uint8_t *, int);
void ff_put_h264_qpel16_mc01_neon(uint8_t *, uint8_t *, int);
void ff_put_h264_qpel16_mc11_neon(uint8_t *, uint8_t *, int);
void ff_put_h264_qpel16_mc21_neon(uint8_t *, uint8_t *, int);
void ff_put_h264_qpel16_mc31_neon(uint8_t *, uint8_t *, int);
void ff_put_h264_qpel16_mc02_neon(uint8_t *, uint8_t *, int);
void ff_put_h264_qpel16_mc12_neon(uint8_t *, uint8_t *, int);
void ff_put_h264_qpel16_mc22_neon(uint8_t *, uint8_t *, int);
void ff_put_h264_qpel16_mc32_neon(uint8_t *, uint8_t *, int);
void ff_put_h264_qpel16_mc03_neon(uint8_t *, uint8_t *, int);
void ff_put_h264_qpel16_mc13_neon(uint8_t *, uint8_t *, int);
void ff_put_h264_qpel16_mc23_neon(uint8_t *, uint8_t *, int);
void ff_put_h264_qpel16_mc33_neon(uint8_t *, uint8_t *, int);

void ff_put_h264_qpel8_mc00_neon(uint8_t *, uint8_t *, int);
void ff_put_h264_qpel8_mc10_neon(uint8_t *, uint8_t *, int);
void ff_put_h264_qpel8_mc20_neon(uint8_t *, uint8_t *, int);
void ff_put_h264_qpel8_mc30_neon(uint8_t *, uint8_t *, int);
void ff_put_h264_qpel8_mc01_neon(uint8_t *, uint8_t *, int);
void ff_put_h264_qpel8_mc11_neon(uint8_t *, uint8_t *, int);
void ff_put_h264_qpel8_mc21_neon(uint8_t *, uint8_t *, int);
void ff_put_h264_qpel8_mc31_neon(uint8_t *, uint8_t *, int);
void ff_put_h264_qpel8_mc02_neon(uint8_t *, uint8_t *, int);
void ff_put_h264_qpel8_mc12_neon(uint8_t *, uint8_t *, int);
void ff_put_h264_qpel8_mc22_neon(uint8_t *, uint8_t *, int);
void ff_put_h264_qpel8_mc32_neon(uint8_t *, uint8_t *, int);
void ff_put_h264_qpel8_mc03_neon(uint8_t *, uint8_t *, int);
void ff_put_h264_qpel8_mc13_neon(uint8_t *, uint8_t *, int);
void ff_put_h264_qpel8_mc23_neon(uint8_t *, uint8_t *, int);
void ff_put_h264_qpel8_mc33_neon(uint8_t *, uint8_t *, int);

void ff_avg_h264_qpel16_mc00_neon(uint8_t *, uint8_t *, int);
void ff_avg_h264_qpel16_mc10_neon(uint8_t *, uint8_t *, int);
void ff_avg_h264_qpel16_mc20_neon(uint8_t *, uint8_t *, int);
void ff_avg_h264_qpel16_mc30_neon(uint8_t *, uint8_t *, int);
void ff_avg_h264_qpel16_mc01_neon(uint8_t *, uint8_t *, int);
void ff_avg_h264_qpel16_mc11_neon(uint8_t *, uint8_t *, int);
void ff_avg_h264_qpel16_mc21_neon(uint8_t *, uint8_t *, int);
void ff_avg_h264_qpel16_mc31_neon(uint8_t *, uint8_t *, int);
void ff_avg_h264_qpel16_mc02_neon(uint8_t *, uint8_t *, int);
void ff_avg_h264_qpel16_mc12_neon(uint8_t *, uint8_t *, int);
void ff_avg_h264_qpel16_mc22_neon(uint8_t *, uint8_t *, int);
void ff_avg_h264_qpel16_mc32_neon(uint8_t *, uint8_t *, int);
void ff_avg_h264_qpel16_mc03_neon(uint8_t *, uint8_t *, int);
void ff_avg_h264_qpel16_mc13_neon(uint8_t *, uint8_t *, int);
void ff_avg_h264_qpel16_mc23_neon(uint8_t *, uint8_t *, int);
void ff_avg_h264_qpel16_mc33_neon(uint8_t *, uint8_t *, int);

void ff_avg_h264_qpel8_mc00_neon(uint8_t *, uint8_t *, int);
void ff_avg_h264_qpel8_mc10_neon(uint8_t *, uint8_t *, int);
void ff_avg_h264_qpel8_mc20_neon(uint8_t *, uint8_t *, int);
void ff_avg_h264_qpel8_mc30_neon(uint8_t *, uint8_t *, int);
void ff_avg_h264_qpel8_mc01_neon(uint8_t *, uint8_t *, int);
void ff_avg_h264_qpel8_mc11_neon(uint8_t *, uint8_t *, int);
void ff_avg_h264_qpel8_mc21_neon(uint8_t *, uint8_t *, int);
void ff_avg_h264_qpel8_mc31_neon(uint8_t *, uint8_t *, int);
void ff_avg_h264_qpel8_mc02_neon(uint8_t *, uint8_t *, int);
void ff_avg_h264_qpel8_mc12_neon(uint8_t *, uint8_t *, int);
void ff_avg_h264_qpel8_mc22_neon(uint8_t *, uint8_t *, int);
void ff_avg_h264_qpel8_mc32_neon(uint8_t *, uint8_t *, int);
void ff_avg_h264_qpel8_mc03_neon(uint8_t *, uint8_t *, int);
void ff_avg_h264_qpel8_mc13_neon(uint8_t *, uint8_t *, int);
void ff_avg_h264_qpel8_mc23_neon(uint8_t *, uint8_t *, int);
void ff_avg_h264_qpel8_mc33_neon(uint8_t *, uint8_t *, int);

void ff_put_h264_chroma_mc8_neon(uint8_t *, uint8_t *, int, int, int, int);
void ff_put_h264_chroma_mc4_neon(uint8_t *, uint8_t *, int, int, int, int);
void ff_put_h264_chroma_mc2_neon(uint8_t *, uint8_t *, int, int, int, int);

void ff_avg_h264_chroma_mc8_neon(uint8_t *, uint8_t *, int, int, int, int);
void ff_avg_h264_chroma_mc4_neon(uint8_t *, uint8_t *, int, int, int, int);
void ff_avg_h264_chroma_mc2_neon(uint8_t *, uint8_t *, int, int, int, int);

void ff_vp3_v_loop_filter_neon(uint8_t *, int, int *);
void ff_vp3_h_loop_filter_neon(uint8_t *, int, int *);

void ff_vector_fmul_neon(float *dst, const float *src0, const float *src1, int len);
void ff_vector_fmul_window_neon(float *dst, const float *src0,
                                const float *src1, const float *win,
                                float add_bias, int len);
void ff_vector_fmul_scalar_neon(float *dst, const float *src, float mul,
                                int len);
void ff_vector_fmul_sv_scalar_2_neon(float *dst, const float *src,
                                     const float **vp, float mul, int len);
void ff_vector_fmul_sv_scalar_4_neon(float *dst, const float *src,
                                     const float **vp, float mul, int len);
void ff_sv_fmul_scalar_2_neon(float *dst, const float **vp, float mul,
                              int len);
void ff_sv_fmul_scalar_4_neon(float *dst, const float **vp, float mul,
                              int len);
void ff_butterflies_float_neon(float *v1, float *v2, int len);
float ff_scalarproduct_float_neon(const float *v1, const float *v2, int len);
void ff_int32_to_float_fmul_scalar_neon(float *dst, const int *src,
                                        float mul, int len);
void ff_vector_fmul_reverse_neon(float *dst, const float *src0,
                                 const float *src1, int len);
void ff_vector_fmul_add_neon(float *dst, const float *src0, const float *src1,
                             const float *src2, int len);

void ff_vector_clipf_neon(float *dst, const float *src, float min, float max,
                          int len);
void ff_float_to_int16_neon(int16_t *, const float *, long);
void ff_float_to_int16_interleave_neon(int16_t *, const float **, long, int);

void ff_vorbis_inverse_coupling_neon(float *mag, float *ang, int blocksize);

int32_t ff_scalarproduct_int16_neon(const int16_t *v1, const int16_t *v2, int len,
                                    int shift);
int32_t ff_scalarproduct_and_madd_int16_neon(int16_t *v1, const int16_t *v2,
                                             const int16_t *v3, int len, int mul);

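/*
 * Install the NEON implementations into the DSPContext.  This is expected
 * to be called from the generic ARM DSP init code once NEON support is
 * known to be available; a minimal sketch of such a call site (guard name
 * is an assumption) could look like:
 *
 *     if (HAVE_NEON)
 *         ff_dsputil_init_neon(c, avctx);
 */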
void ff_dsputil_init_neon(DSPContext *c, AVCodecContext *avctx)
{
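    /* The NEON IDCTs operate on full-resolution 8x8 blocks only, so the
     * default IDCT is left in place when lowres decoding is requested. */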
    if (!avctx->lowres) {
        if (avctx->idct_algo == FF_IDCT_AUTO ||
            avctx->idct_algo == FF_IDCT_SIMPLENEON) {
            c->idct_put              = ff_simple_idct_put_neon;
            c->idct_add              = ff_simple_idct_add_neon;
            c->idct                  = ff_simple_idct_neon;
            c->idct_permutation_type = FF_PARTTRANS_IDCT_PERM;
        } else if ((CONFIG_VP3_DECODER || CONFIG_VP5_DECODER ||
                    CONFIG_VP6_DECODER) &&
                   avctx->idct_algo == FF_IDCT_VP3) {
            c->idct_put              = ff_vp3_idct_put_neon;
            c->idct_add              = ff_vp3_idct_add_neon;
            c->idct                  = ff_vp3_idct_neon;
            c->idct_permutation_type = FF_TRANSPOSE_IDCT_PERM;
        }
    }

    c->clear_block  = ff_clear_block_neon;
    c->clear_blocks = ff_clear_blocks_neon;

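    /* Pixel copy/averaging tables: the first index selects the block width
     * (0 = 16 pixels, 1 = 8 pixels), the second the half-pel interpolation
     * (0 = none, 1 = horizontal, 2 = vertical, 3 = both). */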
    c->put_pixels_tab[0][0] = ff_put_pixels16_neon;
    c->put_pixels_tab[0][1] = ff_put_pixels16_x2_neon;
    c->put_pixels_tab[0][2] = ff_put_pixels16_y2_neon;
    c->put_pixels_tab[0][3] = ff_put_pixels16_xy2_neon;
    c->put_pixels_tab[1][0] = ff_put_pixels8_neon;
    c->put_pixels_tab[1][1] = ff_put_pixels8_x2_neon;
    c->put_pixels_tab[1][2] = ff_put_pixels8_y2_neon;
    c->put_pixels_tab[1][3] = ff_put_pixels8_xy2_neon;

    c->put_no_rnd_pixels_tab[0][0] = ff_put_pixels16_neon;
    c->put_no_rnd_pixels_tab[0][1] = ff_put_pixels16_x2_no_rnd_neon;
    c->put_no_rnd_pixels_tab[0][2] = ff_put_pixels16_y2_no_rnd_neon;
    c->put_no_rnd_pixels_tab[0][3] = ff_put_pixels16_xy2_no_rnd_neon;
    c->put_no_rnd_pixels_tab[1][0] = ff_put_pixels8_neon;
    c->put_no_rnd_pixels_tab[1][1] = ff_put_pixels8_x2_no_rnd_neon;
    c->put_no_rnd_pixels_tab[1][2] = ff_put_pixels8_y2_no_rnd_neon;
    c->put_no_rnd_pixels_tab[1][3] = ff_put_pixels8_xy2_no_rnd_neon;

    c->avg_pixels_tab[0][0] = ff_avg_pixels16_neon;
    c->avg_pixels_tab[1][0] = ff_avg_pixels8_neon;

    c->add_pixels_clamped        = ff_add_pixels_clamped_neon;
    c->put_pixels_clamped        = ff_put_pixels_clamped_neon;
    c->put_signed_pixels_clamped = ff_put_signed_pixels_clamped_neon;

if (CONFIG_H264_DECODER) {
|
224 |
c->put_h264_chroma_pixels_tab[0] = ff_put_h264_chroma_mc8_neon;
|
225 |
c->put_h264_chroma_pixels_tab[1] = ff_put_h264_chroma_mc4_neon;
|
226 |
c->put_h264_chroma_pixels_tab[2] = ff_put_h264_chroma_mc2_neon;
|
227 |
|
228 |
c->avg_h264_chroma_pixels_tab[0] = ff_avg_h264_chroma_mc8_neon;
|
229 |
c->avg_h264_chroma_pixels_tab[1] = ff_avg_h264_chroma_mc4_neon;
|
230 |
c->avg_h264_chroma_pixels_tab[2] = ff_avg_h264_chroma_mc2_neon;
|
231 |
|
232 |
c->put_h264_qpel_pixels_tab[0][ 0] = ff_put_h264_qpel16_mc00_neon; |
233 |
c->put_h264_qpel_pixels_tab[0][ 1] = ff_put_h264_qpel16_mc10_neon; |
234 |
c->put_h264_qpel_pixels_tab[0][ 2] = ff_put_h264_qpel16_mc20_neon; |
235 |
c->put_h264_qpel_pixels_tab[0][ 3] = ff_put_h264_qpel16_mc30_neon; |
236 |
c->put_h264_qpel_pixels_tab[0][ 4] = ff_put_h264_qpel16_mc01_neon; |
237 |
c->put_h264_qpel_pixels_tab[0][ 5] = ff_put_h264_qpel16_mc11_neon; |
238 |
c->put_h264_qpel_pixels_tab[0][ 6] = ff_put_h264_qpel16_mc21_neon; |
239 |
c->put_h264_qpel_pixels_tab[0][ 7] = ff_put_h264_qpel16_mc31_neon; |
240 |
c->put_h264_qpel_pixels_tab[0][ 8] = ff_put_h264_qpel16_mc02_neon; |
241 |
c->put_h264_qpel_pixels_tab[0][ 9] = ff_put_h264_qpel16_mc12_neon; |
242 |
c->put_h264_qpel_pixels_tab[0][10] = ff_put_h264_qpel16_mc22_neon; |
243 |
c->put_h264_qpel_pixels_tab[0][11] = ff_put_h264_qpel16_mc32_neon; |
244 |
c->put_h264_qpel_pixels_tab[0][12] = ff_put_h264_qpel16_mc03_neon; |
245 |
c->put_h264_qpel_pixels_tab[0][13] = ff_put_h264_qpel16_mc13_neon; |
246 |
c->put_h264_qpel_pixels_tab[0][14] = ff_put_h264_qpel16_mc23_neon; |
247 |
c->put_h264_qpel_pixels_tab[0][15] = ff_put_h264_qpel16_mc33_neon; |
248 |
|
249 |
c->put_h264_qpel_pixels_tab[1][ 0] = ff_put_h264_qpel8_mc00_neon; |
250 |
c->put_h264_qpel_pixels_tab[1][ 1] = ff_put_h264_qpel8_mc10_neon; |
251 |
c->put_h264_qpel_pixels_tab[1][ 2] = ff_put_h264_qpel8_mc20_neon; |
252 |
c->put_h264_qpel_pixels_tab[1][ 3] = ff_put_h264_qpel8_mc30_neon; |
253 |
c->put_h264_qpel_pixels_tab[1][ 4] = ff_put_h264_qpel8_mc01_neon; |
254 |
c->put_h264_qpel_pixels_tab[1][ 5] = ff_put_h264_qpel8_mc11_neon; |
255 |
c->put_h264_qpel_pixels_tab[1][ 6] = ff_put_h264_qpel8_mc21_neon; |
256 |
c->put_h264_qpel_pixels_tab[1][ 7] = ff_put_h264_qpel8_mc31_neon; |
257 |
c->put_h264_qpel_pixels_tab[1][ 8] = ff_put_h264_qpel8_mc02_neon; |
258 |
c->put_h264_qpel_pixels_tab[1][ 9] = ff_put_h264_qpel8_mc12_neon; |
259 |
c->put_h264_qpel_pixels_tab[1][10] = ff_put_h264_qpel8_mc22_neon; |
260 |
c->put_h264_qpel_pixels_tab[1][11] = ff_put_h264_qpel8_mc32_neon; |
261 |
c->put_h264_qpel_pixels_tab[1][12] = ff_put_h264_qpel8_mc03_neon; |
262 |
c->put_h264_qpel_pixels_tab[1][13] = ff_put_h264_qpel8_mc13_neon; |
263 |
c->put_h264_qpel_pixels_tab[1][14] = ff_put_h264_qpel8_mc23_neon; |
264 |
c->put_h264_qpel_pixels_tab[1][15] = ff_put_h264_qpel8_mc33_neon; |
265 |
|
266 |
c->avg_h264_qpel_pixels_tab[0][ 0] = ff_avg_h264_qpel16_mc00_neon; |
267 |
c->avg_h264_qpel_pixels_tab[0][ 1] = ff_avg_h264_qpel16_mc10_neon; |
268 |
c->avg_h264_qpel_pixels_tab[0][ 2] = ff_avg_h264_qpel16_mc20_neon; |
269 |
c->avg_h264_qpel_pixels_tab[0][ 3] = ff_avg_h264_qpel16_mc30_neon; |
270 |
c->avg_h264_qpel_pixels_tab[0][ 4] = ff_avg_h264_qpel16_mc01_neon; |
271 |
c->avg_h264_qpel_pixels_tab[0][ 5] = ff_avg_h264_qpel16_mc11_neon; |
272 |
c->avg_h264_qpel_pixels_tab[0][ 6] = ff_avg_h264_qpel16_mc21_neon; |
273 |
c->avg_h264_qpel_pixels_tab[0][ 7] = ff_avg_h264_qpel16_mc31_neon; |
274 |
c->avg_h264_qpel_pixels_tab[0][ 8] = ff_avg_h264_qpel16_mc02_neon; |
275 |
c->avg_h264_qpel_pixels_tab[0][ 9] = ff_avg_h264_qpel16_mc12_neon; |
276 |
c->avg_h264_qpel_pixels_tab[0][10] = ff_avg_h264_qpel16_mc22_neon; |
277 |
c->avg_h264_qpel_pixels_tab[0][11] = ff_avg_h264_qpel16_mc32_neon; |
278 |
c->avg_h264_qpel_pixels_tab[0][12] = ff_avg_h264_qpel16_mc03_neon; |
279 |
c->avg_h264_qpel_pixels_tab[0][13] = ff_avg_h264_qpel16_mc13_neon; |
280 |
c->avg_h264_qpel_pixels_tab[0][14] = ff_avg_h264_qpel16_mc23_neon; |
281 |
c->avg_h264_qpel_pixels_tab[0][15] = ff_avg_h264_qpel16_mc33_neon; |
282 |
|
283 |
c->avg_h264_qpel_pixels_tab[1][ 0] = ff_avg_h264_qpel8_mc00_neon; |
284 |
c->avg_h264_qpel_pixels_tab[1][ 1] = ff_avg_h264_qpel8_mc10_neon; |
285 |
c->avg_h264_qpel_pixels_tab[1][ 2] = ff_avg_h264_qpel8_mc20_neon; |
286 |
c->avg_h264_qpel_pixels_tab[1][ 3] = ff_avg_h264_qpel8_mc30_neon; |
287 |
c->avg_h264_qpel_pixels_tab[1][ 4] = ff_avg_h264_qpel8_mc01_neon; |
288 |
c->avg_h264_qpel_pixels_tab[1][ 5] = ff_avg_h264_qpel8_mc11_neon; |
289 |
c->avg_h264_qpel_pixels_tab[1][ 6] = ff_avg_h264_qpel8_mc21_neon; |
290 |
c->avg_h264_qpel_pixels_tab[1][ 7] = ff_avg_h264_qpel8_mc31_neon; |
291 |
c->avg_h264_qpel_pixels_tab[1][ 8] = ff_avg_h264_qpel8_mc02_neon; |
292 |
c->avg_h264_qpel_pixels_tab[1][ 9] = ff_avg_h264_qpel8_mc12_neon; |
293 |
c->avg_h264_qpel_pixels_tab[1][10] = ff_avg_h264_qpel8_mc22_neon; |
294 |
c->avg_h264_qpel_pixels_tab[1][11] = ff_avg_h264_qpel8_mc32_neon; |
295 |
c->avg_h264_qpel_pixels_tab[1][12] = ff_avg_h264_qpel8_mc03_neon; |
296 |
c->avg_h264_qpel_pixels_tab[1][13] = ff_avg_h264_qpel8_mc13_neon; |
297 |
c->avg_h264_qpel_pixels_tab[1][14] = ff_avg_h264_qpel8_mc23_neon; |
298 |
c->avg_h264_qpel_pixels_tab[1][15] = ff_avg_h264_qpel8_mc33_neon; |
299 |
} |
300 |
|
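    /* VP3/Theora-style loop filters and the DC-only IDCT add are only
     * reached through the VP3 decoder. */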
    if (CONFIG_VP3_DECODER) {
        c->vp3_v_loop_filter = ff_vp3_v_loop_filter_neon;
        c->vp3_h_loop_filter = ff_vp3_h_loop_filter_neon;
        c->vp3_idct_dc_add   = ff_vp3_idct_dc_add_neon;
    }

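    /* Floating-point vector helpers, used mainly by the audio decoders. */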
    c->vector_fmul                = ff_vector_fmul_neon;
    c->vector_fmul_window         = ff_vector_fmul_window_neon;
    c->vector_fmul_scalar         = ff_vector_fmul_scalar_neon;
    c->butterflies_float          = ff_butterflies_float_neon;
    c->scalarproduct_float        = ff_scalarproduct_float_neon;
    c->int32_to_float_fmul_scalar = ff_int32_to_float_fmul_scalar_neon;
    c->vector_fmul_reverse        = ff_vector_fmul_reverse_neon;
    c->vector_fmul_add            = ff_vector_fmul_add_neon;
    c->vector_clipf               = ff_vector_clipf_neon;

    c->vector_fmul_sv_scalar[0] = ff_vector_fmul_sv_scalar_2_neon;
    c->vector_fmul_sv_scalar[1] = ff_vector_fmul_sv_scalar_4_neon;

    c->sv_fmul_scalar[0] = ff_sv_fmul_scalar_2_neon;
    c->sv_fmul_scalar[1] = ff_sv_fmul_scalar_4_neon;

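    /* The NEON float->int16 conversions do not necessarily round exactly
     * like the C reference, so only install them when bit-exact output is
     * not requested. */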
    if (!(avctx->flags & CODEC_FLAG_BITEXACT)) {
        c->float_to_int16            = ff_float_to_int16_neon;
        c->float_to_int16_interleave = ff_float_to_int16_interleave_neon;
    }

    if (CONFIG_VORBIS_DECODER)
        c->vorbis_inverse_coupling = ff_vorbis_inverse_coupling_neon;

    c->scalarproduct_int16          = ff_scalarproduct_int16_neon;
    c->scalarproduct_and_madd_int16 = ff_scalarproduct_and_madd_int16_neon;
}