ffmpeg / libavcodec / arm / dsputil_vfp.S @ 2912e87a

/*
 * Copyright (c) 2008 Siarhei Siamashka <ssvb@users.sourceforge.net>
 *
 * This file is part of Libav.
 *
 * Libav is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * Libav is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with Libav; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include "config.h"
#include "asm.S"

        .syntax unified
/*
 * VFP is a floating-point coprocessor used in some ARM cores. VFP11 has 1-cycle
 * throughput for almost all instructions (except double-precision arithmetic),
 * but rather high latency: 4 cycles for loads and 8 cycles for arithmetic
 * operations. Scheduling code to avoid pipeline stalls is therefore very
 * important for performance. Another useful feature is that VFP has independent
 * load/store and arithmetic pipelines, so it is possible to keep both busy at
 * the same time and achieve more than 1 operation per cycle. The load/store
 * pipeline can process 2 single-precision floating-point values per cycle and
 * supports bulk loads and stores of large register sets. Arithmetic operations
 * can be performed on short vectors, which keeps the arithmetic pipeline busy
 * while the processor issues and executes other instructions. Detailed
 * optimization manuals can be found at http://www.arm.com
 */
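
/*
 * A minimal sketch of the short-vector mode used below: writing 3 to the
 * FPSCR LEN field (the "orr r12, r12, #(3 << 16)" below) selects a vector
 * length of 4. A multiply whose destination lies in the vector banks
 * s8-s31 then operates on 4 consecutive registers at once, e.g.
 *
 *     vmul.f32   s8, s0, s8   @ s8 = s0*s8, s9 = s1*s9, s10 = s2*s10, s11 = s3*s11
 *
 * Clearing LEN again (the "bic" before returning) restores plain scalar
 * behaviour.
 */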

/**
 * ARM VFP optimized implementation of 'vector_fmul_c' function.
 * Assume that len is a positive number and a multiple of 8.
 */
@ void ff_vector_fmul_vfp(float *dst, const float *src0, const float *src1, int len)
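/*
 * Rough C reference of what this routine computes, as a sketch (names are
 * illustrative, not the exact generic implementation):
 *
 *     void vector_fmul_c(float *dst, const float *src0,
 *                        const float *src1, int len)
 *     {
 *         int i;
 *         for (i = 0; i < len; i++)
 *             dst[i] = src0[i] * src1[i];
 *     }
 *
 * The loop below works on blocks of 8 floats, double-buffering between
 * s0-s15 and s16-s31 so that the loads for the next block overlap the
 * multiplies and stores of the current one.
 */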
function ff_vector_fmul_vfp, export=1
        vpush           {d8-d15}
        fmrx            r12, fpscr
        orr             r12, r12, #(3 << 16) /* set vector size to 4 */
        fmxr            fpscr, r12

        vldmia          r1!, {s0-s3}
        vldmia          r2!, {s8-s11}
        vldmia          r1!, {s4-s7}
        vldmia          r2!, {s12-s15}
        vmul.f32        s8,  s0,  s8
1:
        subs            r3,  r3,  #16
        vmul.f32        s12, s4,  s12
        vldmiage        r1!, {s16-s19}
        vldmiage        r2!, {s24-s27}
        vldmiage        r1!, {s20-s23}
        vldmiage        r2!, {s28-s31}
        vmulge.f32      s24, s16, s24
        vstmia          r0!, {s8-s11}
        vstmia          r0!, {s12-s15}
        vmulge.f32      s28, s20, s28
        vldmiagt        r1!, {s0-s3}
        vldmiagt        r2!, {s8-s11}
        vldmiagt        r1!, {s4-s7}
        vldmiagt        r2!, {s12-s15}
        vmulge.f32      s8,  s0,  s8
        vstmiage        r0!, {s24-s27}
        vstmiage        r0!, {s28-s31}
        bgt             1b

        bic             r12, r12, #(7 << 16) /* set vector size back to 1 */
        fmxr            fpscr, r12
        vpop            {d8-d15}
        bx              lr
endfunc

/**
 * ARM VFP optimized implementation of 'vector_fmul_reverse_c' function.
 * Assume that len is a positive number and a multiple of 8.
 */
@ void ff_vector_fmul_reverse_vfp(float *dst, const float *src0,
@                                 const float *src1, int len)
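/*
 * Rough C reference of what this routine computes, as a sketch (names are
 * illustrative):
 *
 *     void vector_fmul_reverse_c(float *dst, const float *src0,
 *                                const float *src1, int len)
 *     {
 *         int i;
 *         for (i = 0; i < len; i++)
 *             dst[i] = src0[i] * src1[len - 1 - i];
 *     }
 *
 * src1 is read backwards: r2 is first advanced past the end of the buffer
 * and then walked down with vldmdb while r1 is walked up with vldmia.
 */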
function ff_vector_fmul_reverse_vfp, export=1
        vpush           {d8-d15}
        add             r2,  r2,  r3, lsl #2
        vldmdb          r2!, {s0-s3}
        vldmia          r1!, {s8-s11}
        vldmdb          r2!, {s4-s7}
        vldmia          r1!, {s12-s15}
        vmul.f32        s8,  s3,  s8
        vmul.f32        s9,  s2,  s9
        vmul.f32        s10, s1,  s10
        vmul.f32        s11, s0,  s11
1:
        subs            r3,  r3,  #16
        vldmdbge        r2!, {s16-s19}
        vmul.f32        s12, s7,  s12
        vldmiage        r1!, {s24-s27}
        vmul.f32        s13, s6,  s13
        vldmdbge        r2!, {s20-s23}
        vmul.f32        s14, s5,  s14
        vldmiage        r1!, {s28-s31}
        vmul.f32        s15, s4,  s15
        vmulge.f32      s24, s19, s24
        vldmdbgt        r2!, {s0-s3}
        vmulge.f32      s25, s18, s25
        vstmia          r0!, {s8-s13}
        vmulge.f32      s26, s17, s26
        vldmiagt        r1!, {s8-s11}
        vmulge.f32      s27, s16, s27
        vmulge.f32      s28, s23, s28
        vldmdbgt        r2!, {s4-s7}
        vmulge.f32      s29, s22, s29
        vstmia          r0!, {s14-s15}
        vmulge.f32      s30, s21, s30
        vmulge.f32      s31, s20, s31
        vmulge.f32      s8,  s3,  s8
        vldmiagt        r1!, {s12-s15}
        vmulge.f32      s9,  s2,  s9
        vmulge.f32      s10, s1,  s10
        vstmiage        r0!, {s24-s27}
        vmulge.f32      s11, s0,  s11
        vstmiage        r0!, {s28-s31}
        bgt             1b

        vpop            {d8-d15}
        bx              lr
endfunc