ffmpeg / libavcodec / x86 / mathops.h @ 2912e87a

/*
 * simple math operations
 * Copyright (c) 2006 Michael Niedermayer <michaelni@gmx.at> et al
 *
 * This file is part of Libav.
 *
 * Libav is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * Libav is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with Libav; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#ifndef AVCODEC_X86_MATHOPS_H
#define AVCODEC_X86_MATHOPS_H

#include "config.h"
#include "libavutil/common.h"

#if ARCH_X86_32

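/* MULL: 32x32 -> 64-bit signed multiply followed by a right shift, i.e. the
 * C expression (int)(((int64_t)a * b) >> shift).  imull leaves the full
 * product in edx:eax; shrdl shifts bits from edx into eax, so the low 32
 * bits of the shifted result are returned. */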
#define MULL MULL
static av_always_inline av_const int MULL(int a, int b, unsigned shift)
{
    int rt, dummy;
    __asm__ (
        "imull %3               \n\t"
        "shrdl %4, %%edx, %%eax \n\t"
        :"=a"(rt), "=d"(dummy)
        :"a"(a), "rm"(b), "ci"((uint8_t)shift)
    );
    return rt;
}
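/* MULH: signed 32x32 multiply returning only the high 32 bits of the
 * product, i.e. (int)(((int64_t)a * b) >> 32); the low half left in eax is
 * discarded. */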
#define MULH MULH
static av_always_inline av_const int MULH(int a, int b)
{
    int rt, dummy;
    __asm__ (
        "imull %3"
        :"=d"(rt), "=a"(dummy)
        :"a"(a), "rm"(b)
    );
    return rt;
}
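/* MUL64: full 32x32 -> 64-bit signed multiply.  The "=A" constraint binds
 * the result to the edx:eax register pair that imull writes. */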
#define MUL64 MUL64
static av_always_inline av_const int64_t MUL64(int a, int b)
{
    int64_t rt;
    __asm__ (
        "imull %2"
        :"=A"(rt)
        :"a"(a), "rm"(b)
    );
    return rt;
}

#endif /* ARCH_X86_32 */

#if HAVE_CMOV
/* median of 3 */
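/* Branch-free median: the cmov sequence computes
 * min(max(a, b), max(min(a, b), c)) without any conditional jumps. */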
#define mid_pred mid_pred
static inline av_const int mid_pred(int a, int b, int c)
{
    int i = b;
    __asm__ volatile(
        "cmp    %2, %1 \n\t"
        "cmovg  %1, %0 \n\t"
        "cmovg  %2, %1 \n\t"
        "cmp    %3, %1 \n\t"
        "cmovl  %3, %1 \n\t"
        "cmp    %1, %0 \n\t"
        "cmovg  %1, %0 \n\t"
        :"+&r"(i), "+&r"(a)
        :"r"(b), "r"(c)
    );
    return i;
}
#endif

#if HAVE_CMOV
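/* COPY3_IF_LT(x, y, a, b, c, d): if y < x, assign x = y, a = b and c = d.
 * A single cmpl sets the flags and three cmovl instructions perform the
 * conditional copies, so no branch is needed. */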
#define COPY3_IF_LT(x, y, a, b, c, d)\
__asm__ volatile(\
    "cmpl  %0, %3       \n\t"\
    "cmovl %3, %0       \n\t"\
    "cmovl %4, %1       \n\t"\
    "cmovl %5, %2       \n\t"\
    : "+&r" (x), "+&r" (a), "+r" (c)\
    : "r" (y), "r" (b), "r" (d)\
);
#endif

// avoid +32 for shift optimization (gcc should do that ...)
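/* NEG_SSR32: arithmetic shift right by (32 - s).  The count is passed as -s;
 * x86 masks shift counts to the low 5 bits, so -s acts as 32 - s and the
 * explicit subtraction is avoided. */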
#define NEG_SSR32 NEG_SSR32
static inline int32_t NEG_SSR32(int32_t a, int8_t s){
    __asm__ ("sarl %1, %0\n\t"
         : "+r" (a)
         : "ic" ((uint8_t)(-s))
    );
    return a;
}
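/* NEG_USR32: logical (unsigned) shift right by (32 - s), using the same
 * negated-count trick as NEG_SSR32. */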
#define NEG_USR32 NEG_USR32
static inline uint32_t NEG_USR32(uint32_t a, int8_t s){
    __asm__ ("shrl %1, %0\n\t"
         : "+r" (a)
         : "ic" ((uint8_t)(-s))
    );
    return a;
}
#endif /* AVCODEC_X86_MATHOPS_H */