ffmpeg / libavcodec / x86 / mathops.h @ 92f441ae

/*
 * simple math operations
 * Copyright (c) 2006 Michael Niedermayer <michaelni@gmx.at> et al
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#ifndef AVCODEC_X86_MATHOPS_H
#define AVCODEC_X86_MATHOPS_H

#include "config.h"
#include "libavutil/common.h"

#if ARCH_X86_32
#define MULL(ra, rb, shift) \
        ({ int rt, dummy; __asm__ (\
            "imull %3               \n\t"\
            "shrdl %4, %%edx, %%eax \n\t"\
            : "=a"(rt), "=d"(dummy)\
            : "a" ((int)(ra)), "rm" ((int)(rb)), "i"(shift));\
         rt; })

#define MULH(ra, rb) \
    ({ int rt, dummy;\
     __asm__ ("imull %3\n\t" : "=d"(rt), "=a"(dummy): "a" ((int)(ra)), "rm" ((int)(rb)));\
     rt; })

#define MUL64(ra, rb) \
    ({ int64_t rt;\
     __asm__ ("imull %2\n\t" : "=A"(rt) : "a" ((int)(ra)), "g" ((int)(rb)));\
     rt; })
#endif
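
/*
 * Portable C equivalents of the three macros above (an illustrative
 * sketch with hypothetical names, not part of the original file):
 * MULL is a signed 32x32->64 multiply followed by a constant right
 * shift, MULH keeps only the high 32 bits of the product, and MUL64
 * returns the full 64-bit product.
 *
 *     static inline int mull_c(int a, int b, int shift)
 *     {
 *         return (int)(((int64_t)a * b) >> shift);
 *     }
 *
 *     static inline int mulh_c(int a, int b)
 *     {
 *         return (int)(((int64_t)a * b) >> 32);
 *     }
 *
 *     static inline int64_t mul64_c(int a, int b)
 *     {
 *         return (int64_t)a * b;
 *     }
 *
 * The inline asm gets the same results from a single imull, which
 * leaves the high half of the product in %edx; MULL then merges the
 * two halves with shrdl instead of shifting a 64-bit value.
 */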

#if HAVE_CMOV
/* median of 3 */
#define mid_pred mid_pred
static inline av_const int mid_pred(int a, int b, int c)
{
    int i=b;
    __asm__ volatile(
        "cmp    %2, %1 \n\t"
        "cmovg  %1, %0 \n\t"
        "cmovg  %2, %1 \n\t"
        "cmp    %3, %1 \n\t"
        "cmovl  %3, %1 \n\t"
        "cmp    %1, %0 \n\t"
        "cmovg  %1, %0 \n\t"
        :"+&r"(i), "+&r"(a)
        :"r"(b), "r"(c)
    );
    return i;
}
#endif
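
/*
 * The cmov sequence above computes the median of three values without
 * branches, using the identity median(a, b, c) =
 * min(max(a, b), max(min(a, b), c)).  A portable reading (illustrative
 * sketch with a hypothetical name, not part of the original file):
 *
 *     static inline int mid_pred_c(int a, int b, int c)
 *     {
 *         int mx = a > b ? a : b;        // max(a, b)
 *         int mn = a > b ? b : a;        // min(a, b)
 *         int lo = mn > c ? mn : c;      // max(min(a, b), c)
 *         return mx < lo ? mx : lo;      // min of the two
 *     }
 *
 * Staying branch-free avoids misprediction penalties, since the
 * relative order of the inputs is typically unpredictable.
 */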

#if HAVE_CMOV
#define COPY3_IF_LT(x, y, a, b, c, d)\
__asm__ volatile(\
    "cmpl  %0, %3       \n\t"\
    "cmovl %3, %0       \n\t"\
    "cmovl %4, %1       \n\t"\
    "cmovl %5, %2       \n\t"\
    : "+&r" (x), "+&r" (a), "+r" (c)\
    : "r" (y), "r" (b), "r" (d)\
);
#endif
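
/*
 * COPY3_IF_LT(x, y, a, b, c, d) conditionally copies three values in
 * one go.  A portable equivalent (illustrative sketch, not part of
 * the original file):
 *
 *     #define COPY3_IF_LT_C(x, y, a, b, c, d) \
 *         do {                                \
 *             if ((y) < (x)) {                \
 *                 (x) = (y);                  \
 *                 (a) = (b);                  \
 *                 (c) = (d);                  \
 *             }                               \
 *         } while (0)
 *
 * The cmov version performs all three copies without a branch, which
 * helps in search loops (e.g. motion estimation) where the comparison
 * outcome is hard to predict.
 */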

// avoid +32 for shift optimization (gcc should do that ...)
#define NEG_SSR32 NEG_SSR32
static inline int32_t NEG_SSR32(int32_t a, int8_t s){
    __asm__ ("sarl %1, %0\n\t"
         : "+r" (a)
         : "ic" ((uint8_t)(-s))
    );
    return a;
}

#define NEG_USR32 NEG_USR32
static inline uint32_t NEG_USR32(uint32_t a, int8_t s){
    __asm__ ("shrl %1, %0\n\t"
         : "+r" (a)
         : "ic" ((uint8_t)(-s))
    );
    return a;
}
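
/*
 * NEG_SSR32(a, s) and NEG_USR32(a, s) compute a >> (32 - s) with an
 * arithmetic and a logical shift respectively, for 1 <= s <= 32.
 * x86 shift instructions on 32-bit operands use only the low 5 bits
 * of the count, so passing -s yields the same shift amount as 32 - s
 * and saves the subtraction.  Portable equivalents (illustrative
 * sketch, not part of the original file):
 *
 *     static inline int32_t neg_ssr32_c(int32_t a, int8_t s)
 *     {
 *         return a >> (32 - s);   // sign-filling shift
 *     }
 *
 *     static inline uint32_t neg_usr32_c(uint32_t a, int8_t s)
 *     {
 *         return a >> (32 - s);   // zero-filling shift
 *     }
 */
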
#endif /* AVCODEC_X86_MATHOPS_H */