ffmpeg / libavcodec / armv4l / mathops.h @ 01f54021


/*
 * simple math operations
 * Copyright (c) 2006 Michael Niedermayer <michaelni@gmx.at> et al
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#ifndef AVCODEC_ARMV4L_MATHOPS_H
#define AVCODEC_ARMV4L_MATHOPS_H

#include <stdint.h>
#include "libavutil/common.h"

#ifdef FRAC_BITS
#   define MULL(a, b) \
        ({  int lo, hi;\
         __asm__("smull %0, %1, %2, %3     \n\t"\
             "mov   %0, %0,     lsr %4\n\t"\
             "add   %1, %0, %1, lsl %5\n\t"\
             : "=&r"(lo), "=&r"(hi)\
             : "r"(b), "r"(a), "i"(FRAC_BITS), "i"(32-FRAC_BITS));\
         hi; })
#endif
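
/*
 * Illustrative sketch, not part of the original header: MULL(a, b) is a
 * signed fixed-point multiply, i.e. ((int64_t)a * b) >> FRAC_BITS, kept in
 * 32 bits.  A hypothetical portable reference (assumed name, and assuming
 * GCC's arithmetic right shift of signed values, which matches the asm):
 */
#ifdef FRAC_BITS
static inline av_const int mull_c_reference(int a, int b)
{
    /* full 64-bit product, then drop FRAC_BITS fractional bits */
    return (int)(((int64_t)a * b) >> FRAC_BITS);
}
#endif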

#ifdef HAVE_ARMV6
static inline av_const int MULH(int a, int b)
{
    int r;
    __asm__ ("smmul %0, %1, %2" : "=r"(r) : "r"(a), "r"(b));
    return r;
}
#define MULH MULH
#else
#define MULH(a, b) \
    ({ int lo, hi;\
     __asm__ ("smull %0, %1, %2, %3" : "=&r"(lo), "=&r"(hi) : "r"(b), "r"(a));\
     hi; })
#endif
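
/*
 * Illustrative sketch, not part of the original header: both MULH variants
 * above return the high 32 bits of the signed 64-bit product a * b (smmul on
 * ARMv6, smull otherwise).  A hypothetical portable reference (assumed name):
 */
static inline av_const int mulh_c_reference(int a, int b)
{
    /* take the upper half of the full 64-bit product */
    return (int)(((int64_t)a * b) >> 32);
}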

static inline av_const int64_t MUL64(int a, int b)
{
    union { uint64_t x; unsigned hl[2]; } x;
    __asm__ ("smull %0, %1, %2, %3"
         : "=r"(x.hl[0]), "=r"(x.hl[1]) : "r"(a), "r"(b));
    return x.x;
}
#define MUL64 MUL64

static inline av_const int64_t MAC64(int64_t d, int a, int b)
{
    union { uint64_t x; unsigned hl[2]; } x = { d };
    __asm__ ("smlal %0, %1, %2, %3"
         : "+r"(x.hl[0]), "+r"(x.hl[1]) : "r"(a), "r"(b));
    return x.x;
}
#define MAC64(d, a, b) ((d) = MAC64(d, a, b))
#define MLS64(d, a, b) MAC64(d, -(a), b)
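
/*
 * Illustrative sketch, not part of the original header: MUL64 yields the full
 * signed 64-bit product, MAC64(d, a, b) does d += (int64_t)a * b, and
 * MLS64(d, a, b) does d -= (int64_t)a * b.  A hypothetical usage example
 * (assumed function and variable names):
 */
static inline int64_t mathops_dot3_example(const int *u, const int *v)
{
    int64_t acc = MUL64(u[0], v[0]);  /* acc  = (int64_t)u[0] * v[0] */
    MAC64(acc, u[1], v[1]);           /* acc += (int64_t)u[1] * v[1] */
    MLS64(acc, u[2], v[2]);           /* acc -= (int64_t)u[2] * v[2] */
    return acc;
}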

#if defined(HAVE_ARMV5TE)

/* signed 16x16 -> 32 multiply add accumulate */
#   define MAC16(rt, ra, rb) \
        __asm__ ("smlabb %0, %2, %3, %0" : "=r" (rt) : "0" (rt), "r" (ra), "r" (rb));
/* signed 16x16 -> 32 multiply */
#   define MUL16(ra, rb)                                                \
        ({ int __rt;                                                    \
         __asm__ ("smulbb %0, %1, %2" : "=r" (__rt) : "r" (ra), "r" (rb));  \
         __rt; })

#endif
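
/*
 * Illustrative sketch, not part of the original header: MAC16 and MUL16 use
 * the bottom halfwords of their operands (smlabb/smulbb), i.e. signed
 * 16x16 multiplies with a 32-bit result.  A hypothetical portable reference
 * for MUL16 (assumed name):
 */
static inline av_const int mul16_c_reference(int ra, int rb)
{
    /* multiply the low 16 bits of each operand as signed values */
    return (int16_t)ra * (int16_t)rb;
}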

#endif /* AVCODEC_ARMV4L_MATHOPS_H */