ffmpeg / libavutil / arm / intmath.h @ eb3755a5

/*
 * Copyright (c) 2010 Mans Rullgard <mans@mansr.com>
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#ifndef AVUTIL_ARM_INTMATH_H
#define AVUTIL_ARM_INTMATH_H

#include <stdint.h>

#include "config.h"
#include "libavutil/attributes.h"

#if HAVE_INLINE_ASM

#if HAVE_ARMV6

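/* Fast division by multiplication with a precomputed reciprocal: smmul
 * keeps the high 32 bits of the signed 64-bit product a * ff_inverse[b].
 * Divisors b <= 2 take the shift path instead, since ff_inverse[2] is
 * 1 << 31 and would be treated as negative by the signed multiply. */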
#define FASTDIV FASTDIV
static av_always_inline av_const int FASTDIV(int a, int b)
{
    int r, t;
    __asm__ volatile("cmp     %3, #2               \n\t"
                     "ldr     %1, [%4, %3, lsl #2] \n\t"
                     "lsrle   %0, %2, #1           \n\t"
                     "smmulgt %0, %1, %2           \n\t"
                     : "=&r"(r), "=&r"(t)
                     : "r"(a), "r"(b), "r"(ff_inverse)
                     : "cc");
    return r;
}
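/* usat saturates a to the unsigned 8-bit range (0..255) in one instruction. */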
#define av_clip_uint8 av_clip_uint8_arm
static av_always_inline av_const uint8_t av_clip_uint8_arm(int a)
{
    unsigned x;
    __asm__ volatile ("usat %0, #8,  %1" : "=r"(x) : "r"(a));
    return x;
}
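/* ssat saturates a to the signed 8-bit range (-128..127). */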
#define av_clip_int8 av_clip_int8_arm
static av_always_inline av_const int8_t av_clip_int8_arm(int a)
{
    int x;
    __asm__ volatile ("ssat %0, #8,  %1" : "=r"(x) : "r"(a));
    return x;
}
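/* usat saturates a to the unsigned 16-bit range (0..65535). */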
#define av_clip_uint16 av_clip_uint16_arm
static av_always_inline av_const uint16_t av_clip_uint16_arm(int a)
{
    unsigned x;
    __asm__ volatile ("usat %0, #16, %1" : "=r"(x) : "r"(a));
    return x;
}
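/* ssat saturates a to the signed 16-bit range (-32768..32767). */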
#define av_clip_int16 av_clip_int16_arm
static av_always_inline av_const int16_t av_clip_int16_arm(int a)
{
    int x;
    __asm__ volatile ("ssat %0, #16, %1" : "=r"(x) : "r"(a));
    return x;
}

#else /* HAVE_ARMV6 */

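/* Pre-ARMv6 fallback: umull forms the full 64-bit unsigned product of a
 * and the reciprocal ff_inverse[b]; the high word in r is the quotient,
 * the low word in t is discarded. */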
#define FASTDIV FASTDIV
static av_always_inline av_const int FASTDIV(int a, int b)
{
    int r, t;
    __asm__ volatile("umull %1, %0, %2, %3"
                     : "=&r"(r), "=&r"(t) : "r"(a), "r"(ff_inverse[b]));
    return r;
}

#endif /* HAVE_ARMV6 */

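/* Clip a 64-bit value to the int32_t range. adds sets Z exactly when the
 * high word is pure sign extension of the low word (a already fits), in
 * which case the low word is returned unchanged; otherwise x becomes
 * INT32_MAX or INT32_MIN according to the sign of the high word. */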
#define av_clipl_int32 av_clipl_int32_arm
static av_always_inline av_const int32_t av_clipl_int32_arm(int64_t a)
{
    int x, y;
    __asm__ volatile ("adds   %1, %R2, %Q2, lsr #31  \n\t"
                      "mvnne  %1, #1<<31             \n\t"
                      "moveq  %0, %Q2                \n\t"
                      "eorne  %0, %1,  %R2, asr #31  \n\t"
                      : "=r"(x), "=&r"(y) : "r"(a) : "cc");
    return x;
}

#endif /* HAVE_INLINE_ASM */

#endif /* AVUTIL_ARM_INTMATH_H */