Revision eb3755a5 libavutil/arm/intmath.h

This revision changes every helper in libavutil/arm/intmath.h from static inline to static av_always_inline, so the compiler inlines these one- and two-instruction wrappers even when its inlining heuristics would otherwise decline.

View differences:

--- a/libavutil/arm/intmath.h
+++ b/libavutil/arm/intmath.h
@@ -31,7 +31,7 @@
 #if HAVE_ARMV6
 
 #define FASTDIV FASTDIV
-static inline av_const int FASTDIV(int a, int b)
+static av_always_inline av_const int FASTDIV(int a, int b)
 {
     int r, t;
     __asm__ volatile("cmp     %3, #2               \n\t"
@@ -43,7 +43,7 @@
 }
 
 #define av_clip_uint8 av_clip_uint8_arm
-static inline av_const uint8_t av_clip_uint8_arm(int a)
+static av_always_inline av_const uint8_t av_clip_uint8_arm(int a)
 {
     unsigned x;
     __asm__ volatile ("usat %0, #8,  %1" : "=r"(x) : "r"(a));
@@ -51,7 +51,7 @@
 }
 
 #define av_clip_int8 av_clip_int8_arm
-static inline av_const uint8_t av_clip_int8_arm(int a)
+static av_always_inline av_const uint8_t av_clip_int8_arm(int a)
 {
     unsigned x;
     __asm__ volatile ("ssat %0, #8,  %1" : "=r"(x) : "r"(a));
@@ -59,7 +59,7 @@
 }
 
 #define av_clip_uint16 av_clip_uint16_arm
-static inline av_const uint16_t av_clip_uint16_arm(int a)
+static av_always_inline av_const uint16_t av_clip_uint16_arm(int a)
 {
     unsigned x;
     __asm__ volatile ("usat %0, #16, %1" : "=r"(x) : "r"(a));
@@ -67,7 +67,7 @@
 }
 
 #define av_clip_int16 av_clip_int16_arm
-static inline av_const int16_t av_clip_int16_arm(int a)
+static av_always_inline av_const int16_t av_clip_int16_arm(int a)
 {
     int x;
     __asm__ volatile ("ssat %0, #16, %1" : "=r"(x) : "r"(a));
@@ -77,7 +77,7 @@
 #else /* HAVE_ARMV6 */
 
 #define FASTDIV FASTDIV
-static inline av_const int FASTDIV(int a, int b)
+static av_always_inline av_const int FASTDIV(int a, int b)
 {
     int r, t;
     __asm__ volatile("umull %1, %0, %2, %3"
@@ -88,7 +88,7 @@
 #endif /* HAVE_ARMV6 */
 
 #define av_clipl_int32 av_clipl_int32_arm
-static inline av_const int32_t av_clipl_int32_arm(int64_t a)
+static av_always_inline av_const int32_t av_clipl_int32_arm(int64_t a)
 {
     int x, y;
     __asm__ volatile ("adds   %1, %R2, %Q2, lsr #31  \n\t"

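The final hunk is av_clipl_int32_arm, which saturates a 64-bit intermediate into the int32 range; %Q2 and %R2 in its asm name the low and high words of the 64-bit operand on little-endian ARM. A portable reference for the same clamp, again with an illustrative name:

    #include <stdint.h>

    /* Clamp a 64-bit value to [INT32_MIN, INT32_MAX]. */
    static inline int32_t clipl_int32_ref(int64_t a)
    {
        if (a < INT32_MIN) return INT32_MIN;
        if (a > INT32_MAX) return INT32_MAX;
        return (int32_t)a;
    }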