ffmpeg / libavcodec / ps2 / mmi.h @ 2912e87a

/*
 * copyright (c) 2002 Leon van Stuivenberg
 *
 * This file is part of Libav.
 *
 * Libav is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * Libav is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with Libav; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#ifndef AVCODEC_PS2_MMI_H
#define AVCODEC_PS2_MMI_H

#define align16 __attribute__ ((aligned (16)))
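
/* Data accessed through the 128-bit quadword loads and stores below (lq/sq)
 * is expected to be 16-byte aligned, which is what this attribute provides;
 * an illustrative declaration would be:
 *
 *     static int16_t block[64] align16;
 */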

/*
#define r0 $zero
#define r1 $at          //assembler temporary
#define r2 $v0          //return
#define r3 $v1          //return
#define r4 $a0          //arg
#define r5 $a1          //arg
#define r6 $a2          //arg
#define r7 $a3          //arg
#define r8 $t0          //temp
#define r9 $t1          //temp
#define r10 $t2         //temp
#define r11 $t3         //temp
#define r12 $t4         //temp
#define r13 $t5         //temp
#define r14 $t6         //temp
#define r15 $t7         //temp
#define r16 $s0         //saved temp
#define r17 $s1         //saved temp
#define r18 $s2         //saved temp
#define r19 $s3         //saved temp
#define r20 $s4         //saved temp
#define r21 $s5         //saved temp
#define r22 $s6         //saved temp
#define r23 $s7         //saved temp
#define r24 $t8         //temp
#define r25 $t9         //temp
#define r26 $k0         //kernel
#define r27 $k1         //kernel
#define r28 $gp         //global ptr
#define r29 $sp         //stack ptr
#define r30 $fp         //frame ptr
#define r31 $ra         //return addr
*/
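
/*
 * Each of the macros below wraps a single R5900 (EE core) instruction in
 * inline asm.  Most paste their register arguments into the instruction
 * text with the '#' stringizing operator, so callers pass literal register
 * names such as $8; for the three-operand arithmetic macros the parameter
 * order is (rs, rt, rd) even though the emitted instruction names rd first.
 */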

#define         lq(base, off, reg)        \
        __asm__ volatile ("lq " #reg ", %0("#base ")" : : "i" (off) )

#define         lq2(mem, reg)        \
        __asm__ volatile ("lq " #reg ", %0" : : "r" (mem))

#define         sq(reg, off, base)        \
        __asm__ volatile ("sq " #reg ", %0("#base ")" : : "i" (off) )

/*
#define         ld(base, off, reg)        \
        __asm__ volatile ("ld " #reg ", " #off "("#base ")")
*/
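
/*
 * ld3/ldr3/ldl3 below (and sd3 further down) assemble the 32-bit MIPS
 * I-type instruction word by hand instead of relying on a mnemonic:
 * bits 31..26 hold the primary opcode (0xdc000000, 0x6c000000, 0x68000000
 * and 0xfc000000 are LD, LDR, LDL and SD shifted into place), bits 25..21
 * the base register, bits 20..16 the target register and bits 15..0 the
 * offset.  Because the register fields are shifted in as integers, base
 * and reg must be plain register numbers here (e.g. 8, not $8).
 */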

#define         ld3(base, off, reg)        \
        __asm__ volatile (".word %0" : : "i" ( 0xdc000000 | (base<<21) | (reg<<16) | (off)))

#define         ldr3(base, off, reg)        \
        __asm__ volatile (".word %0" : : "i" ( 0x6c000000 | (base<<21) | (reg<<16) | (off)))

#define         ldl3(base, off, reg)        \
        __asm__ volatile (".word %0" : : "i" ( 0x68000000 | (base<<21) | (reg<<16) | (off)))

/*
#define         sd(reg, off, base)        \
        __asm__ volatile ("sd " #reg ", " #off "("#base ")")
*/
// The assembler seems to mis-encode the 'sd' mnemonic, so the instruction
// word is emitted by hand instead.
#define         sd3(reg, off, base)        \
        __asm__ volatile (".word %0" : : "i" ( 0xfc000000 | (base<<21) | (reg<<16) | (off)))

#define         sw(reg, off, base)        \
        __asm__ volatile ("sw " #reg ", " #off "("#base ")")

#define         sq2(reg, mem)        \
        __asm__ volatile ("sq " #reg ", %0" : : "m" (*(mem)))

#define         pinth(rs, rt, rd) \
        __asm__ volatile ("pinth  " #rd ", " #rs ", " #rt )

#define         phmadh(rs, rt, rd) \
        __asm__ volatile ("phmadh " #rd ", " #rs ", " #rt )

#define         pcpyud(rs, rt, rd) \
        __asm__ volatile ("pcpyud " #rd ", " #rs ", " #rt )

#define         pcpyld(rs, rt, rd) \
        __asm__ volatile ("pcpyld " #rd ", " #rs ", " #rt )

#define         pcpyh(rt, rd) \
        __asm__ volatile ("pcpyh  " #rd ", " #rt )

#define         paddw(rs, rt, rd) \
        __asm__ volatile ("paddw  " #rd ", " #rs ", " #rt )

#define         pextlw(rs, rt, rd) \
        __asm__ volatile ("pextlw " #rd ", " #rs ", " #rt )

#define         pextuw(rs, rt, rd) \
        __asm__ volatile ("pextuw " #rd ", " #rs ", " #rt )

#define         pextlh(rs, rt, rd) \
        __asm__ volatile ("pextlh " #rd ", " #rs ", " #rt )

#define         pextuh(rs, rt, rd) \
        __asm__ volatile ("pextuh " #rd ", " #rs ", " #rt )

#define         psubw(rs, rt, rd) \
        __asm__ volatile ("psubw  " #rd ", " #rs ", " #rt )

#define         psraw(rt, sa, rd) \
        __asm__ volatile ("psraw  " #rd ", " #rt ", %0" : : "i"(sa) )

#define         ppach(rs, rt, rd) \
        __asm__ volatile ("ppach  " #rd ", " #rs ", " #rt )

#define         ppacb(rs, rt, rd) \
        __asm__ volatile ("ppacb  " #rd ", " #rs ", " #rt )

#define         prevh(rt, rd) \
        __asm__ volatile ("prevh  " #rd ", " #rt )

#define         pmulth(rs, rt, rd) \
        __asm__ volatile ("pmulth " #rd ", " #rs ", " #rt )

#define         pmaxh(rs, rt, rd) \
        __asm__ volatile ("pmaxh " #rd ", " #rs ", " #rt )

#define         pminh(rs, rt, rd) \
        __asm__ volatile ("pminh " #rd ", " #rs ", " #rt )

#define         pinteh(rs, rt, rd) \
        __asm__ volatile ("pinteh  " #rd ", " #rs ", " #rt )

#define         paddh(rs, rt, rd) \
        __asm__ volatile ("paddh  " #rd ", " #rs ", " #rt )

#define         psubh(rs, rt, rd) \
        __asm__ volatile ("psubh  " #rd ", " #rs ", " #rt )

#define         psrah(rt, sa, rd) \
        __asm__ volatile ("psrah  " #rd ", " #rt ", %0" : : "i"(sa) )

#define         pmfhl_uw(rd) \
        __asm__ volatile ("pmfhl.uw  " #rd)

#define         pextlb(rs, rt, rd) \
        __asm__ volatile ("pextlb  " #rd ", " #rs ", " #rt )
#endif /* AVCODEC_PS2_MMI_H */