ffmpeg / libavcodec / x86 / x86inc.asm @ 3f87f39c

;*****************************************************************************
;* x86inc.asm
;*****************************************************************************
;* Copyright (C) 2005-2008 Loren Merritt <lorenm@u.washington.edu>
;*
;* This file is part of FFmpeg.
;*
;* FFmpeg is free software; you can redistribute it and/or
;* modify it under the terms of the GNU Lesser General Public
;* License as published by the Free Software Foundation; either
;* version 2.1 of the License, or (at your option) any later version.
;*
;* FFmpeg is distributed in the hope that it will be useful,
;* but WITHOUT ANY WARRANTY; without even the implied warranty of
;* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
;* Lesser General Public License for more details.
;*
;* You should have received a copy of the GNU Lesser General Public
;* License along with FFmpeg; if not, write to the Free Software
;* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
;*****************************************************************************

%ifdef ARCH_X86_64
    %ifidn __OUTPUT_FORMAT__,win32
        %define WIN64
    %else
        %define UNIX64
    %endif
%endif

; FIXME: All of the 64bit asm functions that take a stride as an argument
; via register, assume that the high dword of that register is filled with 0.
; This is true in practice (since we never do any 64bit arithmetic on strides,
; and x264's strides are all positive), but is not guaranteed by the ABI.

; Name of the .rodata section.
; Kludge: Something on OS X fails to align .rodata even given an align attribute,
; so use a different read-only section.
%macro SECTION_RODATA 0-1 16
    %ifidn __OUTPUT_FORMAT__,macho64
        SECTION .text align=%1
    %elifidn __OUTPUT_FORMAT__,macho
        SECTION .text align=%1
        fakegot:
    %else
        SECTION .rodata align=%1
    %endif
%endmacro
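
; A minimal usage sketch (not part of the original file; the constant names
; are hypothetical): put read-only tables in the section chosen by the macro,
; optionally overriding the default 16-byte alignment.
;     SECTION_RODATA
;     pw_32: times 8 dw 32
;     SECTION_RODATA 32
;     pb_shuf: times 32 db 0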

; PIC support macros.
; x86_64 can't fit 64bit address literals in most instruction types,
; so shared objects (under the assumption that they might be anywhere
; in memory) must use an address mode that does fit.
; So all accesses to global variables must use this macro, e.g.
;     mov eax, [foo GLOBAL]
;   instead of
;     mov eax, [foo]
;
; x86_32 doesn't require PIC.
; Some distros prefer shared objects to be PIC, but nothing breaks if
; the code contains a few textrels, so we'll skip that complexity.

%ifdef WIN64
    %define PIC
%elifndef ARCH_X86_64
    %undef PIC
%endif
%ifdef PIC
    %define GLOBAL wrt rip
%else
    %define GLOBAL
%endif
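
; A minimal sketch (not part of the original file; pw_32 is the hypothetical
; constant from the SECTION_RODATA example above): every load of a global
; appends GLOBAL, which expands to "wrt rip" only when PIC is defined and to
; nothing otherwise.
;     movq  mm0, [pw_32 GLOBAL]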

; Macros to eliminate most code duplication between x86_32 and x86_64:
; Currently this works only for leaf functions which load all their arguments
; into registers at the start, and make no other use of the stack. Luckily that
; covers most of x264's asm.

; PROLOGUE:
; %1 = number of arguments. loads them from stack if needed.
; %2 = number of registers used. pushes callee-saved regs if needed.
; %3 = number of xmm registers used. pushes callee-saved xmm regs if needed.
; %4 = list of names to define to registers
; PROLOGUE can also be invoked by adding the same options to cglobal

; e.g.
; cglobal foo, 2,3,0, dst, src, tmp
; declares a function (foo), taking two args (dst and src), one local variable (tmp)
; and no xmm registers

; TODO Some functions can use some args directly from the stack. If they're the
; last args then you can just not declare them, but if they're in the middle
; we need a more flexible macro.

; RET:
; Pops anything that was pushed by PROLOGUE

; REP_RET:
; Same, but if it doesn't pop anything it becomes a 2-byte ret, for athlons
; which are slow when a normal ret follows a branch.

; registers:
; rN and rNq are the native-size register holding function argument N
; rNd, rNw, rNb are dword, word, and byte size
; rNm is the original location of arg N (a register or on the stack), dword
; rNmp is native size
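
; A minimal sketch (not part of the original file; the function and argument
; names are made up): a leaf function declared through cglobal, whose body
; refers to its arguments by name and size and returns through RET.
;     cglobal foo, 2,3,0, dst, src, tmp
;         mov   tmpd, [srcq]      ; tmpd/srcq alias r2d/r1
;         mov   [dstq], tmpd
;         RET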

%macro DECLARE_REG 6
    %define r%1q %2
    %define r%1d %3
    %define r%1w %4
    %define r%1b %5
    %define r%1m %6
    %ifid %6 ; i.e. it's a register
        %define r%1mp %2
    %elifdef ARCH_X86_64 ; memory
        %define r%1mp qword %6
    %else
        %define r%1mp dword %6
    %endif
    %define r%1  %2
%endmacro

%macro DECLARE_REG_SIZE 2
    %define r%1q r%1
    %define e%1q r%1
    %define r%1d e%1
    %define e%1d e%1
    %define r%1w %1
    %define e%1w %1
    %define r%1b %2
    %define e%1b %2
%ifndef ARCH_X86_64
    %define r%1  e%1
%endif
%endmacro

DECLARE_REG_SIZE ax, al
DECLARE_REG_SIZE bx, bl
DECLARE_REG_SIZE cx, cl
DECLARE_REG_SIZE dx, dl
DECLARE_REG_SIZE si, sil
DECLARE_REG_SIZE di, dil
DECLARE_REG_SIZE bp, bpl

; t# defines for when per-arch register allocation is more complex than just function arguments

%macro DECLARE_REG_TMP 1-*
    %assign %%i 0
    %rep %0
        CAT_XDEFINE t, %%i, r%1
        %assign %%i %%i+1
        %rotate 1
    %endrep
%endmacro

%macro DECLARE_REG_TMP_SIZE 0-*
    %rep %0
        %define t%1q t%1 %+ q
        %define t%1d t%1 %+ d
        %define t%1w t%1 %+ w
        %define t%1b t%1 %+ b
        %rotate 1
    %endrep
%endmacro

DECLARE_REG_TMP_SIZE 0,1,2,3,4,5,6,7
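
; A minimal sketch (not part of the original file): when the convenient
; scratch registers differ per arch, map t0, t1, ... onto whichever rN are
; free; the tNq/tNd/tNw/tNb size variants then follow automatically.
;     DECLARE_REG_TMP 2,0     ; t0 -> r2, t1 -> r0
;     mov t0d, 7              ; expands to mov r2d, 7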

%ifdef ARCH_X86_64
    %define gprsize 8
%else
    %define gprsize 4
%endif

%macro PUSH 1
    push %1
    %assign stack_offset stack_offset+gprsize
%endmacro

%macro POP 1
    pop %1
    %assign stack_offset stack_offset-gprsize
%endmacro

%macro SUB 2
    sub %1, %2
    %ifidn %1, rsp
        %assign stack_offset stack_offset+(%2)
    %endif
%endmacro

%macro ADD 2
    add %1, %2
    %ifidn %1, rsp
        %assign stack_offset stack_offset-(%2)
    %endif
%endmacro
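
; A minimal sketch (not part of the original file): using these wrappers keeps
; stack_offset accurate, so stack-argument references such as r4m still point
; at the right slot after the stack pointer has moved.
;     PUSH r3                 ; stack_offset += gprsize
;     SUB  rsp, 32            ; stack_offset += 32
;     mov  r0d, r4m           ; still resolves to the correct [rsp/esp + ...]
;     ADD  rsp, 32
;     POP  r3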

%macro movifnidn 2
    %ifnidn %1, %2
        mov %1, %2
    %endif
%endmacro

%macro movsxdifnidn 2
    %ifnidn %1, %2
        movsxd %1, %2
    %endif
%endmacro

%macro ASSERT 1
    %if (%1) == 0
        %error assert failed
    %endif
%endmacro

%macro DEFINE_ARGS 0-*
    %ifdef n_arg_names
        %assign %%i 0
        %rep n_arg_names
            CAT_UNDEF arg_name %+ %%i, q
            CAT_UNDEF arg_name %+ %%i, d
            CAT_UNDEF arg_name %+ %%i, w
            CAT_UNDEF arg_name %+ %%i, b
            CAT_UNDEF arg_name, %%i
            %assign %%i %%i+1
        %endrep
    %endif

    %assign %%i 0
    %rep %0
        %xdefine %1q r %+ %%i %+ q
        %xdefine %1d r %+ %%i %+ d
        %xdefine %1w r %+ %%i %+ w
        %xdefine %1b r %+ %%i %+ b
        CAT_XDEFINE arg_name, %%i, %1
        %assign %%i %%i+1
        %rotate 1
    %endrep
    %assign n_arg_names %%i
%endmacro

%ifdef WIN64 ; Windows x64 ;=================================================

DECLARE_REG 0, rcx, ecx, cx,  cl,  ecx
DECLARE_REG 1, rdx, edx, dx,  dl,  edx
DECLARE_REG 2, r8,  r8d, r8w, r8b, r8d
DECLARE_REG 3, r9,  r9d, r9w, r9b, r9d
DECLARE_REG 4, rdi, edi, di,  dil, [rsp + stack_offset + 40]
DECLARE_REG 5, rsi, esi, si,  sil, [rsp + stack_offset + 48]
DECLARE_REG 6, rax, eax, ax,  al,  [rsp + stack_offset + 56]
%define r7m [rsp + stack_offset + 64]
%define r8m [rsp + stack_offset + 72]

%macro LOAD_IF_USED 2 ; reg_id, number_of_args
    %if %1 < %2
        mov r%1, [rsp + stack_offset + 8 + %1*8]
    %endif
%endmacro

%macro PROLOGUE 2-4+ ; #args, #regs, #xmm_regs, arg_names...
    ASSERT %2 >= %1
    %assign regs_used %2
    ASSERT regs_used <= 7
    %if %0 > 2
        %assign xmm_regs_used %3
    %else
        %assign xmm_regs_used 0
    %endif
    ASSERT xmm_regs_used <= 16
    %if regs_used > 4
        push r4
        push r5
        %assign stack_offset stack_offset+16
    %endif
    %if xmm_regs_used > 6
        sub rsp, (xmm_regs_used-6)*16+16
        %assign stack_offset stack_offset+(xmm_regs_used-6)*16+16
        %assign %%i xmm_regs_used
        %rep (xmm_regs_used-6)
            %assign %%i %%i-1
            movdqa [rsp + (%%i-6)*16+8], xmm %+ %%i
        %endrep
    %endif
    LOAD_IF_USED 4, %1
    LOAD_IF_USED 5, %1
    LOAD_IF_USED 6, %1
    DEFINE_ARGS %4
%endmacro

%macro RESTORE_XMM_INTERNAL 1
    %if xmm_regs_used > 6
        %assign %%i xmm_regs_used
        %rep (xmm_regs_used-6)
            %assign %%i %%i-1
            movdqa xmm %+ %%i, [%1 + (%%i-6)*16+8]
        %endrep
        add %1, (xmm_regs_used-6)*16+16
    %endif
%endmacro

%macro RESTORE_XMM 1
    RESTORE_XMM_INTERNAL %1
    %assign stack_offset stack_offset-(xmm_regs_used-6)*16+16
    %assign xmm_regs_used 0
%endmacro

%macro RET 0
    RESTORE_XMM_INTERNAL rsp
    %if regs_used > 4
        pop r5
        pop r4
    %endif
    ret
%endmacro

%macro REP_RET 0
    %if regs_used > 4 || xmm_regs_used > 6
        RET
    %else
        rep ret
    %endif
%endmacro

%elifdef ARCH_X86_64 ; *nix x64 ;=============================================

DECLARE_REG 0, rdi, edi, di,  dil, edi
DECLARE_REG 1, rsi, esi, si,  sil, esi
DECLARE_REG 2, rdx, edx, dx,  dl,  edx
DECLARE_REG 3, rcx, ecx, cx,  cl,  ecx
DECLARE_REG 4, r8,  r8d, r8w, r8b, r8d
DECLARE_REG 5, r9,  r9d, r9w, r9b, r9d
DECLARE_REG 6, rax, eax, ax,  al,  [rsp + stack_offset + 8]
%define r7m [rsp + stack_offset + 16]
%define r8m [rsp + stack_offset + 24]

%macro LOAD_IF_USED 2 ; reg_id, number_of_args
    %if %1 < %2
        mov r%1, [rsp - 40 + %1*8]
    %endif
%endmacro

%macro PROLOGUE 2-4+ ; #args, #regs, #xmm_regs, arg_names...
    ASSERT %2 >= %1
    ASSERT %2 <= 7
    LOAD_IF_USED 6, %1
    DEFINE_ARGS %4
%endmacro

%macro RET 0
    ret
%endmacro

%macro REP_RET 0
    rep ret
%endmacro

%else ; X86_32 ;==============================================================

DECLARE_REG 0, eax, eax, ax, al,   [esp + stack_offset + 4]
DECLARE_REG 1, ecx, ecx, cx, cl,   [esp + stack_offset + 8]
DECLARE_REG 2, edx, edx, dx, dl,   [esp + stack_offset + 12]
DECLARE_REG 3, ebx, ebx, bx, bl,   [esp + stack_offset + 16]
DECLARE_REG 4, esi, esi, si, null, [esp + stack_offset + 20]
DECLARE_REG 5, edi, edi, di, null, [esp + stack_offset + 24]
DECLARE_REG 6, ebp, ebp, bp, null, [esp + stack_offset + 28]
%define r7m [esp + stack_offset + 32]
%define r8m [esp + stack_offset + 36]
%define rsp esp

%macro PUSH_IF_USED 1 ; reg_id
    %if %1 < regs_used
        push r%1
        %assign stack_offset stack_offset+4
    %endif
%endmacro

%macro POP_IF_USED 1 ; reg_id
    %if %1 < regs_used
        pop r%1
    %endif
%endmacro

%macro LOAD_IF_USED 2 ; reg_id, number_of_args
    %if %1 < %2
        mov r%1, [esp + stack_offset + 4 + %1*4]
    %endif
%endmacro

%macro PROLOGUE 2-4+ ; #args, #regs, #xmm_regs (ignored), arg_names...
    ASSERT %2 >= %1
    %assign regs_used %2
    ASSERT regs_used <= 7
    PUSH_IF_USED 3
    PUSH_IF_USED 4
    PUSH_IF_USED 5
    PUSH_IF_USED 6
    LOAD_IF_USED 0, %1
    LOAD_IF_USED 1, %1
    LOAD_IF_USED 2, %1
    LOAD_IF_USED 3, %1
    LOAD_IF_USED 4, %1
    LOAD_IF_USED 5, %1
    LOAD_IF_USED 6, %1
    DEFINE_ARGS %4
%endmacro

%macro RET 0
    POP_IF_USED 6
    POP_IF_USED 5
    POP_IF_USED 4
    POP_IF_USED 3
    ret
%endmacro

%macro REP_RET 0
    %if regs_used > 3
        RET
    %else
        rep ret
    %endif
%endmacro

%endif ;======================================================================


;=============================================================================
; arch-independent part
;=============================================================================

%assign function_align 16

; Symbol prefix for C linkage
%macro cglobal 1-2+
    %xdefine %1 ff_%1
    %ifdef PREFIX
        %xdefine %1 _ %+ %1
    %endif
    %ifidn __OUTPUT_FORMAT__,elf
        global %1:function hidden
    %else
        global %1
    %endif
    align function_align
    %1:
    RESET_MM_PERMUTATION ; not really needed, but makes disassembly somewhat nicer
    %assign stack_offset 0
    %if %0 > 1
        PROLOGUE %2
    %endif
%endmacro

%macro cextern 1
    %ifdef PREFIX
        %xdefine %1 _%1
    %endif
    extern %1
%endmacro
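
; A minimal sketch (not part of the original file; the symbol names are made
; up): cglobal renames the label to ff_* (plus a leading underscore when
; PREFIX is defined), emits the global/align directives and, given extra
; arguments, runs PROLOGUE; cextern applies the PREFIX underscore to symbols
; defined elsewhere.
;     cextern scale_tab
;     cglobal scale_block, 2,3,0, dst, src, tmp
;         ; ... body, e.g. loads from [scale_tab GLOBAL] ...
;         RET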

; This is needed for ELF, otherwise the GNU linker assumes the stack is
; executable by default.
%ifidn __OUTPUT_FORMAT__,elf
SECTION .note.GNU-stack noalloc noexec nowrite progbits
%endif

%assign FENC_STRIDE 16
%assign FDEC_STRIDE 32

; merge mmx and sse*

%macro CAT_XDEFINE 3
    %xdefine %1%2 %3
%endmacro

%macro CAT_UNDEF 2
    %undef %1%2
%endmacro

%macro INIT_MMX 0
    %define RESET_MM_PERMUTATION INIT_MMX
    %define mmsize 8
    %define num_mmregs 8
    %define mova movq
    %define movu movq
    %define movh movd
    %define movnt movntq
    %assign %%i 0
    %rep 8
    CAT_XDEFINE m, %%i, mm %+ %%i
    CAT_XDEFINE nmm, %%i, %%i
    %assign %%i %%i+1
    %endrep
    %rep 8
    CAT_UNDEF m, %%i
    CAT_UNDEF nmm, %%i
    %assign %%i %%i+1
    %endrep
%endmacro

%macro INIT_XMM 0
    %define RESET_MM_PERMUTATION INIT_XMM
    %define mmsize 16
    %define num_mmregs 8
    %ifdef ARCH_X86_64
    %define num_mmregs 16
    %endif
    %define mova movdqa
    %define movu movdqu
    %define movh movq
    %define movnt movntdq
    %assign %%i 0
    %rep num_mmregs
    CAT_XDEFINE m, %%i, xmm %+ %%i
    CAT_XDEFINE nxmm, %%i, %%i
    %assign %%i %%i+1
    %endrep
%endmacro

INIT_MMX
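
; A minimal sketch (not part of the original file; the macro name is made up):
; code written against mova/movu/m# assembles as an MMX or an SSE2 version
; depending on which INIT_* mode is active when it is instantiated.
;     %macro AVG_PIX 0
;         mova  m0, [r1]
;         pavgb m0, [r2]
;         mova  [r0], m0
;     %endmacro
;     INIT_MMX
;     AVG_PIX                 ; mm0, movq
;     INIT_XMM
;     AVG_PIX                 ; xmm0, movdqa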

; I often want to use macros that permute their arguments. e.g. there's no
; efficient way to implement butterfly or transpose or dct without swapping some
; arguments.
;
; I would like to not have to manually keep track of the permutations:
; If I insert a permutation in the middle of a function, it should automatically
; change everything that follows. For more complex macros I may also have multiple
; implementations, e.g. the SSE2 and SSSE3 versions may have different permutations.
;
; Hence these macros. Insert a PERMUTE or some SWAPs at the end of a macro that
; permutes its arguments. It's equivalent to exchanging the contents of the
; registers, except that this way you exchange the register names instead, so it
; doesn't cost any cycles.

%macro PERMUTE 2-* ; takes a list of pairs to swap
%rep %0/2
    %xdefine tmp%2 m%2
    %xdefine ntmp%2 nm%2
    %rotate 2
%endrep
%rep %0/2
    %xdefine m%1 tmp%2
    %xdefine nm%1 ntmp%2
    %undef tmp%2
    %undef ntmp%2
    %rotate 2
%endrep
%endmacro

%macro SWAP 2-* ; swaps a single chain (sometimes more concise than pairs)
%rep %0-1
%ifdef m%1
    %xdefine tmp m%1
    %xdefine m%1 m%2
    %xdefine m%2 tmp
    CAT_XDEFINE n, m%1, %1
    CAT_XDEFINE n, m%2, %2
%else
    ; If we were called as "SWAP m0,m1" rather than "SWAP 0,1" infer the original numbers here.
    ; Be careful using this mode in nested macros though, as in some cases there may be
    ; other copies of m# that have already been dereferenced and don't get updated correctly.
    %xdefine %%n1 n %+ %1
    %xdefine %%n2 n %+ %2
    %xdefine tmp m %+ %%n1
    CAT_XDEFINE m, %%n1, m %+ %%n2
    CAT_XDEFINE m, %%n2, tmp
    CAT_XDEFINE n, m %+ %%n1, %%n1
    CAT_XDEFINE n, m %+ %%n2, %%n2
%endif
    %undef tmp
    %rotate 1
%endrep
%endmacro
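
; A minimal sketch (not part of the original file): after a butterfly, SWAP
; renames the registers so later code can keep using the same m# names for
; the same logical values, without a real register-to-register move.
;     mova  m2, m0
;     paddw m0, m1            ; m0 = a+b
;     psubw m2, m1            ; m2 = a-b
;     SWAP 1, 2               ; "m1" now names the register holding a-b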

%macro SAVE_MM_PERMUTATION 1
    %assign %%i 0
    %rep num_mmregs
    CAT_XDEFINE %1_m, %%i, m %+ %%i
    %assign %%i %%i+1
    %endrep
%endmacro

%macro LOAD_MM_PERMUTATION 1
    %assign %%i 0
    %rep num_mmregs
    CAT_XDEFINE m, %%i, %1_m %+ %%i
    CAT_XDEFINE n, m %+ %%i, %%i
    %assign %%i %%i+1
    %endrep
%endmacro

%macro call 1
    call %1
    %ifdef %1_m0
        LOAD_MM_PERMUTATION %1
    %endif
%endmacro
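
; A minimal sketch (not part of the original file; the label is made up): a
; helper that ends with its registers permuted records the mapping under its
; own name, and the call wrapper above restores that mapping in the caller.
;     some_helper:
;         ; ...
;         SWAP 0, 3
;         SAVE_MM_PERMUTATION some_helper
;         ret
;     ; in the caller:
;     call some_helper        ; caller adopts some_helper's m# mapping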

; Substitutions that reduce instruction size but are functionally equivalent
%define movdqa movaps
%define movdqu movups

%macro add 2
    %ifnum %2
        %if %2==128
            sub %1, -128
        %else
            add %1, %2
        %endif
    %else
        add %1, %2
    %endif
%endmacro

%macro sub 2
    %ifnum %2
        %if %2==128
            add %1, -128
        %else
            sub %1, %2
        %endif
    %else
        sub %1, %2
    %endif
%endmacro
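
; Why the 128 special case (an explanatory note, not part of the original
; file): immediates from -128 to 127 fit in a sign-extended byte, so
;     add r0, 128             ; needs a 4-byte immediate
; is rewritten as
;     sub r0, -128            ; -128 fits in an imm8
; which produces the same value in three fewer bytes.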