ffmpeg / libavutil / intreadwrite.h @ 7918375f


/*
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#ifndef AVUTIL_INTREADWRITE_H
#define AVUTIL_INTREADWRITE_H

#include <stdint.h>
#include "config.h"
#include "attributes.h"
#include "bswap.h"

typedef union {
    uint64_t u64;
    uint32_t u32[2];
    uint16_t u16[4];
    uint8_t  u8 [8];
    double   f64;
    float    f32[2];
} av_alias av_alias64;

typedef union {
    uint32_t u32;
    uint16_t u16[2];
    uint8_t  u8 [4];
    float    f32;
} av_alias av_alias32;

typedef union {
    uint16_t u16;
    uint8_t  u8 [2];
} av_alias av_alias16;
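/*
 * The av_alias attribute (from attributes.h) marks these unions as
 * may_alias, so reinterpreting memory through them does not violate
 * strict-aliasing rules. Illustrative sketch (variable names are
 * hypothetical):
 *
 *     av_alias32 a;
 *     a.f32 = 1.0f;
 *     uint32_t bits = a.u32;   // bit pattern of the float, 0x3f800000
 */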
/*
 * Arch-specific headers can provide any combination of
 * AV_[RW][BLN](16|24|32|64) and AV_(COPY|SWAP|ZERO)(64|128) macros.
 * Preprocessor symbols must be defined, even if these are implemented
 * as inline functions.
 */

#if   ARCH_ARM
#   include "arm/intreadwrite.h"
#elif ARCH_AVR32
#   include "avr32/intreadwrite.h"
#elif ARCH_MIPS
#   include "mips/intreadwrite.h"
#elif ARCH_PPC
#   include "ppc/intreadwrite.h"
#elif ARCH_TOMI
#   include "tomi/intreadwrite.h"
#elif ARCH_X86
#   include "x86/intreadwrite.h"
#endif
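/*
 * Hypothetical sketch of what such an arch-specific header might contain:
 * an inline function plus a self-referencing #define so the #ifdef checks
 * below can detect it (details vary per architecture; memcpy stands in
 * for an arch-specific unaligned load):
 *
 *     static av_always_inline uint32_t AV_RN32(const void *p)
 *     {
 *         uint32_t v;
 *         memcpy(&v, p, sizeof(v));
 *         return v;
 *     }
 *     #define AV_RN32 AV_RN32
 */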
/*
 * Map AV_RNXX <-> AV_R[BL]XX for all variants provided by per-arch headers.
 */
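/*
 * For example, on a big-endian target where the arch header provided only
 * AV_RB32, the block below reduces to
 *
 *     #define AV_RN32(p) AV_RB32(p)
 *
 * and, conversely, AV_RB32 is defined from AV_RN32 when only the
 * native-order macro exists.
 */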
#if HAVE_BIGENDIAN

#   if    defined(AV_RN16) && !defined(AV_RB16)
#       define AV_RB16(p) AV_RN16(p)
#   elif !defined(AV_RN16) &&  defined(AV_RB16)
#       define AV_RN16(p) AV_RB16(p)
#   endif

#   if    defined(AV_WN16) && !defined(AV_WB16)
#       define AV_WB16(p, v) AV_WN16(p, v)
#   elif !defined(AV_WN16) &&  defined(AV_WB16)
#       define AV_WN16(p, v) AV_WB16(p, v)
#   endif

#   if    defined(AV_RN24) && !defined(AV_RB24)
#       define AV_RB24(p) AV_RN24(p)
#   elif !defined(AV_RN24) &&  defined(AV_RB24)
#       define AV_RN24(p) AV_RB24(p)
#   endif

#   if    defined(AV_WN24) && !defined(AV_WB24)
#       define AV_WB24(p, v) AV_WN24(p, v)
#   elif !defined(AV_WN24) &&  defined(AV_WB24)
#       define AV_WN24(p, v) AV_WB24(p, v)
#   endif

#   if    defined(AV_RN32) && !defined(AV_RB32)
#       define AV_RB32(p) AV_RN32(p)
#   elif !defined(AV_RN32) &&  defined(AV_RB32)
#       define AV_RN32(p) AV_RB32(p)
#   endif

#   if    defined(AV_WN32) && !defined(AV_WB32)
#       define AV_WB32(p, v) AV_WN32(p, v)
#   elif !defined(AV_WN32) &&  defined(AV_WB32)
#       define AV_WN32(p, v) AV_WB32(p, v)
#   endif

#   if    defined(AV_RN64) && !defined(AV_RB64)
#       define AV_RB64(p) AV_RN64(p)
#   elif !defined(AV_RN64) &&  defined(AV_RB64)
#       define AV_RN64(p) AV_RB64(p)
#   endif

#   if    defined(AV_WN64) && !defined(AV_WB64)
#       define AV_WB64(p, v) AV_WN64(p, v)
#   elif !defined(AV_WN64) &&  defined(AV_WB64)
#       define AV_WN64(p, v) AV_WB64(p, v)
#   endif

#else /* HAVE_BIGENDIAN */

#   if    defined(AV_RN16) && !defined(AV_RL16)
#       define AV_RL16(p) AV_RN16(p)
#   elif !defined(AV_RN16) &&  defined(AV_RL16)
#       define AV_RN16(p) AV_RL16(p)
#   endif

#   if    defined(AV_WN16) && !defined(AV_WL16)
#       define AV_WL16(p, v) AV_WN16(p, v)
#   elif !defined(AV_WN16) &&  defined(AV_WL16)
#       define AV_WN16(p, v) AV_WL16(p, v)
#   endif

#   if    defined(AV_RN24) && !defined(AV_RL24)
#       define AV_RL24(p) AV_RN24(p)
#   elif !defined(AV_RN24) &&  defined(AV_RL24)
#       define AV_RN24(p) AV_RL24(p)
#   endif

#   if    defined(AV_WN24) && !defined(AV_WL24)
#       define AV_WL24(p, v) AV_WN24(p, v)
#   elif !defined(AV_WN24) &&  defined(AV_WL24)
#       define AV_WN24(p, v) AV_WL24(p, v)
#   endif

#   if    defined(AV_RN32) && !defined(AV_RL32)
#       define AV_RL32(p) AV_RN32(p)
#   elif !defined(AV_RN32) &&  defined(AV_RL32)
#       define AV_RN32(p) AV_RL32(p)
#   endif

#   if    defined(AV_WN32) && !defined(AV_WL32)
#       define AV_WL32(p, v) AV_WN32(p, v)
#   elif !defined(AV_WN32) &&  defined(AV_WL32)
#       define AV_WN32(p, v) AV_WL32(p, v)
#   endif

#   if    defined(AV_RN64) && !defined(AV_RL64)
#       define AV_RL64(p) AV_RN64(p)
#   elif !defined(AV_RN64) &&  defined(AV_RL64)
#       define AV_RN64(p) AV_RL64(p)
#   endif

#   if    defined(AV_WN64) && !defined(AV_WL64)
#       define AV_WL64(p, v) AV_WN64(p, v)
#   elif !defined(AV_WN64) &&  defined(AV_WL64)
#       define AV_WN64(p, v) AV_WL64(p, v)
#   endif

#endif /* !HAVE_BIGENDIAN */

/*
 * Define AV_[RW]N helper macros to simplify definitions not provided
 * by per-arch headers.
 */

#if   HAVE_ATTRIBUTE_PACKED

union unaligned_64 { uint64_t l; } __attribute__((packed)) av_alias;
union unaligned_32 { uint32_t l; } __attribute__((packed)) av_alias;
union unaligned_16 { uint16_t l; } __attribute__((packed)) av_alias;

#   define AV_RN(s, p) (((const union unaligned_##s *) (p))->l)
#   define AV_WN(s, p, v) ((((union unaligned_##s *) (p))->l) = (v))
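/*
 * With these definitions, AV_RN(32, p) expands to
 * ((const union unaligned_32 *)(p))->l: the packed attribute tells the
 * compiler the access may be misaligned, and av_alias exempts it from
 * strict-aliasing assumptions.
 */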
#elif defined(__DECC)

#   define AV_RN(s, p) (*((const __unaligned uint##s##_t*)(p)))
#   define AV_WN(s, p, v) (*((__unaligned uint##s##_t*)(p)) = (v))

#elif HAVE_FAST_UNALIGNED

#   define AV_RN(s, p) (((const av_alias##s*)(p))->u##s)
#   define AV_WN(s, p, v) (((av_alias##s*)(p))->u##s = (v))

#else

#ifndef AV_RB16
#   define AV_RB16(x)                           \
    ((((const uint8_t*)(x))[0] << 8) |          \
      ((const uint8_t*)(x))[1])
#endif
#ifndef AV_WB16
#   define AV_WB16(p, d) do {                   \
        ((uint8_t*)(p))[1] = (d);               \
        ((uint8_t*)(p))[0] = (d)>>8;            \
    } while(0)
#endif

#ifndef AV_RL16
#   define AV_RL16(x)                           \
    ((((const uint8_t*)(x))[1] << 8) |          \
      ((const uint8_t*)(x))[0])
#endif
#ifndef AV_WL16
#   define AV_WL16(p, d) do {                   \
        ((uint8_t*)(p))[0] = (d);               \
        ((uint8_t*)(p))[1] = (d)>>8;            \
    } while(0)
#endif

#ifndef AV_RB32
#   define AV_RB32(x)                           \
    ((((const uint8_t*)(x))[0] << 24) |         \
     (((const uint8_t*)(x))[1] << 16) |         \
     (((const uint8_t*)(x))[2] <<  8) |         \
      ((const uint8_t*)(x))[3])
#endif
#ifndef AV_WB32
#   define AV_WB32(p, d) do {                   \
        ((uint8_t*)(p))[3] = (d);               \
        ((uint8_t*)(p))[2] = (d)>>8;            \
        ((uint8_t*)(p))[1] = (d)>>16;           \
        ((uint8_t*)(p))[0] = (d)>>24;           \
    } while(0)
#endif

#ifndef AV_RL32
#   define AV_RL32(x)                           \
    ((((const uint8_t*)(x))[3] << 24) |         \
     (((const uint8_t*)(x))[2] << 16) |         \
     (((const uint8_t*)(x))[1] <<  8) |         \
      ((const uint8_t*)(x))[0])
#endif
#ifndef AV_WL32
#   define AV_WL32(p, d) do {                   \
        ((uint8_t*)(p))[0] = (d);               \
        ((uint8_t*)(p))[1] = (d)>>8;            \
        ((uint8_t*)(p))[2] = (d)>>16;           \
        ((uint8_t*)(p))[3] = (d)>>24;           \
    } while(0)
#endif

#ifndef AV_RB64
#   define AV_RB64(x)                                   \
    (((uint64_t)((const uint8_t*)(x))[0] << 56) |       \
     ((uint64_t)((const uint8_t*)(x))[1] << 48) |       \
     ((uint64_t)((const uint8_t*)(x))[2] << 40) |       \
     ((uint64_t)((const uint8_t*)(x))[3] << 32) |       \
     ((uint64_t)((const uint8_t*)(x))[4] << 24) |       \
     ((uint64_t)((const uint8_t*)(x))[5] << 16) |       \
     ((uint64_t)((const uint8_t*)(x))[6] <<  8) |       \
      (uint64_t)((const uint8_t*)(x))[7])
#endif
#ifndef AV_WB64
#   define AV_WB64(p, d) do {                   \
        ((uint8_t*)(p))[7] = (d);               \
        ((uint8_t*)(p))[6] = (d)>>8;            \
        ((uint8_t*)(p))[5] = (d)>>16;           \
        ((uint8_t*)(p))[4] = (d)>>24;           \
        ((uint8_t*)(p))[3] = (d)>>32;           \
        ((uint8_t*)(p))[2] = (d)>>40;           \
        ((uint8_t*)(p))[1] = (d)>>48;           \
        ((uint8_t*)(p))[0] = (d)>>56;           \
    } while(0)
#endif

#ifndef AV_RL64
#   define AV_RL64(x)                                   \
    (((uint64_t)((const uint8_t*)(x))[7] << 56) |       \
     ((uint64_t)((const uint8_t*)(x))[6] << 48) |       \
     ((uint64_t)((const uint8_t*)(x))[5] << 40) |       \
     ((uint64_t)((const uint8_t*)(x))[4] << 32) |       \
     ((uint64_t)((const uint8_t*)(x))[3] << 24) |       \
     ((uint64_t)((const uint8_t*)(x))[2] << 16) |       \
     ((uint64_t)((const uint8_t*)(x))[1] <<  8) |       \
      (uint64_t)((const uint8_t*)(x))[0])
#endif
#ifndef AV_WL64
#   define AV_WL64(p, d) do {                   \
        ((uint8_t*)(p))[0] = (d);               \
        ((uint8_t*)(p))[1] = (d)>>8;            \
        ((uint8_t*)(p))[2] = (d)>>16;           \
        ((uint8_t*)(p))[3] = (d)>>24;           \
        ((uint8_t*)(p))[4] = (d)>>32;           \
        ((uint8_t*)(p))[5] = (d)>>40;           \
        ((uint8_t*)(p))[6] = (d)>>48;           \
        ((uint8_t*)(p))[7] = (d)>>56;           \
    } while(0)
#endif

#if HAVE_BIGENDIAN
#   define AV_RN(s, p)    AV_RB##s(p)
#   define AV_WN(s, p, v) AV_WB##s(p, v)
#else
#   define AV_RN(s, p)    AV_RL##s(p)
#   define AV_WN(s, p, v) AV_WL##s(p, v)
#endif

#endif /* HAVE_FAST_UNALIGNED */
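/*
 * Usage sketch (dst and src are hypothetical byte pointers): copy four
 * bytes in host byte order, regardless of alignment:
 *
 *     AV_WN32(dst, AV_RN32(src));
 */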
#ifndef AV_RN16
#   define AV_RN16(p) AV_RN(16, p)
#endif

#ifndef AV_RN32
#   define AV_RN32(p) AV_RN(32, p)
#endif

#ifndef AV_RN64
#   define AV_RN64(p) AV_RN(64, p)
#endif

#ifndef AV_WN16
#   define AV_WN16(p, v) AV_WN(16, p, v)
#endif

#ifndef AV_WN32
#   define AV_WN32(p, v) AV_WN(32, p, v)
#endif

#ifndef AV_WN64
#   define AV_WN64(p, v) AV_WN(64, p, v)
#endif

#if HAVE_BIGENDIAN
#   define AV_RB(s, p)    AV_RN##s(p)
#   define AV_WB(s, p, v) AV_WN##s(p, v)
#   define AV_RL(s, p)    bswap_##s(AV_RN##s(p))
#   define AV_WL(s, p, v) AV_WN##s(p, bswap_##s(v))
#else
#   define AV_RB(s, p)    bswap_##s(AV_RN##s(p))
#   define AV_WB(s, p, v) AV_WN##s(p, bswap_##s(v))
#   define AV_RL(s, p)    AV_RN##s(p)
#   define AV_WL(s, p, v) AV_WN##s(p, v)
#endif
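/*
 * On a little-endian host this makes, e.g., AV_RB(32, p) expand to
 * bswap_32(AV_RN32(p)): a single native-order load followed by a byte
 * swap, instead of assembling the value byte by byte.
 */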
#define AV_RB8(x)     (((const uint8_t*)(x))[0])
#define AV_WB8(p, d)  do { ((uint8_t*)(p))[0] = (d); } while(0)

#define AV_RL8(x)     AV_RB8(x)
#define AV_WL8(p, d)  AV_WB8(p, d)

#ifndef AV_RB16
#   define AV_RB16(p)    AV_RB(16, p)
#endif
#ifndef AV_WB16
#   define AV_WB16(p, v) AV_WB(16, p, v)
#endif

#ifndef AV_RL16
#   define AV_RL16(p)    AV_RL(16, p)
#endif
#ifndef AV_WL16
#   define AV_WL16(p, v) AV_WL(16, p, v)
#endif

#ifndef AV_RB32
#   define AV_RB32(p)    AV_RB(32, p)
#endif
#ifndef AV_WB32
#   define AV_WB32(p, v) AV_WB(32, p, v)
#endif

#ifndef AV_RL32
#   define AV_RL32(p)    AV_RL(32, p)
#endif
#ifndef AV_WL32
#   define AV_WL32(p, v) AV_WL(32, p, v)
#endif

#ifndef AV_RB64
#   define AV_RB64(p)    AV_RB(64, p)
#endif
#ifndef AV_WB64
#   define AV_WB64(p, v) AV_WB(64, p, v)
#endif

#ifndef AV_RL64
#   define AV_RL64(p)    AV_RL(64, p)
#endif
#ifndef AV_WL64
#   define AV_WL64(p, v) AV_WL(64, p, v)
#endif

#ifndef AV_RB24
#   define AV_RB24(x)                           \
    ((((const uint8_t*)(x))[0] << 16) |         \
     (((const uint8_t*)(x))[1] <<  8) |         \
      ((const uint8_t*)(x))[2])
#endif
#ifndef AV_WB24
#   define AV_WB24(p, d) do {                   \
        ((uint8_t*)(p))[2] = (d);               \
        ((uint8_t*)(p))[1] = (d)>>8;            \
        ((uint8_t*)(p))[0] = (d)>>16;           \
    } while(0)
#endif

#ifndef AV_RL24
#   define AV_RL24(x)                           \
    ((((const uint8_t*)(x))[2] << 16) |         \
     (((const uint8_t*)(x))[1] <<  8) |         \
      ((const uint8_t*)(x))[0])
#endif
#ifndef AV_WL24
#   define AV_WL24(p, d) do {                   \
        ((uint8_t*)(p))[0] = (d);               \
        ((uint8_t*)(p))[1] = (d)>>8;            \
        ((uint8_t*)(p))[2] = (d)>>16;           \
    } while(0)
#endif
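/*
 * Usage sketch (p is a hypothetical byte pointer): read a 3-byte
 * big-endian field, e.g. the bytes { 0x00, 0x00, 0x01 } yield 0x000001:
 *
 *     unsigned v = AV_RB24(p);
 */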
/*
 * The AV_[RW]NA macros access naturally aligned data
 * in a type-safe way.
 */

#define AV_RNA(s, p)    (((const av_alias##s*)(p))->u##s)
#define AV_WNA(s, p, v) (((av_alias##s*)(p))->u##s = (v))

#ifndef AV_RN16A
#   define AV_RN16A(p) AV_RNA(16, p)
#endif

#ifndef AV_RN32A
#   define AV_RN32A(p) AV_RNA(32, p)
#endif

#ifndef AV_RN64A
#   define AV_RN64A(p) AV_RNA(64, p)
#endif

#ifndef AV_WN16A
#   define AV_WN16A(p, v) AV_WNA(16, p, v)
#endif

#ifndef AV_WN32A
#   define AV_WN32A(p, v) AV_WNA(32, p, v)
#endif

#ifndef AV_WN64A
#   define AV_WN64A(p, v) AV_WNA(64, p, v)
#endif
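/*
 * Usage sketch (hypothetical; p must be at least 32-bit aligned):
 *
 *     uint32_t v = AV_RN32A(p);   // host-order aligned load
 *     AV_WN32A(p, v + 1);         // host-order aligned store
 */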
/* Parameters for AV_COPY*, AV_SWAP*, AV_ZERO* must be
 * naturally aligned. They may be implemented using MMX,
 * so emms_c() must be called before using any float code
 * afterwards.
 */

#define AV_COPY(n, d, s) \
    (((av_alias##n*)(d))->u##n = ((const av_alias##n*)(s))->u##n)

#ifndef AV_COPY16
#   define AV_COPY16(d, s) AV_COPY(16, d, s)
#endif

#ifndef AV_COPY32
#   define AV_COPY32(d, s) AV_COPY(32, d, s)
#endif

#ifndef AV_COPY64
#   define AV_COPY64(d, s) AV_COPY(64, d, s)
#endif

#ifndef AV_COPY128
#   define AV_COPY128(d, s)                    \
    do {                                       \
        AV_COPY64(d, s);                       \
        AV_COPY64((char*)(d)+8, (char*)(s)+8); \
    } while(0)
#endif
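/*
 * Usage sketch (dst and src are hypothetical, both naturally aligned):
 * copy a 16-byte block; per the note above, call emms_c() before any
 * float code if an MMX-based implementation is in use:
 *
 *     AV_COPY128(dst, src);
 */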
#define AV_SWAP(n, a, b) FFSWAP(av_alias##n, *(av_alias##n*)(a), *(av_alias##n*)(b))

#ifndef AV_SWAP64
#   define AV_SWAP64(a, b) AV_SWAP(64, a, b)
#endif

#define AV_ZERO(n, d) (((av_alias##n*)(d))->u##n = 0)

#ifndef AV_ZERO16
#   define AV_ZERO16(d) AV_ZERO(16, d)
#endif

#ifndef AV_ZERO32
#   define AV_ZERO32(d) AV_ZERO(32, d)
#endif

#ifndef AV_ZERO64
#   define AV_ZERO64(d) AV_ZERO(64, d)
#endif

#ifndef AV_ZERO128
#   define AV_ZERO128(d)         \
    do {                         \
        AV_ZERO64(d);            \
        AV_ZERO64((char*)(d)+8); \
    } while(0)
#endif
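/*
 * Usage sketch (blk is hypothetical and naturally aligned): clear a
 * 16-byte block:
 *
 *     AV_ZERO128(blk);
 */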
#endif /* AVUTIL_INTREADWRITE_H */