Revision 99969243

View differences:

postproc/rgb2rgb.c
@@ -13,6 +13,9 @@
 #include "../mmx_defs.h"
 
 #ifdef HAVE_MMX
+static const uint64_t mask32b  __attribute__((aligned(8))) = 0x000000FF000000FFULL;
+static const uint64_t mask32g  __attribute__((aligned(8))) = 0x0000FF000000FF00ULL;
+static const uint64_t mask32r  __attribute__((aligned(8))) = 0x00FF000000FF0000ULL;
 static const uint64_t mask32   __attribute__((aligned(8))) = 0x00FFFFFF00FFFFFFULL;
 static const uint64_t mask24l  __attribute__((aligned(8))) = 0x0000000000FFFFFFULL;
 static const uint64_t mask24h  __attribute__((aligned(8))) = 0x0000FFFFFF000000ULL;
@@ -28,6 +31,20 @@
 static const uint64_t red_15mask  __attribute__((aligned(8))) = 0x00007c000000f800ULL;
 static const uint64_t green_15mask __attribute__((aligned(8)))= 0x000003e0000007e0ULL;
 static const uint64_t blue_15mask __attribute__((aligned(8))) = 0x0000001f0000001fULL;
+#if 0
+static volatile uint64_t __attribute__((aligned(8))) b5Dither;
+static volatile uint64_t __attribute__((aligned(8))) g5Dither;
+static volatile uint64_t __attribute__((aligned(8))) g6Dither;
+static volatile uint64_t __attribute__((aligned(8))) r5Dither;
+
+static uint64_t __attribute__((aligned(8))) dither4[2]={
+	0x0103010301030103LL,
+	0x0200020002000200LL,};
+
+static uint64_t __attribute__((aligned(8))) dither8[2]={
+	0x0602060206020602LL,
+	0x0004000400040004LL,};
+#endif
 #endif
 
 void rgb24to32(const uint8_t *src,uint8_t *dst,unsigned src_size)
@@ -561,6 +578,43 @@
 	for(i=0; i<num_pixels; i++)
 		((uint16_t *)dst)[i] = ((uint16_t *)palette)[ src[i] ];
 }
+
+void rgb32tobgr32(const uint8_t *src, uint8_t *dst, unsigned int src_size)
+{
+	int num_pixels= src_size >> 2;
+#ifdef HAVE_MMX
+	asm volatile (
+		"xorl %%eax, %%eax		\n\t"
+		"1:				\n\t"
+		PREFETCH" 32(%0, %%eax)		\n\t"
+		"movq (%0, %%eax), %%mm0	\n\t"
+		"movq %%mm0, %%mm1		\n\t"
+		"movq %%mm0, %%mm2		\n\t"
+		"pslld $16, %%mm0		\n\t"
+		"psrld $16, %%mm1		\n\t"
+		"pand mask32r, %%mm0		\n\t"
+		"pand mask32g, %%mm2		\n\t"
+		"pand mask32b, %%mm1		\n\t"
+		"por %%mm0, %%mm2		\n\t"
+		"por %%mm1, %%mm2		\n\t"
+		MOVNTQ" %%mm2, (%1, %%eax)	\n\t"
+		"addl $2, %%eax			\n\t"
+		"cmpl %2, %%eax			\n\t"
+		" jb 1b				\n\t"
+		:: "r" (src), "r"(dst), "r" (num_pixels)
+		: "%eax"
+	);
+#else
+	int i;
+	for(i=0; i<num_pixels; i++)
+	{
+		dst[4*i + 0] = src[4*i + 2];
+		dst[4*i + 1] = src[4*i + 1];
+		dst[4*i + 2] = src[4*i + 0];
+	}
+#endif
+}
+
 /**
  *
  * height should be a multiple of 2 and width should be a multiple of 16 (if this is a
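The rgb32tobgr32() routine added above swaps the red and blue channels of packed 32-bit pixels. The MMX path handles two pixels per movq: it shifts each dword left and right by 16 bits, keeps one channel from each copy via the new mask32r/mask32g/mask32b constants, and ORs the three results back together. The following is a minimal standalone sketch of that same shift-and-mask idea on a single 32-bit pixel; it is illustration only, not part of the patch, and the name swap_rb is made up:

#include <stdint.h>
#include <stdio.h>

/* Scalar version of the trick used by the MMX loop above: shift the pixel
 * so byte 0 lands in the slot of byte 2 and vice versa, keep only the
 * wanted byte from each copy, then OR the three copies together. */
static uint32_t swap_rb(uint32_t p)
{
	uint32_t hi  = (p << 16) & 0x00FF0000;	/* like pslld $16 + pand mask32r */
	uint32_t mid =  p        & 0x0000FF00;	/* like            pand mask32g */
	uint32_t lo  = (p >> 16) & 0x000000FF;	/* like psrld $16 + pand mask32b */
	return hi | mid | lo;			/* byte 3 comes out zero, as in the MMX path */
}

int main(void)
{
	printf("%08x\n", swap_rb(0x00112233));	/* prints 00332211 */
	return 0;
}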
postproc/rgb2rgb_template.c
@@ -13,6 +13,9 @@
 #include "../mmx_defs.h"
 
 #ifdef HAVE_MMX
+static const uint64_t mask32b  __attribute__((aligned(8))) = 0x000000FF000000FFULL;
+static const uint64_t mask32g  __attribute__((aligned(8))) = 0x0000FF000000FF00ULL;
+static const uint64_t mask32r  __attribute__((aligned(8))) = 0x00FF000000FF0000ULL;
 static const uint64_t mask32   __attribute__((aligned(8))) = 0x00FFFFFF00FFFFFFULL;
 static const uint64_t mask24l  __attribute__((aligned(8))) = 0x0000000000FFFFFFULL;
 static const uint64_t mask24h  __attribute__((aligned(8))) = 0x0000FFFFFF000000ULL;
@@ -28,6 +31,20 @@
 static const uint64_t red_15mask  __attribute__((aligned(8))) = 0x00007c000000f800ULL;
 static const uint64_t green_15mask __attribute__((aligned(8)))= 0x000003e0000007e0ULL;
 static const uint64_t blue_15mask __attribute__((aligned(8))) = 0x0000001f0000001fULL;
+#if 0
+static volatile uint64_t __attribute__((aligned(8))) b5Dither;
+static volatile uint64_t __attribute__((aligned(8))) g5Dither;
+static volatile uint64_t __attribute__((aligned(8))) g6Dither;
+static volatile uint64_t __attribute__((aligned(8))) r5Dither;
+
+static uint64_t __attribute__((aligned(8))) dither4[2]={
+	0x0103010301030103LL,
+	0x0200020002000200LL,};
+
+static uint64_t __attribute__((aligned(8))) dither8[2]={
+	0x0602060206020602LL,
+	0x0004000400040004LL,};
+#endif
 #endif
 
 void rgb24to32(const uint8_t *src,uint8_t *dst,unsigned src_size)
@@ -561,6 +578,43 @@
 	for(i=0; i<num_pixels; i++)
 		((uint16_t *)dst)[i] = ((uint16_t *)palette)[ src[i] ];
 }
+
+void rgb32tobgr32(const uint8_t *src, uint8_t *dst, unsigned int src_size)
+{
+	int num_pixels= src_size >> 2;
+#ifdef HAVE_MMX
+	asm volatile (
+		"xorl %%eax, %%eax		\n\t"
+		"1:				\n\t"
+		PREFETCH" 32(%0, %%eax)		\n\t"
+		"movq (%0, %%eax), %%mm0	\n\t"
+		"movq %%mm0, %%mm1		\n\t"
+		"movq %%mm0, %%mm2		\n\t"
+		"pslld $16, %%mm0		\n\t"
+		"psrld $16, %%mm1		\n\t"
+		"pand mask32r, %%mm0		\n\t"
+		"pand mask32g, %%mm2		\n\t"
+		"pand mask32b, %%mm1		\n\t"
+		"por %%mm0, %%mm2		\n\t"
+		"por %%mm1, %%mm2		\n\t"
+		MOVNTQ" %%mm2, (%1, %%eax)	\n\t"
+		"addl $2, %%eax			\n\t"
+		"cmpl %2, %%eax			\n\t"
+		" jb 1b				\n\t"
+		:: "r" (src), "r"(dst), "r" (num_pixels)
+		: "%eax"
+	);
+#else
+	int i;
+	for(i=0; i<num_pixels; i++)
+	{
+		dst[4*i + 0] = src[4*i + 2];
+		dst[4*i + 1] = src[4*i + 1];
+		dst[4*i + 2] = src[4*i + 0];
+	}
+#endif
+}
+
 /**
  *
  * height should be a multiple of 2 and width should be a multiple of 16 (if this is a
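For reference, the non-MMX fallback simply copies each 4-byte pixel with bytes 0 and 2 exchanged and never writes byte 3 of dst. Below is a self-contained usage sketch of that reference behaviour; the fallback loop from the diff is copied out under the hypothetical name rgb32tobgr32_c so the example builds without linking against postproc:

#include <stdint.h>
#include <stdio.h>

/* rgb32tobgr32_c: copy of the C fallback from the diff above, renamed
 * so this example is standalone. */
static void rgb32tobgr32_c(const uint8_t *src, uint8_t *dst, unsigned int src_size)
{
	int num_pixels= src_size >> 2;
	int i;
	for(i=0; i<num_pixels; i++)
	{
		dst[4*i + 0] = src[4*i + 2];
		dst[4*i + 1] = src[4*i + 1];
		dst[4*i + 2] = src[4*i + 0];
		/* dst[4*i + 3] is left untouched by this path */
	}
}

int main(void)
{
	uint8_t src[8] = { 0x11, 0x22, 0x33, 0x44,  0x55, 0x66, 0x77, 0x88 };
	uint8_t dst[8] = { 0 };
	int i;

	rgb32tobgr32_c(src, dst, sizeof(src));	/* src_size is in bytes */
	for(i=0; i<8; i++)
		printf("%02x ", dst[i]);	/* 33 22 11 00 77 66 55 00 */
	printf("\n");
	return 0;
}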
