Revision a3aece93 postproc/rgb2rgb.c

View differences:

postproc/rgb2rgb.c
@@ -1,10 +1,20 @@
+/*
+ *
+ *  rgb2rgb.c, Software RGB to RGB convertor
+ *  Written by Nick Kurshev.
+ */
 #include <inttypes.h>
 #include "../config.h"
 #include "rgb2rgb.h"
+#include "../mmx_defs.h"
+
 #ifdef HAVE_MMX
-#include "mmx.h"
+static const uint64_t mask32   __attribute__((aligned(8))) = 0x00FFFFFF00FFFFFFULL;
+static const uint64_t mask24l  __attribute__((aligned(8))) = 0x0000000000FFFFFFULL;
+static const uint64_t mask24h  __attribute__((aligned(8))) = 0x0000FFFFFF000000ULL;
+static const uint64_t mask15b  __attribute__((aligned(8))) = 0x001F001F001F001FULL; /* 00000000 00011111  xxB */
+static const uint64_t mask15rg __attribute__((aligned(8))) = 0x7FE07FE07FE07FE0ULL; /* 01111111 11100000  RGx */
 #endif
-#include "../mmx_defs.h"
 
 void rgb24to32(uint8_t *src,uint8_t *dst,uint32_t src_size)
 {
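Note: this revision hoists the bit masks out of the individual functions into file-scope static const uint64_t objects marked __attribute__((aligned(8))), replacing the per-function locals removed in the hunks below; the inline asm can then load each 8-byte-aligned constant straight into an MMX register. A minimal sketch of the pattern (demo_mask and load_demo_mask are hypothetical names; GCC on 32-bit x86 with MMX assumed):

    #include <inttypes.h>

    /* Hypothetical demo: an 8-byte-aligned constant usable as a movq
       memory operand, mirroring the masks added in this revision. */
    static const uint64_t demo_mask __attribute__((aligned(8))) =
        0x00FFFFFF00FFFFFFULL;

    static void load_demo_mask(void)
    {
        /* The "m" constraint hands movq the constant's address directly. */
        __asm__ __volatile__("movq %0, %%mm7" :: "m"(demo_mask) : "memory");
    }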
@@ -12,19 +22,18 @@
   uint8_t *s = src;
   uint8_t *end;
 #ifdef HAVE_MMX
-  const uint64_t mask32 = 0x00FFFFFF00FFFFFFULL;
   uint8_t *mm_end;
 #endif
   end = s + src_size;
 #ifdef HAVE_MMX
-  __asm __volatile(PREFETCH" %0\n\t"::"m"(*s):"memory");
+  __asm __volatile(PREFETCH"	%0"::"m"(*s):"memory");
   mm_end = (uint8_t*)((((unsigned long)end)/(MMREG_SIZE*2))*(MMREG_SIZE*2));
-  __asm __volatile("movq %0, %%mm7"::"m"(mask32):"memory");
+  __asm __volatile("movq	%0, %%mm7"::"m"(mask32):"memory");
   if(mm_end == end) mm_end -= MMREG_SIZE*2;
   while(s < mm_end)
   {
     __asm __volatile(
-	PREFETCH" 32%1\n\t"
+	PREFETCH"	32%1\n\t"
 	"movd	%1, %%mm0\n\t"
 	"movd	3%1, %%mm1\n\t"
 	"movd	6%1, %%mm2\n\t"
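The rgb24to32 loop above reads packed 24-bit pixels with three movd loads at byte offsets 0, 3, and 6, masking each with mm7 (mask32) to clear the top byte of every 32-bit output pixel. As a scalar reference (a sketch for orientation, not code from this revision):

    #include <inttypes.h>

    /* Scalar equivalent of rgb24to32: widen each 3-byte pixel to 4 bytes,
       zeroing the fourth byte, as the pand with mask32 does. */
    static void rgb24to32_ref(const uint8_t *src, uint8_t *dst, uint32_t src_size)
    {
        uint32_t i;
        for (i = 0; i < src_size / 3; i++)
        {
            dst[4*i + 0] = src[3*i + 0];
            dst[4*i + 1] = src[3*i + 1];
            dst[4*i + 2] = src[3*i + 2];
            dst[4*i + 3] = 0;          /* filler/alpha byte, cleared */
        }
    }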
@@ -59,23 +68,21 @@
   uint8_t *s = src;
   uint8_t *end;
 #ifdef HAVE_MMX
-  const uint64_t mask24l = 0x0000000000FFFFFFULL;
-  const uint64_t mask24h = 0x0000FFFFFF000000ULL;
   uint8_t *mm_end;
 #endif
   end = s + src_size;
 #ifdef HAVE_MMX
-  __asm __volatile(PREFETCH" %0\n\t"::"m"(*s):"memory");
+  __asm __volatile(PREFETCH"	%0"::"m"(*s):"memory");
   mm_end = (uint8_t*)((((unsigned long)end)/(MMREG_SIZE*2))*(MMREG_SIZE*2));
   __asm __volatile(
-    "movq %0, %%mm7\n\t"
-    "movq %1, %%mm6\n\t"
-    ::"m"(mask24l),"m"(mask24h):"memory");
+	"movq	%0, %%mm7\n\t"
+	"movq	%1, %%mm6"
+	::"m"(mask24l),"m"(mask24h):"memory");
   if(mm_end == end) mm_end -= MMREG_SIZE*2;
   while(s < mm_end)
   {
     __asm __volatile(
-	PREFETCH" 32%1\n\t"
+	PREFETCH"	32%1\n\t"
 	"movq	%1, %%mm0\n\t"
 	"movq	8%1, %%mm1\n\t"
 	"movq	%%mm0, %%mm2\n\t"
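The hunk above patches the 32-to-24-bit path (the function name falls outside the hunk, but mask24l/mask24h mark it as the inverse conversion): mm7 and mm6 select the low and high 24-bit pixel within each quadword so the filler bytes can be squeezed out. A scalar sketch of the same conversion (illustrative only):

    #include <inttypes.h>

    /* Scalar equivalent of the 32->24 path: drop every fourth byte. */
    static void rgb32to24_ref(const uint8_t *src, uint8_t *dst, uint32_t src_size)
    {
        uint32_t i;
        for (i = 0; i < src_size / 4; i++)
        {
            dst[3*i + 0] = src[4*i + 0];
            dst[3*i + 1] = src[4*i + 1];
            dst[3*i + 2] = src[4*i + 2];
        }
    }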
@@ -108,45 +115,47 @@
   }
 }
 
-/* TODO: 3DNOW, MMX2 optimization */
-
-/* Original by Strepto/Astral
- ported to gcc & bugfixed : A'rpi */
+/*
+ Original by Strepto/Astral
+ ported to gcc & bugfixed : A'rpi
+ MMX,  3DNOW optimization by Nick Kurshev
+*/
 void rgb15to16(uint8_t *src,uint8_t *dst,uint32_t src_size)
 {
 #ifdef HAVE_MMX
-  static uint64_t mask_b  = 0x001F001F001F001FLL; // 00000000 00011111  xxB
-  static uint64_t mask_rg = 0x7FE07FE07FE07FE0LL; // 01111111 11100000  RGx
   register char* s=src+src_size;
   register char* d=dst+src_size;
   register int offs=-src_size;
-  movq_m2r (mask_b,  mm4);
-  movq_m2r (mask_rg, mm5);
-  while(offs<0){
-    movq_m2r (*(s+offs), mm0);
-    movq_r2r (mm0, mm1);
-
-    movq_m2r (*(s+8+offs), mm2);
-    movq_r2r (mm2, mm3);
-
-    pand_r2r (mm4, mm0);
-    pand_r2r (mm5, mm1);
-
-    psllq_i2r(1,mm1);
-    pand_r2r (mm4, mm2);
-
-    pand_r2r (mm5, mm3);
-    por_r2r  (mm1, mm0);
-
-    psllq_i2r(1,mm3);
-    movq_r2m (mm0,*(d+offs));
-
-    por_r2r  (mm3,mm2);
-    movq_r2m (mm2,*(d+8+offs));
-
-    offs+=16;
+  __asm __volatile(PREFETCH"	%0"::"m"(*(s+offs)):"memory");
+  __asm __volatile(
+	"movq	%0, %%mm4\n\t"
+	"movq	%1, %%mm5"
+	::"m"(mask15b), "m"(mask15rg):"memory");
+  while(offs<0)
+  {
+	__asm __volatile(
+		PREFETCH"	32%1\n\t"
+		"movq	%1, %%mm0\n\t"
+		"movq	8%1, %%mm2\n\t"
+		"movq	%%mm0, %%mm1\n\t"
+		"movq	%%mm2, %%mm3\n\t"
+		"pand	%%mm4, %%mm0\n\t"
+		"pand	%%mm5, %%mm1\n\t"
+		"pand	%%mm4, %%mm2\n\t"
+		"pand	%%mm5, %%mm3\n\t"
+		"psllq	$1, %%mm1\n\t"
+		"psllq	$1, %%mm3\n\t"
+		"por	%%mm1, %%mm0\n\t"
+		"por	%%mm3, %%mm2\n\t"
+		MOVNTQ"	%%mm0, %0\n\t"
+		MOVNTQ"	%%mm2, 8%0"
+		:"=m"(*(d+offs))
+		:"m"(*(s+offs))
+		:"memory");
+	offs+=16;
   }
-  emms();
+  __asm __volatile(SFENCE:::"memory");
+  __asm __volatile(EMMS:::"memory");
 #else
    uint16_t *s1=( uint16_t * )src;
    uint16_t *d1=( uint16_t * )dst;
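In rgb15to16 the rewritten loop keeps the 5 blue bits in place (pand with mask15b), isolates the packed red/green field (mask15rg), shifts it up one bit (psllq $1), and ors the halves back together, then stores with the non-temporal MOVNTQ followed by SFENCE and EMMS in place of the old emms(). Per 16-bit pixel that is 0rrrrrgggggbbbbb -> rrrrrggggg0bbbbb, i.e. RGB565 with a zero green LSB. A scalar sketch (not code from the revision):

    #include <inttypes.h>

    /* Scalar equivalent of rgb15to16: blue stays put, red/green shift up
       one bit; the new green LSB is left at zero. */
    static void rgb15to16_ref(const uint8_t *src, uint8_t *dst, uint32_t src_size)
    {
        const uint16_t *s = (const uint16_t *)src;
        uint16_t *d = (uint16_t *)dst;
        uint32_t i;
        for (i = 0; i < src_size / 2; i++)
            d[i] = (uint16_t)(((s[i] & 0x7FE0) << 1) | (s[i] & 0x001F));
    }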
