| | | |
|---|---|---|
| author | Søren Sandmann Pedersen <sandmann@daimi.au.dk> | 2005-08-10 20:22:57 +0000 |
| committer | Søren Sandmann Pedersen <sandmann@daimi.au.dk> | 2005-08-10 20:22:57 +0000 |
| commit | 130fffc0cdbfdc29f33f1ee97c09e744c19e243a (patch) | |
| tree | 1afe53c316e5bb8895c691c624d7f8ba09f0d0b8 /fb | |
| parent | ef50bba5694ef276a239882fae3502638b4ec784 (diff) | |
Wed Aug 10 16:17:38 2005 Søren Sandmann <sandmann@redhat.com>
Add back non-SSE implementations. Define USE_SSE if the CPU is amd64/x86-64
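For context: `_mm_shuffle_pi16` (the `pshufw` instruction) is an SSE-era extension to MMX, so it can be assumed present on amd64/x86-64 but not on older MMX-only CPUs; the re-added non-SSE paths rebuild the same results from plain 64-bit shifts and ORs. As an illustration only (not part of the patch), here is a scalar sketch of what `expand_alpha` produces, assuming fbmmx.c's usual "expanded" pixel layout of four 16-bit channels in a 64-bit word with alpha in the top lane:

```c
#include <stdint.h>

/* Scalar reference for the expand_alpha operation: take the 16-bit alpha
 * from the top lane of an expanded ARGB pixel and broadcast it into all
 * four 16-bit lanes.  Illustration only; the real code works on __m64. */
static uint64_t
expand_alpha_ref (uint64_t pixel)
{
    uint64_t a = pixel >> 48;                      /* isolate the alpha lane */

    return a | (a << 16) | (a << 32) | (a << 48);  /* broadcast to all lanes */
}
```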
Diffstat (limited to 'fb')
-rw-r--r-- | fb/fbmmx.c | 61 |
1 file changed, 61 insertions, 0 deletions
```diff
diff --git a/fb/fbmmx.c b/fb/fbmmx.c
index fd1284496..ba649393f 100644
--- a/fb/fbmmx.c
+++ b/fb/fbmmx.c
@@ -36,6 +36,10 @@
 
 #ifdef USE_MMX
 
+#if defined(__amd64__) || defined(__x86_64__)
+#define USE_SSE
+#endif
+
 #include <mmintrin.h>
 #include <xmmintrin.h> /* for _mm_shuffle_pi16 and _MM_SHUFFLE */
 
@@ -155,6 +159,8 @@ pix_add (__m64 a, __m64 b)
     return _mm_adds_pu8 (a, b);
 }
 
+#ifdef USE_SSE
+
 static __inline__ __m64
 expand_alpha (__m64 pixel)
 {
@@ -173,6 +179,61 @@ invert_colors (__m64 pixel)
     return _mm_shuffle_pi16 (pixel, _MM_SHUFFLE(3, 0, 1, 2));
 }
 
+#else
+
+static __inline__ __m64
+expand_alpha (__m64 pixel)
+{
+    __m64 t1, t2;
+
+    t1 = shift (pixel, -48);
+    t2 = shift (t1, 16);
+    t1 = _mm_or_si64 (t1, t2);
+    t2 = shift (t1, 32);
+    t1 = _mm_or_si64 (t1, t2);
+
+    return t1;
+}
+
+static __inline__ __m64
+expand_alpha_rev (__m64 pixel)
+{
+    __m64 t1, t2;
+
+    /* move alpha to low 16 bits and zero the rest */
+    t1 = shift (pixel, 48);
+    t1 = shift (t1, -48);
+
+    t2 = shift (t1, 16);
+    t1 = _mm_or_si64 (t1, t2);
+    t2 = shift (t1, 32);
+    t1 = _mm_or_si64 (t1, t2);
+
+    return t1;
+}
+
+static __inline__ __m64
+invert_colors (__m64 pixel)
+{
+    __m64 x, y, z;
+
+    x = y = z = pixel;
+
+    x = _mm_and_si64 (x, MC(ffff0000ffff0000));
+    y = _mm_and_si64 (y, MC(000000000000ffff));
+    z = _mm_and_si64 (z, MC(0000ffff00000000));
+
+    y = shift (y, 32);
+    z = shift (z, -32);
+
+    x = _mm_or_si64 (x, y);
+    x = _mm_or_si64 (x, z);
+
+    return x;
+}
+
+#endif
+
 static __inline__ __m64
 over (__m64 src, __m64 srca, __m64 dest)
 {
```
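The re-added MMX-only functions lean on two helpers that are defined earlier in fbmmx.c and therefore do not appear in this diff: `shift()` and the `MC()` constant macro. As a minimal sketch of the assumed behaviour (not the file's actual definitions): `shift(v, s)` shifts the whole 64-bit register left for positive counts and right for negative counts, and `MC(x)` names a preloaded `__m64` constant whose value is `0x<x>`.

```c
#include <mmintrin.h>

/* Assumed behaviour of the shift() helper used by expand_alpha,
 * expand_alpha_rev and invert_colors above: positive counts shift the
 * 64-bit value left, negative counts shift it right. */
static __inline__ __m64
shift (__m64 v, int s)
{
    if (s > 0)
        return _mm_slli_si64 (v, s);
    else if (s < 0)
        return _mm_srli_si64 (v, -s);
    else
        return v;
}
```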