SSE/PacketMath.h
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2008-2009 Gael Guennebaud <gael.guennebaud@inria.fr>
//
// Eigen is free software; you can redistribute it and/or
// modify it under the terms of the GNU Lesser General Public
// License as published by the Free Software Foundation; either
// version 3 of the License, or (at your option) any later version.
//
// Alternatively, you can redistribute it and/or
// modify it under the terms of the GNU General Public License as
// published by the Free Software Foundation; either version 2 of
// the License, or (at your option) any later version.
//
// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY
// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public
// License and a copy of the GNU General Public License along with
// Eigen. If not, see <http://www.gnu.org/licenses/>.

#ifndef EIGEN_PACKET_MATH_SSE_H
#define EIGEN_PACKET_MATH_SSE_H

namespace Eigen {

namespace internal {

#ifndef EIGEN_CACHEFRIENDLY_PRODUCT_THRESHOLD
#define EIGEN_CACHEFRIENDLY_PRODUCT_THRESHOLD 8
#endif

#ifndef EIGEN_ARCH_DEFAULT_NUMBER_OF_REGISTERS
#define EIGEN_ARCH_DEFAULT_NUMBER_OF_REGISTERS (2*sizeof(void*))
#endif

typedef __m128  Packet4f;
typedef __m128i Packet4i;
typedef __m128d Packet2d;

template<> struct is_arithmetic<__m128>  { enum { value = true }; };
template<> struct is_arithmetic<__m128i> { enum { value = true }; };
template<> struct is_arithmetic<__m128d> { enum { value = true }; };

#define vec4f_swizzle1(v,p,q,r,s) \
  (_mm_castsi128_ps(_mm_shuffle_epi32( _mm_castps_si128(v), ((s)<<6|(r)<<4|(q)<<2|(p)))))

#define vec4i_swizzle1(v,p,q,r,s) \
  (_mm_shuffle_epi32( v, ((s)<<6|(r)<<4|(q)<<2|(p))))

#define vec2d_swizzle1(v,p,q) \
  (_mm_castsi128_pd(_mm_shuffle_epi32( _mm_castpd_si128(v), ((q*2+1)<<6|(q*2)<<4|(p*2+1)<<2|(p*2)))))

#define vec4f_swizzle2(a,b,p,q,r,s) \
  (_mm_shuffle_ps( (a), (b), ((s)<<6|(r)<<4|(q)<<2|(p))))

#define vec4i_swizzle2(a,b,p,q,r,s) \
  (_mm_castps_si128( (_mm_shuffle_ps( _mm_castsi128_ps(a), _mm_castsi128_ps(b), ((s)<<6|(r)<<4|(q)<<2|(p))))))
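
// Usage sketch (illustration, not part of the original file): p,q,r,s select
// source lanes and are packed into the standard SSE shuffle immediate
// (s<<6 | r<<4 | q<<2 | p). For instance, reversing the lanes of a Packet4f:
//   Packet4f v = _mm_setr_ps(1.f, 2.f, 3.f, 4.f); // v = [1 2 3 4]
//   Packet4f r = vec4f_swizzle1(v, 3, 2, 1, 0);   // r = [4 3 2 1]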

#define _EIGEN_DECLARE_CONST_Packet4f(NAME,X) \
  const Packet4f p4f_##NAME = pset1<Packet4f>(X)

#define _EIGEN_DECLARE_CONST_Packet4f_FROM_INT(NAME,X) \
  const Packet4f p4f_##NAME = _mm_castsi128_ps(pset1<Packet4i>(X))

#define _EIGEN_DECLARE_CONST_Packet4i(NAME,X) \
  const Packet4i p4i_##NAME = pset1<Packet4i>(X)


template<> struct packet_traits<float> : default_packet_traits
{
  typedef Packet4f type;
  enum {
    Vectorizable = 1,
    AlignedOnScalar = 1,
    size=4,

    HasDiv  = 1,
    HasSin  = EIGEN_FAST_MATH,
    HasCos  = EIGEN_FAST_MATH,
    HasLog  = 1,
    HasExp  = 1,
    HasSqrt = 1
  };
};
template<> struct packet_traits<double> : default_packet_traits
{
  typedef Packet2d type;
  enum {
    Vectorizable = 1,
    AlignedOnScalar = 1,
    size=2,

    HasDiv = 1
  };
};
template<> struct packet_traits<int> : default_packet_traits
{
  typedef Packet4i type;
  enum {
    // FIXME check the Has*
    Vectorizable = 1,
    AlignedOnScalar = 1,
    size=4
  };
};

template<> struct unpacket_traits<Packet4f> { typedef float  type; enum {size=4}; };
template<> struct unpacket_traits<Packet2d> { typedef double type; enum {size=2}; };
template<> struct unpacket_traits<Packet4i> { typedef int    type; enum {size=4}; };

#if defined(_MSC_VER) && (_MSC_VER==1500)
// Workaround MSVC 9 internal compiler error.
// TODO: It has been detected with win64 builds (amd64), so let's check whether it also happens in 32bits+SSE mode
// TODO: let's check whether there does not exist a better fix, like adding a pset0() function. (it crashed on pset1(0)).
template<> EIGEN_STRONG_INLINE Packet4f pset1<Packet4f>(const float&  from) { return _mm_set_ps(from,from,from,from); }
template<> EIGEN_STRONG_INLINE Packet2d pset1<Packet2d>(const double& from) { return _mm_set_pd(from,from); }
template<> EIGEN_STRONG_INLINE Packet4i pset1<Packet4i>(const int&    from) { return _mm_set_epi32(from,from,from,from); }
#else
template<> EIGEN_STRONG_INLINE Packet4f pset1<Packet4f>(const float&  from) { return _mm_set1_ps(from); }
template<> EIGEN_STRONG_INLINE Packet2d pset1<Packet2d>(const double& from) { return _mm_set1_pd(from); }
template<> EIGEN_STRONG_INLINE Packet4i pset1<Packet4i>(const int&    from) { return _mm_set1_epi32(from); }
#endif

template<> EIGEN_STRONG_INLINE Packet4f plset<float>(const float& a)   { return _mm_add_ps(pset1<Packet4f>(a), _mm_set_ps(3,2,1,0)); }
template<> EIGEN_STRONG_INLINE Packet2d plset<double>(const double& a) { return _mm_add_pd(pset1<Packet2d>(a),_mm_set_pd(1,0)); }
template<> EIGEN_STRONG_INLINE Packet4i plset<int>(const int& a)       { return _mm_add_epi32(pset1<Packet4i>(a),_mm_set_epi32(3,2,1,0)); }
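
// Usage sketch (illustration, not part of the original file): plset produces
// an arithmetic ramp starting at 'a':
//   Packet4f p = plset<float>(10.f); // p = [10 11 12 13]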

template<> EIGEN_STRONG_INLINE Packet4f padd<Packet4f>(const Packet4f& a, const Packet4f& b) { return _mm_add_ps(a,b); }
template<> EIGEN_STRONG_INLINE Packet2d padd<Packet2d>(const Packet2d& a, const Packet2d& b) { return _mm_add_pd(a,b); }
template<> EIGEN_STRONG_INLINE Packet4i padd<Packet4i>(const Packet4i& a, const Packet4i& b) { return _mm_add_epi32(a,b); }

template<> EIGEN_STRONG_INLINE Packet4f psub<Packet4f>(const Packet4f& a, const Packet4f& b) { return _mm_sub_ps(a,b); }
template<> EIGEN_STRONG_INLINE Packet2d psub<Packet2d>(const Packet2d& a, const Packet2d& b) { return _mm_sub_pd(a,b); }
template<> EIGEN_STRONG_INLINE Packet4i psub<Packet4i>(const Packet4i& a, const Packet4i& b) { return _mm_sub_epi32(a,b); }

template<> EIGEN_STRONG_INLINE Packet4f pnegate(const Packet4f& a)
{
  const Packet4f mask = _mm_castsi128_ps(_mm_setr_epi32(0x80000000,0x80000000,0x80000000,0x80000000));
  return _mm_xor_ps(a,mask);
}
template<> EIGEN_STRONG_INLINE Packet2d pnegate(const Packet2d& a)
{
  const Packet2d mask = _mm_castsi128_pd(_mm_setr_epi32(0x0,0x80000000,0x0,0x80000000));
  return _mm_xor_pd(a,mask);
}
template<> EIGEN_STRONG_INLINE Packet4i pnegate(const Packet4i& a)
{
  return psub(_mm_setr_epi32(0,0,0,0), a);
}

template<> EIGEN_STRONG_INLINE Packet4f pmul<Packet4f>(const Packet4f& a, const Packet4f& b) { return _mm_mul_ps(a,b); }
template<> EIGEN_STRONG_INLINE Packet2d pmul<Packet2d>(const Packet2d& a, const Packet2d& b) { return _mm_mul_pd(a,b); }
template<> EIGEN_STRONG_INLINE Packet4i pmul<Packet4i>(const Packet4i& a, const Packet4i& b)
{
#ifdef EIGEN_VECTORIZE_SSE4_1
  return _mm_mullo_epi32(a,b);
#else
  // this version is slightly faster than 4 scalar products
  return vec4i_swizzle1(
            vec4i_swizzle2(
              _mm_mul_epu32(a,b),
              _mm_mul_epu32(vec4i_swizzle1(a,1,0,3,2),
                            vec4i_swizzle1(b,1,0,3,2)),
              0,2,0,2),
            0,2,1,3);
#endif
}
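
// Illustration (not part of the original file) of the SSE2 fallback above:
// _mm_mul_epu32 only multiplies lanes 0 and 2, producing two 64-bit products.
// The first call covers lanes {0,2}; swizzling both operands with (1,0,3,2)
// lets a second call cover lanes {1,3}. vec4i_swizzle2(...,0,2,0,2) gathers
// the low 32 bits of the four products, and vec4i_swizzle1(...,0,2,1,3)
// restores the original lane order, so every lane k holds a[k]*b[k].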

template<> EIGEN_STRONG_INLINE Packet4f pdiv<Packet4f>(const Packet4f& a, const Packet4f& b) { return _mm_div_ps(a,b); }
template<> EIGEN_STRONG_INLINE Packet2d pdiv<Packet2d>(const Packet2d& a, const Packet2d& b) { return _mm_div_pd(a,b); }
template<> EIGEN_STRONG_INLINE Packet4i pdiv<Packet4i>(const Packet4i& /*a*/, const Packet4i& /*b*/)
{ eigen_assert(false && "packet integer division is not supported by SSE");
  return pset1<Packet4i>(0);
}

// for some weird reasons, it has to be overloaded for packets of integers
template<> EIGEN_STRONG_INLINE Packet4i pmadd(const Packet4i& a, const Packet4i& b, const Packet4i& c) { return padd(pmul(a,b), c); }

template<> EIGEN_STRONG_INLINE Packet4f pmin<Packet4f>(const Packet4f& a, const Packet4f& b) { return _mm_min_ps(a,b); }
template<> EIGEN_STRONG_INLINE Packet2d pmin<Packet2d>(const Packet2d& a, const Packet2d& b) { return _mm_min_pd(a,b); }
template<> EIGEN_STRONG_INLINE Packet4i pmin<Packet4i>(const Packet4i& a, const Packet4i& b)
{
  // after some benchmarking, this version *is* faster than a scalar implementation
  Packet4i mask = _mm_cmplt_epi32(a,b);
  return _mm_or_si128(_mm_and_si128(mask,a),_mm_andnot_si128(mask,b));
}

template<> EIGEN_STRONG_INLINE Packet4f pmax<Packet4f>(const Packet4f& a, const Packet4f& b) { return _mm_max_ps(a,b); }
template<> EIGEN_STRONG_INLINE Packet2d pmax<Packet2d>(const Packet2d& a, const Packet2d& b) { return _mm_max_pd(a,b); }
template<> EIGEN_STRONG_INLINE Packet4i pmax<Packet4i>(const Packet4i& a, const Packet4i& b)
{
  // after some benchmarking, this version *is* faster than a scalar implementation
  Packet4i mask = _mm_cmpgt_epi32(a,b);
  return _mm_or_si128(_mm_and_si128(mask,a),_mm_andnot_si128(mask,b));
}
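
// Illustration (not part of the original file): the integer pmin/pmax above
// use the classic SSE2 select idiom. The compare yields an all-ones mask in
// lanes where the predicate holds, and (mask & a) | (~mask & b) picks a in
// those lanes and b elsewhere, like the scalar
//   int select(int mask, int a, int b) { return (mask & a) | (~mask & b); }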

template<> EIGEN_STRONG_INLINE Packet4f pand<Packet4f>(const Packet4f& a, const Packet4f& b) { return _mm_and_ps(a,b); }
template<> EIGEN_STRONG_INLINE Packet2d pand<Packet2d>(const Packet2d& a, const Packet2d& b) { return _mm_and_pd(a,b); }
template<> EIGEN_STRONG_INLINE Packet4i pand<Packet4i>(const Packet4i& a, const Packet4i& b) { return _mm_and_si128(a,b); }

template<> EIGEN_STRONG_INLINE Packet4f por<Packet4f>(const Packet4f& a, const Packet4f& b) { return _mm_or_ps(a,b); }
template<> EIGEN_STRONG_INLINE Packet2d por<Packet2d>(const Packet2d& a, const Packet2d& b) { return _mm_or_pd(a,b); }
template<> EIGEN_STRONG_INLINE Packet4i por<Packet4i>(const Packet4i& a, const Packet4i& b) { return _mm_or_si128(a,b); }

template<> EIGEN_STRONG_INLINE Packet4f pxor<Packet4f>(const Packet4f& a, const Packet4f& b) { return _mm_xor_ps(a,b); }
template<> EIGEN_STRONG_INLINE Packet2d pxor<Packet2d>(const Packet2d& a, const Packet2d& b) { return _mm_xor_pd(a,b); }
template<> EIGEN_STRONG_INLINE Packet4i pxor<Packet4i>(const Packet4i& a, const Packet4i& b) { return _mm_xor_si128(a,b); }

template<> EIGEN_STRONG_INLINE Packet4f pandnot<Packet4f>(const Packet4f& a, const Packet4f& b) { return _mm_andnot_ps(a,b); }
template<> EIGEN_STRONG_INLINE Packet2d pandnot<Packet2d>(const Packet2d& a, const Packet2d& b) { return _mm_andnot_pd(a,b); }
template<> EIGEN_STRONG_INLINE Packet4i pandnot<Packet4i>(const Packet4i& a, const Packet4i& b) { return _mm_andnot_si128(a,b); }

template<> EIGEN_STRONG_INLINE Packet4f pload<Packet4f>(const float*  from) { EIGEN_DEBUG_ALIGNED_LOAD return _mm_load_ps(from); }
template<> EIGEN_STRONG_INLINE Packet2d pload<Packet2d>(const double* from) { EIGEN_DEBUG_ALIGNED_LOAD return _mm_load_pd(from); }
template<> EIGEN_STRONG_INLINE Packet4i pload<Packet4i>(const int*    from) { EIGEN_DEBUG_ALIGNED_LOAD return _mm_load_si128(reinterpret_cast<const Packet4i*>(from)); }

#if defined(_MSC_VER)
  template<> EIGEN_STRONG_INLINE Packet4f ploadu<Packet4f>(const float* from) {
    EIGEN_DEBUG_UNALIGNED_LOAD
    #if (_MSC_VER==1600)
    // NOTE Some version of MSVC10 generates bad code when using _mm_loadu_ps
    // (i.e., it does not generate an unaligned load!)
    // TODO On most architectures this version should also be faster than a single _mm_loadu_ps,
    // so we could also enable it for MSVC08, but first we have to make sure this latter does not generate bad code when doing so...
    __m128 res = _mm_loadl_pi(_mm_set1_ps(0.0f), (const __m64*)(from));
    res = _mm_loadh_pi(res, (const __m64*)(from+2));
    return res;
    #else
    return _mm_loadu_ps(from);
    #endif
  }
  template<> EIGEN_STRONG_INLINE Packet2d ploadu<Packet2d>(const double* from) { EIGEN_DEBUG_UNALIGNED_LOAD return _mm_loadu_pd(from); }
  template<> EIGEN_STRONG_INLINE Packet4i ploadu<Packet4i>(const int*    from) { EIGEN_DEBUG_UNALIGNED_LOAD return _mm_loadu_si128(reinterpret_cast<const Packet4i*>(from)); }
#else
// Fast unaligned loads. Note that here we cannot directly use intrinsics: this would
// require pointer casting to incompatible pointer types and leads to invalid code
// because of the strict aliasing rule. The "dummy" stuff is required to enforce
// a correct instruction dependency.
// TODO: do the same for MSVC (ICC is compatible)
// NOTE: with the code below, MSVC's compiler crashes!

#if defined(__GNUC__) && defined(__i386__)
  // bug 195: gcc/i386 emits weird x87 fldl/fstpl instructions for _mm_load_sd
  #define EIGEN_AVOID_CUSTOM_UNALIGNED_LOADS 1
#elif defined(__clang__)
  // bug 201: Segfaults in _mm_loadh_pd with clang 2.8
  #define EIGEN_AVOID_CUSTOM_UNALIGNED_LOADS 1
#else
  #define EIGEN_AVOID_CUSTOM_UNALIGNED_LOADS 0
#endif

template<> EIGEN_STRONG_INLINE Packet4f ploadu<Packet4f>(const float* from)
{
  EIGEN_DEBUG_UNALIGNED_LOAD
#if EIGEN_AVOID_CUSTOM_UNALIGNED_LOADS
  return _mm_loadu_ps(from);
#else
  __m128d res;
  res = _mm_load_sd((const double*)(from));
  res = _mm_loadh_pd(res, (const double*)(from+2));
  return _mm_castpd_ps(res);
#endif
}
template<> EIGEN_STRONG_INLINE Packet2d ploadu<Packet2d>(const double* from)
{
  EIGEN_DEBUG_UNALIGNED_LOAD
#if EIGEN_AVOID_CUSTOM_UNALIGNED_LOADS
  return _mm_loadu_pd(from);
#else
  __m128d res;
  res = _mm_load_sd(from);
  res = _mm_loadh_pd(res, from+1);
  return res;
#endif
}
template<> EIGEN_STRONG_INLINE Packet4i ploadu<Packet4i>(const int* from)
{
  EIGEN_DEBUG_UNALIGNED_LOAD
#if EIGEN_AVOID_CUSTOM_UNALIGNED_LOADS
  return _mm_loadu_si128(reinterpret_cast<const Packet4i*>(from));
#else
  __m128d res;
  res = _mm_load_sd((const double*)(from));
  res = _mm_loadh_pd(res, (const double*)(from+2));
  return _mm_castpd_si128(res);
#endif
}
#endif

template<> EIGEN_STRONG_INLINE Packet4f ploaddup<Packet4f>(const float* from)
{
  return vec4f_swizzle1(_mm_castpd_ps(_mm_load_sd(reinterpret_cast<const double*>(from))), 0, 0, 1, 1);
}
template<> EIGEN_STRONG_INLINE Packet2d ploaddup<Packet2d>(const double* from)
{ return pset1<Packet2d>(from[0]); }
template<> EIGEN_STRONG_INLINE Packet4i ploaddup<Packet4i>(const int* from)
{
  Packet4i tmp;
  tmp = _mm_loadl_epi64(reinterpret_cast<const Packet4i*>(from));
  return vec4i_swizzle1(tmp, 0, 0, 1, 1);
}
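
// Usage sketch (illustration, not part of the original file): ploaddup reads
// size/2 scalars and duplicates each one into adjacent lanes:
//   float data[2] = {1.f, 2.f};
//   Packet4f p = ploaddup<Packet4f>(data); // p = [1 1 2 2]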

template<> EIGEN_STRONG_INLINE void pstore<float>(float*   to, const Packet4f& from) { EIGEN_DEBUG_ALIGNED_STORE _mm_store_ps(to, from); }
template<> EIGEN_STRONG_INLINE void pstore<double>(double* to, const Packet2d& from) { EIGEN_DEBUG_ALIGNED_STORE _mm_store_pd(to, from); }
template<> EIGEN_STRONG_INLINE void pstore<int>(int*       to, const Packet4i& from) { EIGEN_DEBUG_ALIGNED_STORE _mm_store_si128(reinterpret_cast<Packet4i*>(to), from); }

template<> EIGEN_STRONG_INLINE void pstoreu<double>(double* to, const Packet2d& from) {
  EIGEN_DEBUG_UNALIGNED_STORE
  _mm_storel_pd((to), from);
  _mm_storeh_pd((to+1), from);
}
template<> EIGEN_STRONG_INLINE void pstoreu<float>(float* to, const Packet4f& from) { EIGEN_DEBUG_UNALIGNED_STORE pstoreu(reinterpret_cast<double*>(to), _mm_castps_pd(from)); }
template<> EIGEN_STRONG_INLINE void pstoreu<int>(int*     to, const Packet4i& from) { EIGEN_DEBUG_UNALIGNED_STORE pstoreu(reinterpret_cast<double*>(to), _mm_castsi128_pd(from)); }

// some compilers might be tempted to perform multiple moves instead of using a vector path.
template<> EIGEN_STRONG_INLINE void pstore1<Packet4f>(float* to, const float& a)
{
  Packet4f pa = _mm_set_ss(a);
  pstore(to, vec4f_swizzle1(pa,0,0,0,0));
}
// some compilers might be tempted to perform multiple moves instead of using a vector path.
template<> EIGEN_STRONG_INLINE void pstore1<Packet2d>(double* to, const double& a)
{
  Packet2d pa = _mm_set_sd(a);
  pstore(to, vec2d_swizzle1(pa,0,0));
}
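
// Usage sketch (illustration, not part of the original file):
//   EIGEN_ALIGN16 float buf[4];
//   pstore1<Packet4f>(buf, 42.f); // buf = {42, 42, 42, 42}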

template<> EIGEN_STRONG_INLINE void prefetch<float>(const float*   addr) { _mm_prefetch((const char*)(addr), _MM_HINT_T0); }
template<> EIGEN_STRONG_INLINE void prefetch<double>(const double* addr) { _mm_prefetch((const char*)(addr), _MM_HINT_T0); }
template<> EIGEN_STRONG_INLINE void prefetch<int>(const int*       addr) { _mm_prefetch((const char*)(addr), _MM_HINT_T0); }

#if defined(_MSC_VER) && defined(_WIN64) && !defined(__INTEL_COMPILER)
// The temporary variable fixes an internal compilation error in vs <= 2008 and a wrong-result bug in vs 2010.
// Direct access to the struct members fixed bug #62.
template<> EIGEN_STRONG_INLINE float  pfirst<Packet4f>(const Packet4f& a) { return a.m128_f32[0]; }
template<> EIGEN_STRONG_INLINE double pfirst<Packet2d>(const Packet2d& a) { return a.m128d_f64[0]; }
template<> EIGEN_STRONG_INLINE int    pfirst<Packet4i>(const Packet4i& a) { int x = _mm_cvtsi128_si32(a); return x; }
#elif defined(_MSC_VER) && !defined(__INTEL_COMPILER)
// The temporary variable fixes an internal compilation error in vs <= 2008 and a wrong-result bug in vs 2010.
template<> EIGEN_STRONG_INLINE float  pfirst<Packet4f>(const Packet4f& a) { float  x = _mm_cvtss_f32(a); return x; }
template<> EIGEN_STRONG_INLINE double pfirst<Packet2d>(const Packet2d& a) { double x = _mm_cvtsd_f64(a); return x; }
template<> EIGEN_STRONG_INLINE int    pfirst<Packet4i>(const Packet4i& a) { int    x = _mm_cvtsi128_si32(a); return x; }
#else
template<> EIGEN_STRONG_INLINE float  pfirst<Packet4f>(const Packet4f& a) { return _mm_cvtss_f32(a); }
template<> EIGEN_STRONG_INLINE double pfirst<Packet2d>(const Packet2d& a) { return _mm_cvtsd_f64(a); }
template<> EIGEN_STRONG_INLINE int    pfirst<Packet4i>(const Packet4i& a) { return _mm_cvtsi128_si32(a); }
#endif

template<> EIGEN_STRONG_INLINE Packet4f preverse(const Packet4f& a)
{ return _mm_shuffle_ps(a,a,0x1B); }
template<> EIGEN_STRONG_INLINE Packet2d preverse(const Packet2d& a)
{ return _mm_shuffle_pd(a,a,0x1); }
template<> EIGEN_STRONG_INLINE Packet4i preverse(const Packet4i& a)
{ return _mm_shuffle_epi32(a,0x1B); }


template<> EIGEN_STRONG_INLINE Packet4f pabs(const Packet4f& a)
{
  const Packet4f mask = _mm_castsi128_ps(_mm_setr_epi32(0x7FFFFFFF,0x7FFFFFFF,0x7FFFFFFF,0x7FFFFFFF));
  return _mm_and_ps(a,mask);
}
template<> EIGEN_STRONG_INLINE Packet2d pabs(const Packet2d& a)
{
  const Packet2d mask = _mm_castsi128_pd(_mm_setr_epi32(0xFFFFFFFF,0x7FFFFFFF,0xFFFFFFFF,0x7FFFFFFF));
  return _mm_and_pd(a,mask);
}
template<> EIGEN_STRONG_INLINE Packet4i pabs(const Packet4i& a)
{
  #ifdef EIGEN_VECTORIZE_SSSE3
  return _mm_abs_epi32(a);
  #else
  Packet4i aux = _mm_srai_epi32(a,31);
  return _mm_sub_epi32(_mm_xor_si128(a,aux),aux);
  #endif
}
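
// Illustration (not part of the original file) of the SSE2 fallback above:
// _mm_srai_epi32(a,31) broadcasts the sign bit, yielding 0 for non-negative
// lanes and -1 for negative ones; (a ^ s) - s is the classic branchless
// absolute value, equivalent to the scalar
//   int iabs(int a) { int s = a >> 31; return (a ^ s) - s; }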

EIGEN_STRONG_INLINE void punpackp(Packet4f* vecs)
{
  vecs[1] = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(vecs[0]), 0x55));
  vecs[2] = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(vecs[0]), 0xAA));
  vecs[3] = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(vecs[0]), 0xFF));
  vecs[0] = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(vecs[0]), 0x00));
}

#ifdef EIGEN_VECTORIZE_SSE3
// TODO implement SSE2 versions as well as integer versions
template<> EIGEN_STRONG_INLINE Packet4f preduxp<Packet4f>(const Packet4f* vecs)
{
  return _mm_hadd_ps(_mm_hadd_ps(vecs[0], vecs[1]),_mm_hadd_ps(vecs[2], vecs[3]));
}
template<> EIGEN_STRONG_INLINE Packet2d preduxp<Packet2d>(const Packet2d* vecs)
{
  return _mm_hadd_pd(vecs[0], vecs[1]);
}
// SSSE3 version:
// EIGEN_STRONG_INLINE Packet4i preduxp(const Packet4i* vecs)
// {
//   return _mm_hadd_epi32(_mm_hadd_epi32(vecs[0], vecs[1]),_mm_hadd_epi32(vecs[2], vecs[3]));
// }

template<> EIGEN_STRONG_INLINE float predux<Packet4f>(const Packet4f& a)
{
  Packet4f tmp0 = _mm_hadd_ps(a,a);
  return pfirst(_mm_hadd_ps(tmp0, tmp0));
}

template<> EIGEN_STRONG_INLINE double predux<Packet2d>(const Packet2d& a) { return pfirst(_mm_hadd_pd(a, a)); }

// SSSE3 version:
// EIGEN_STRONG_INLINE float predux(const Packet4i& a)
// {
//   Packet4i tmp0 = _mm_hadd_epi32(a,a);
//   return pfirst(_mm_hadd_epi32(tmp0, tmp0));
// }
#else
// SSE2 versions
template<> EIGEN_STRONG_INLINE float predux<Packet4f>(const Packet4f& a)
{
  Packet4f tmp = _mm_add_ps(a, _mm_movehl_ps(a,a));
  return pfirst(_mm_add_ss(tmp, _mm_shuffle_ps(tmp,tmp, 1)));
}
template<> EIGEN_STRONG_INLINE double predux<Packet2d>(const Packet2d& a)
{
  return pfirst(_mm_add_sd(a, _mm_unpackhi_pd(a,a)));
}
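
// Illustration (not part of the original file): the SSE2 predux<Packet4f>
// above reduces in two steps: movehl folds the upper half onto the lower one
// ([a0+a2, a1+a3, ..]), then a scalar add of lane 0 with the shuffled lane 1
// yields (a0+a2)+(a1+a3), so predux(pset1<Packet4f>(1.f)) == 4.f.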

template<> EIGEN_STRONG_INLINE Packet4f preduxp<Packet4f>(const Packet4f* vecs)
{
  Packet4f tmp0, tmp1, tmp2;
  tmp0 = _mm_unpacklo_ps(vecs[0], vecs[1]);
  tmp1 = _mm_unpackhi_ps(vecs[0], vecs[1]);
  tmp2 = _mm_unpackhi_ps(vecs[2], vecs[3]);
  tmp0 = _mm_add_ps(tmp0, tmp1);
  tmp1 = _mm_unpacklo_ps(vecs[2], vecs[3]);
  tmp1 = _mm_add_ps(tmp1, tmp2);
  tmp2 = _mm_movehl_ps(tmp1, tmp0);
  tmp0 = _mm_movelh_ps(tmp0, tmp1);
  return _mm_add_ps(tmp0, tmp2);
}

template<> EIGEN_STRONG_INLINE Packet2d preduxp<Packet2d>(const Packet2d* vecs)
{
  return _mm_add_pd(_mm_unpacklo_pd(vecs[0], vecs[1]), _mm_unpackhi_pd(vecs[0], vecs[1]));
}
#endif // SSE3

template<> EIGEN_STRONG_INLINE int predux<Packet4i>(const Packet4i& a)
{
  Packet4i tmp = _mm_add_epi32(a, _mm_unpackhi_epi64(a,a));
  return pfirst(tmp) + pfirst(_mm_shuffle_epi32(tmp, 1));
}

template<> EIGEN_STRONG_INLINE Packet4i preduxp<Packet4i>(const Packet4i* vecs)
{
  Packet4i tmp0, tmp1, tmp2;
  tmp0 = _mm_unpacklo_epi32(vecs[0], vecs[1]);
  tmp1 = _mm_unpackhi_epi32(vecs[0], vecs[1]);
  tmp2 = _mm_unpackhi_epi32(vecs[2], vecs[3]);
  tmp0 = _mm_add_epi32(tmp0, tmp1);
  tmp1 = _mm_unpacklo_epi32(vecs[2], vecs[3]);
  tmp1 = _mm_add_epi32(tmp1, tmp2);
  tmp2 = _mm_unpacklo_epi64(tmp0, tmp1);
  tmp0 = _mm_unpackhi_epi64(tmp0, tmp1);
  return _mm_add_epi32(tmp0, tmp2);
}
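
// Illustration (not part of the original file): preduxp effectively
// transposes the four input packets with the unpack lo/hi shuffles and sums
// the transposed rows, so lane k of the result equals predux(vecs[k]).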

// Other reduction functions:

// mul
template<> EIGEN_STRONG_INLINE float predux_mul<Packet4f>(const Packet4f& a)
{
  Packet4f tmp = _mm_mul_ps(a, _mm_movehl_ps(a,a));
  return pfirst(_mm_mul_ss(tmp, _mm_shuffle_ps(tmp,tmp, 1)));
}
template<> EIGEN_STRONG_INLINE double predux_mul<Packet2d>(const Packet2d& a)
{
  return pfirst(_mm_mul_sd(a, _mm_unpackhi_pd(a,a)));
}
template<> EIGEN_STRONG_INLINE int predux_mul<Packet4i>(const Packet4i& a)
{
  // after some experiments, it seems this is the fastest way to implement it
  // for GCC (e.g., reusing pmul is very slow!)
  // TODO try to call _mm_mul_epu32 directly
  EIGEN_ALIGN16 int aux[4];
  pstore(aux, a);
  return (aux[0] * aux[1]) * (aux[2] * aux[3]);
}

// min
template<> EIGEN_STRONG_INLINE float predux_min<Packet4f>(const Packet4f& a)
{
  Packet4f tmp = _mm_min_ps(a, _mm_movehl_ps(a,a));
  return pfirst(_mm_min_ss(tmp, _mm_shuffle_ps(tmp,tmp, 1)));
}
template<> EIGEN_STRONG_INLINE double predux_min<Packet2d>(const Packet2d& a)
{
  return pfirst(_mm_min_sd(a, _mm_unpackhi_pd(a,a)));
}
template<> EIGEN_STRONG_INLINE int predux_min<Packet4i>(const Packet4i& a)
{
  // after some experiments, it seems this is the fastest way to implement it
  // for GCC (e.g., it does not like using std::min after the pstore!)
  EIGEN_ALIGN16 int aux[4];
  pstore(aux, a);
  register int aux0 = aux[0]<aux[1] ? aux[0] : aux[1];
  register int aux2 = aux[2]<aux[3] ? aux[2] : aux[3];
  return aux0<aux2 ? aux0 : aux2;
}

// max
template<> EIGEN_STRONG_INLINE float predux_max<Packet4f>(const Packet4f& a)
{
  Packet4f tmp = _mm_max_ps(a, _mm_movehl_ps(a,a));
  return pfirst(_mm_max_ss(tmp, _mm_shuffle_ps(tmp,tmp, 1)));
}
template<> EIGEN_STRONG_INLINE double predux_max<Packet2d>(const Packet2d& a)
{
  return pfirst(_mm_max_sd(a, _mm_unpackhi_pd(a,a)));
}
template<> EIGEN_STRONG_INLINE int predux_max<Packet4i>(const Packet4i& a)
{
  // after some experiments, it seems this is the fastest way to implement it
  // for GCC (e.g., it does not like using std::max after the pstore!)
  EIGEN_ALIGN16 int aux[4];
  pstore(aux, a);
  register int aux0 = aux[0]>aux[1] ? aux[0] : aux[1];
  register int aux2 = aux[2]>aux[3] ? aux[2] : aux[3];
  return aux0>aux2 ? aux0 : aux2;
}

#if (defined __GNUC__)
// template <> EIGEN_STRONG_INLINE Packet4f pmadd(const Packet4f& a, const Packet4f& b, const Packet4f& c)
// {
//   Packet4f res = b;
//   asm("mulps %[a], %[b] \n\taddps %[c], %[b]" : [b] "+x" (res) : [a] "x" (a), [c] "x" (c));
//   return res;
// }
// EIGEN_STRONG_INLINE Packet4i _mm_alignr_epi8(const Packet4i& a, const Packet4i& b, const int i)
// {
//   Packet4i res = a;
//   asm("palignr %[i], %[a], %[b] " : [b] "+x" (res) : [a] "x" (a), [i] "i" (i));
//   return res;
// }
#endif

#ifdef EIGEN_VECTORIZE_SSSE3
// SSSE3 versions
template<int Offset>
struct palign_impl<Offset,Packet4f>
{
  static EIGEN_STRONG_INLINE void run(Packet4f& first, const Packet4f& second)
  {
    if (Offset!=0)
      first = _mm_castsi128_ps(_mm_alignr_epi8(_mm_castps_si128(second), _mm_castps_si128(first), Offset*4));
  }
};

template<int Offset>
struct palign_impl<Offset,Packet4i>
{
  static EIGEN_STRONG_INLINE void run(Packet4i& first, const Packet4i& second)
  {
    if (Offset!=0)
      first = _mm_alignr_epi8(second,first, Offset*4);
  }
};

template<int Offset>
struct palign_impl<Offset,Packet2d>
{
  static EIGEN_STRONG_INLINE void run(Packet2d& first, const Packet2d& second)
  {
    if (Offset==1)
      first = _mm_castsi128_pd(_mm_alignr_epi8(_mm_castpd_si128(second), _mm_castpd_si128(first), 8));
  }
};
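
// Illustration (not part of the original file): palign_impl shifts the pair
// (first, second) left by Offset scalars, e.g. with first = [f0 f1 f2 f3],
// second = [s0 s1 s2 s3] and Offset==1 the result is [f1 f2 f3 s0];
// _mm_alignr_epi8 does this in a single instruction using a byte shift of
// Offset*4.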
#else
// SSE2 versions
template<int Offset>
struct palign_impl<Offset,Packet4f>
{
  static EIGEN_STRONG_INLINE void run(Packet4f& first, const Packet4f& second)
  {
    if (Offset==1)
    {
      first = _mm_move_ss(first,second);
      first = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(first),0x39));
    }
    else if (Offset==2)
    {
      first = _mm_movehl_ps(first,first);
      first = _mm_movelh_ps(first,second);
    }
    else if (Offset==3)
    {
      first = _mm_move_ss(first,second);
      first = _mm_shuffle_ps(first,second,0x93);
    }
  }
};

template<int Offset>
struct palign_impl<Offset,Packet4i>
{
  static EIGEN_STRONG_INLINE void run(Packet4i& first, const Packet4i& second)
  {
    if (Offset==1)
    {
      first = _mm_castps_si128(_mm_move_ss(_mm_castsi128_ps(first),_mm_castsi128_ps(second)));
      first = _mm_shuffle_epi32(first,0x39);
    }
    else if (Offset==2)
    {
      first = _mm_castps_si128(_mm_movehl_ps(_mm_castsi128_ps(first),_mm_castsi128_ps(first)));
      first = _mm_castps_si128(_mm_movelh_ps(_mm_castsi128_ps(first),_mm_castsi128_ps(second)));
    }
    else if (Offset==3)
    {
      first = _mm_castps_si128(_mm_move_ss(_mm_castsi128_ps(first),_mm_castsi128_ps(second)));
      first = _mm_castps_si128(_mm_shuffle_ps(_mm_castsi128_ps(first),_mm_castsi128_ps(second),0x93));
    }
  }
};

template<int Offset>
struct palign_impl<Offset,Packet2d>
{
  static EIGEN_STRONG_INLINE void run(Packet2d& first, const Packet2d& second)
  {
    if (Offset==1)
    {
      first = _mm_castps_pd(_mm_movehl_ps(_mm_castpd_ps(first),_mm_castpd_ps(first)));
      first = _mm_castps_pd(_mm_movelh_ps(_mm_castpd_ps(first),_mm_castpd_ps(second)));
    }
  }
};
#endif

} // end namespace internal

} // end namespace Eigen

#endif // EIGEN_PACKET_MATH_SSE_H