SelfadjointMatrixVector.h
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2008-2009 Gael Guennebaud <gael.guennebaud@inria.fr>
//
// Eigen is free software; you can redistribute it and/or
// modify it under the terms of the GNU Lesser General Public
// License as published by the Free Software Foundation; either
// version 3 of the License, or (at your option) any later version.
//
// Alternatively, you can redistribute it and/or
// modify it under the terms of the GNU General Public License as
// published by the Free Software Foundation; either version 2 of
// the License, or (at your option) any later version.
//
// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY
// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public
// License and a copy of the GNU General Public License along with
// Eigen. If not, see <http://www.gnu.org/licenses/>.

#ifndef EIGEN_SELFADJOINT_MATRIX_VECTOR_H
#define EIGEN_SELFADJOINT_MATRIX_VECTOR_H

namespace Eigen {

namespace internal {

/* Optimized selfadjoint matrix * vector product:
 * This algorithm processes 2 columns at once, which both reduces the
 * number of loads/stores of the result by a factor of 2 and reduces
 * the instruction dependencies.
 */
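/* Usage sketch: this kernel is what a selfadjoint matrix * vector expression
 * reaches through the SelfadjointProductMatrix wrapper defined further below.
 * A minimal example, assuming dense double matrices/vectors of matching size:
 *
 *   #include <Eigen/Dense>
 *   using namespace Eigen;
 *
 *   MatrixXd A = MatrixXd::Random(100,100);
 *   VectorXd x = VectorXd::Random(100), y = VectorXd::Zero(100);
 *   y.noalias() += 2.0 * A.selfadjointView<Lower>() * x;  // only the lower
 *                                                         // triangle of A is read
 */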

template<typename Scalar, typename Index, int StorageOrder, int UpLo, bool ConjugateLhs, bool ConjugateRhs, int Version=Specialized>
struct selfadjoint_matrix_vector_product;

template<typename Scalar, typename Index, int StorageOrder, int UpLo, bool ConjugateLhs, bool ConjugateRhs, int Version>
struct selfadjoint_matrix_vector_product
{
static EIGEN_DONT_INLINE void run(
  Index size,
  const Scalar*  lhs, Index lhsStride,
  const Scalar* _rhs, Index rhsIncr,
  Scalar* res,
  Scalar alpha)
{
  typedef typename packet_traits<Scalar>::type Packet;
  typedef typename NumTraits<Scalar>::Real RealScalar;
  const Index PacketSize = sizeof(Packet)/sizeof(Scalar);

  enum {
    IsRowMajor = StorageOrder==RowMajor ? 1 : 0,
    IsLower = UpLo == Lower ? 1 : 0,
    FirstTriangular = IsRowMajor == IsLower
  };
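  // FirstTriangular is set when the stored triangle occupies the beginning of
  // each stored major vector (column-major Upper, or row-major Lower); in that
  // case the inner loops below visit indices i<j, otherwise i>j, so only the
  // stored triangle is ever read.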

  conj_helper<Scalar,Scalar,NumTraits<Scalar>::IsComplex && EIGEN_LOGICAL_XOR(ConjugateLhs,  IsRowMajor), ConjugateRhs> cj0;
  conj_helper<Scalar,Scalar,NumTraits<Scalar>::IsComplex && EIGEN_LOGICAL_XOR(ConjugateLhs, !IsRowMajor), ConjugateRhs> cj1;
  conj_helper<RealScalar,Scalar,false, ConjugateRhs> cjd; // for the real diagonal coefficients handled below

  conj_helper<Packet,Packet,NumTraits<Scalar>::IsComplex && EIGEN_LOGICAL_XOR(ConjugateLhs,  IsRowMajor), ConjugateRhs> pcj0;
  conj_helper<Packet,Packet,NumTraits<Scalar>::IsComplex && EIGEN_LOGICAL_XOR(ConjugateLhs, !IsRowMajor), ConjugateRhs> pcj1;

  Scalar cjAlpha = ConjugateRhs ? conj(alpha) : alpha;

  // FIXME this copy is now handled outside product_selfadjoint_vector, so it could probably be removed.
  // If the rhs is not sequentially stored in memory, we copy it to a temporary buffer
  // because we need to extract packets from it.
  ei_declare_aligned_stack_constructed_variable(Scalar,rhs,size,rhsIncr==1 ? const_cast<Scalar*>(_rhs) : 0);
  if (rhsIncr!=1)
  {
    const Scalar* it = _rhs;
    for (Index i=0; i<size; ++i, it+=rhsIncr)
      rhs[i] = *it;
  }

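  // Most of the triangle is processed two columns at a time; 'bound' is kept
  // even (the mask 0xfffffffe clears the last bit) so the main loop can advance
  // by 2, and the remaining columns fall through to the one-column loop below.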
  Index bound = (std::max)(Index(0),size-8) & 0xfffffffe;
  if (FirstTriangular)
    bound = size - bound;

  for (Index j=FirstTriangular ? bound : 0;
       j<(FirstTriangular ? size : bound);j+=2)
  {
    register const Scalar* EIGEN_RESTRICT A0 = lhs + j*lhsStride;
    register const Scalar* EIGEN_RESTRICT A1 = lhs + (j+1)*lhsStride;

    Scalar t0 = cjAlpha * rhs[j];
    Packet ptmp0 = pset1<Packet>(t0);
    Scalar t1 = cjAlpha * rhs[j+1];
    Packet ptmp1 = pset1<Packet>(t1);

    Scalar t2(0);
    Packet ptmp2 = pset1<Packet>(t2);
    Scalar t3(0);
    Packet ptmp3 = pset1<Packet>(t3);
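    // t0/t1 hold cjAlpha*rhs[j] and cjAlpha*rhs[j+1] and scale the stored
    // columns j and j+1; t2/t3 accumulate the mirrored contributions of those
    // columns (conjugated lhs entries times rhs[i]) which, by self-adjointness,
    // belong to res[j] and res[j+1] and are added in after the loops.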

    size_t starti = FirstTriangular ? 0 : j+2;
    size_t endi   = FirstTriangular ? j : size;
    size_t alignedStart = (starti) + internal::first_aligned(&res[starti], endi-starti);
    size_t alignedEnd = alignedStart + ((endi-alignedStart)/(PacketSize))*(PacketSize);
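    // alignedStart is the first index at which res can be accessed with aligned
    // packet loads/stores; alignedEnd rounds the remaining length down to a
    // multiple of PacketSize. The scalar loops before and after the vectorized
    // loop handle the unaligned head and the leftover tail.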

    // TODO make sure this product is a real * complex and that the rhs is properly conjugated if needed
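    // The diagonal of a selfadjoint matrix is real, so only the real parts of
    // A0[j] and A1[j+1] contribute here.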
    res[j]   += cjd.pmul(internal::real(A0[j]), t0);
    res[j+1] += cjd.pmul(internal::real(A1[j+1]), t1);
    if(FirstTriangular)
    {
      res[j] += cj0.pmul(A1[j], t1);
      t3 += cj1.pmul(A1[j], rhs[j]);
    }
    else
    {
      res[j+1] += cj0.pmul(A0[j+1],t0);
      t2 += cj1.pmul(A0[j+1], rhs[j+1]);
    }

    for (size_t i=starti; i<alignedStart; ++i)
    {
      res[i] += t0 * A0[i] + t1 * A1[i];
      t2 += conj(A0[i]) * rhs[i];
      t3 += conj(A1[i]) * rhs[i];
    }
    // Yes, this is an optimization for gcc 4.3 and 4.4 (=> huge speed up);
    // gcc 4.2 performs this optimization automatically.
    const Scalar* EIGEN_RESTRICT a0It  = A0  + alignedStart;
    const Scalar* EIGEN_RESTRICT a1It  = A1  + alignedStart;
    const Scalar* EIGEN_RESTRICT rhsIt = rhs + alignedStart;
    Scalar* EIGEN_RESTRICT resIt = res + alignedStart;
    for (size_t i=alignedStart; i<alignedEnd; i+=PacketSize)
    {
      Packet A0i = ploadu<Packet>(a0It);  a0It  += PacketSize;
      Packet A1i = ploadu<Packet>(a1It);  a1It  += PacketSize;
      Packet Bi  = ploadu<Packet>(rhsIt); rhsIt += PacketSize; // FIXME should be aligned in most cases
      Packet Xi  = pload <Packet>(resIt);

      Xi    = pcj0.pmadd(A0i,ptmp0, pcj0.pmadd(A1i,ptmp1,Xi));
      ptmp2 = pcj1.pmadd(A0i, Bi, ptmp2);
      ptmp3 = pcj1.pmadd(A1i, Bi, ptmp3);
      pstore(resIt,Xi); resIt += PacketSize;
    }
    for (size_t i=alignedEnd; i<endi; i++)
    {
      res[i] += cj0.pmul(A0[i], t0) + cj0.pmul(A1[i],t1);
      t2 += cj1.pmul(A0[i], rhs[i]);
      t3 += cj1.pmul(A1[i], rhs[i]);
    }

    res[j]   += alpha * (t2 + predux(ptmp2));
    res[j+1] += alpha * (t3 + predux(ptmp3));
  }
  for (Index j=FirstTriangular ? 0 : bound;j<(FirstTriangular ? bound : size);j++)
  {
    register const Scalar* EIGEN_RESTRICT A0 = lhs + j*lhsStride;

    Scalar t1 = cjAlpha * rhs[j];
    Scalar t2(0);
    // TODO make sure this product is a real * complex and that the rhs is properly conjugated if needed
    res[j] += cjd.pmul(internal::real(A0[j]), t1);
    for (Index i=FirstTriangular ? 0 : j+1; i<(FirstTriangular ? j : size); i++)
    {
      res[i] += cj0.pmul(A0[i], t1);
      t2 += cj1.pmul(A0[i], rhs[i]);
    }
    res[j] += alpha * t2;
  }
}
};

} // end namespace internal

/***************************************************************************
* Wrapper to product_selfadjoint_vector
***************************************************************************/

namespace internal {
template<typename Lhs, int LhsMode, typename Rhs>
struct traits<SelfadjointProductMatrix<Lhs,LhsMode,false,Rhs,0,true> >
  : traits<ProductBase<SelfadjointProductMatrix<Lhs,LhsMode,false,Rhs,0,true>, Lhs, Rhs> >
{};
}

template<typename Lhs, int LhsMode, typename Rhs>
struct SelfadjointProductMatrix<Lhs,LhsMode,false,Rhs,0,true>
  : public ProductBase<SelfadjointProductMatrix<Lhs,LhsMode,false,Rhs,0,true>, Lhs, Rhs >
{
  EIGEN_PRODUCT_PUBLIC_INTERFACE(SelfadjointProductMatrix)

  enum {
    LhsUpLo = LhsMode&(Upper|Lower)
  };

  SelfadjointProductMatrix(const Lhs& lhs, const Rhs& rhs) : Base(lhs,rhs) {}

  template<typename Dest> void scaleAndAddTo(Dest& dest, Scalar alpha) const
  {
    typedef typename Dest::Scalar ResScalar;
    typedef typename Base::RhsScalar RhsScalar;
    typedef Map<Matrix<ResScalar,Dynamic,1>, Aligned> MappedDest;

    eigen_assert(dest.rows()==m_lhs.rows() && dest.cols()==m_rhs.cols());

    typename internal::add_const_on_value_type<ActualLhsType>::type lhs = LhsBlasTraits::extract(m_lhs);
    typename internal::add_const_on_value_type<ActualRhsType>::type rhs = RhsBlasTraits::extract(m_rhs);

    Scalar actualAlpha = alpha * LhsBlasTraits::extractScalarFactor(m_lhs)
                               * RhsBlasTraits::extractScalarFactor(m_rhs);

    enum {
      EvalToDest = (Dest::InnerStrideAtCompileTime==1),
      UseRhs = (_ActualRhsType::InnerStrideAtCompileTime==1)
    };
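    // EvalToDest / UseRhs: the destination and the rhs can be used in place
    // only if their inner stride is 1; otherwise they are copied into the
    // aligned temporaries declared below, and dest is copied back at the end.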

    internal::gemv_static_vector_if<ResScalar,Dest::SizeAtCompileTime,Dest::MaxSizeAtCompileTime,!EvalToDest> static_dest;
    internal::gemv_static_vector_if<RhsScalar,_ActualRhsType::SizeAtCompileTime,_ActualRhsType::MaxSizeAtCompileTime,!UseRhs> static_rhs;

    ei_declare_aligned_stack_constructed_variable(ResScalar,actualDestPtr,dest.size(),
                                                  EvalToDest ? dest.data() : static_dest.data());

    ei_declare_aligned_stack_constructed_variable(RhsScalar,actualRhsPtr,rhs.size(),
                                                  UseRhs ? const_cast<RhsScalar*>(rhs.data()) : static_rhs.data());

    if(!EvalToDest)
    {
      #ifdef EIGEN_DENSE_STORAGE_CTOR_PLUGIN
      int size = dest.size();
      EIGEN_DENSE_STORAGE_CTOR_PLUGIN
      #endif
      MappedDest(actualDestPtr, dest.size()) = dest;
    }

    if(!UseRhs)
    {
      #ifdef EIGEN_DENSE_STORAGE_CTOR_PLUGIN
      int size = rhs.size();
      EIGEN_DENSE_STORAGE_CTOR_PLUGIN
      #endif
      Map<typename _ActualRhsType::PlainObject>(actualRhsPtr, rhs.size()) = rhs;
    }


    internal::selfadjoint_matrix_vector_product<Scalar, Index,
        (internal::traits<_ActualLhsType>::Flags&RowMajorBit) ? RowMajor : ColMajor,
        int(LhsUpLo), bool(LhsBlasTraits::NeedToConjugate), bool(RhsBlasTraits::NeedToConjugate)>::run
      (
        lhs.rows(),                             // size
        &lhs.coeffRef(0,0),  lhs.outerStride(), // lhs info
        actualRhsPtr, 1,                        // rhs info
        actualDestPtr,                          // result info
        actualAlpha                             // scale factor
      );

    if(!EvalToDest)
      dest = MappedDest(actualDestPtr, dest.size());
  }
};

namespace internal {
template<typename Lhs, typename Rhs, int RhsMode>
struct traits<SelfadjointProductMatrix<Lhs,0,true,Rhs,RhsMode,false> >
  : traits<ProductBase<SelfadjointProductMatrix<Lhs,0,true,Rhs,RhsMode,false>, Lhs, Rhs> >
{};
}

template<typename Lhs, typename Rhs, int RhsMode>
struct SelfadjointProductMatrix<Lhs,0,true,Rhs,RhsMode,false>
  : public ProductBase<SelfadjointProductMatrix<Lhs,0,true,Rhs,RhsMode,false>, Lhs, Rhs >
{
  EIGEN_PRODUCT_PUBLIC_INTERFACE(SelfadjointProductMatrix)

  enum {
    RhsUpLo = RhsMode&(Upper|Lower)
  };

  SelfadjointProductMatrix(const Lhs& lhs, const Rhs& rhs) : Base(lhs,rhs) {}

  template<typename Dest> void scaleAndAddTo(Dest& dest, Scalar alpha) const
  {
    // let's simply transpose the product
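    // Computing dest^T = (lhs * rhs)^T = rhs^T * lhs^T turns this
    // vector * selfadjoint-matrix product into the selfadjoint-matrix * vector
    // product handled by the specialization above; transposing swaps the
    // stored triangle, hence the Upper/Lower flip.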
    Transpose<Dest> destT(dest);
    SelfadjointProductMatrix<Transpose<const Rhs>, int(RhsUpLo)==Upper ? Lower : Upper, false,
                             Transpose<const Lhs>, 0, true>(m_rhs.transpose(), m_lhs.transpose()).scaleAndAddTo(destT, alpha);
  }
};
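/* Usage sketch for the vector * matrix path above, a minimal example assuming
 * dense double objects (the expression is evaluated through the transposed
 * matrix * vector kernel):
 *
 *   RowVectorXd xT = RowVectorXd::Random(100);
 *   MatrixXd    A  = MatrixXd::Random(100,100);
 *   RowVectorXd yT = xT * A.selfadjointView<Upper>(); // only the upper
 *                                                     // triangle of A is read
 */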

} // end namespace Eigen

#endif // EIGEN_SELFADJOINT_MATRIX_VECTOR_H