1 // This file is part of Eigen, a lightweight C++ template library
2 // for linear algebra.
3 //
4 // Copyright (C) 2008 Gael Guennebaud <[email protected]>
5 // Copyright (C) 2006-2008 Benoit Jacob <[email protected]>
6 //
7 // This Source Code Form is subject to the terms of the Mozilla
8 // Public License v. 2.0. If a copy of the MPL was not distributed
9 // with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
10
11 #ifndef EIGEN_XPRHELPER_H
12 #define EIGEN_XPRHELPER_H
13
// just a workaround because GCC seems to not really like empty structs
// FIXME: gcc 4.3 generates bad code when strict-aliasing is enabled
// so currently we simply disable this optimization for gcc 4.3
#if EIGEN_COMP_GNUC && !EIGEN_GNUC_AT(4,3)
// Declares trivial default and copy constructors so that X is not a POD-empty struct.
#define EIGEN_EMPTY_STRUCT_CTOR(X) \
    EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE X() {} \
    EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE X(const X& ) {}
#else
#define EIGEN_EMPTY_STRUCT_CTOR(X)
#endif
24
25 namespace Eigen {
26
27 namespace internal {
28
29 template<typename IndexDest, typename IndexSrc>
30 EIGEN_DEVICE_FUNC
convert_index(const IndexSrc & idx)31 inline IndexDest convert_index(const IndexSrc& idx) {
32 // for sizeof(IndexDest)>=sizeof(IndexSrc) compilers should be able to optimize this away:
33 eigen_internal_assert(idx <= NumTraits<IndexDest>::highest() && "Index value to big for target type");
34 return IndexDest(idx);
35 }
36
// true if T can be considered as an integral index (i.e., an integral type or enum)
template<typename T> struct is_valid_index_type
{
  enum { value =
#if EIGEN_HAS_TYPE_TRAITS
    internal::is_integral<T>::value || std::is_enum<T>::value
#elif EIGEN_COMP_MSVC
    // MSVC without <type_traits>: __is_enum is a compiler intrinsic
    internal::is_integral<T>::value || __is_enum(T)
#else
    // without C++11, we use is_convertible to Index instead of is_integral in order to treat enums as Index.
    internal::is_convertible<T,Index>::value && !internal::is_same<T,float>::value && !is_same<T,double>::value
#endif
  };
};
51
// true unless both RowIndices and ColIndices are valid (integral) index types
template<typename RowIndices, typename ColIndices>
struct valid_indexed_view_overload {
  // value is true unless both arguments are plain integral index types, in which
  // case the indexed-view overload must not be selected (plain coefficient access applies).
  enum { value = !(internal::is_valid_index_type<RowIndices>::value && internal::is_valid_index_type<ColIndices>::value) };
};
57
58 // promote_scalar_arg is an helper used in operation between an expression and a scalar, like:
59 // expression * scalar
60 // Its role is to determine how the type T of the scalar operand should be promoted given the scalar type ExprScalar of the given expression.
61 // The IsSupported template parameter must be provided by the caller as: internal::has_ReturnType<ScalarBinaryOpTraits<ExprScalar,T,op> >::value using the proper order for ExprScalar and T.
62 // Then the logic is as follows:
63 // - if the operation is natively supported as defined by IsSupported, then the scalar type is not promoted, and T is returned.
64 // - otherwise, NumTraits<ExprScalar>::Literal is returned if T is implicitly convertible to NumTraits<ExprScalar>::Literal AND that this does not imply a float to integer conversion.
65 // - otherwise, ExprScalar is returned if T is implicitly convertible to ExprScalar AND that this does not imply a float to integer conversion.
66 // - In all other cases, the promoted type is not defined, and the respective operation is thus invalid and not available (SFINAE).
template<typename ExprScalar,typename T, bool IsSupported>
struct promote_scalar_arg;

// Natively supported case: no promotion needed, T is used as-is.
template<typename S,typename T>
struct promote_scalar_arg<S,T,true>
{
  typedef T type;
};
75
// Recursively check safe conversion to PromotedType, and then ExprScalar if they are different.
// - ConvertibleToLiteral: whether T implicitly converts to the current PromotedType candidate.
// - IsSafe: false only in the unsafe real-to-integer case (T is not integer but PromotedType is).
template<typename ExprScalar,typename T,typename PromotedType,
  bool ConvertibleToLiteral = internal::is_convertible<T,PromotedType>::value,
  bool IsSafe = NumTraits<T>::IsInteger || !NumTraits<PromotedType>::IsInteger>
struct promote_scalar_arg_unsupported;

// Start recursion with NumTraits<ExprScalar>::Literal
template<typename S,typename T>
struct promote_scalar_arg<S,T,false> : promote_scalar_arg_unsupported<S,T,typename NumTraits<S>::Literal> {};

// We found a match!
template<typename S,typename T, typename PromotedType>
struct promote_scalar_arg_unsupported<S,T,PromotedType,true,true>
{
  typedef PromotedType type;
};

// No match, but no real-to-integer issues, and ExprScalar and current PromotedType are different,
// so let's try to promote to ExprScalar
template<typename ExprScalar,typename T, typename PromotedType>
struct promote_scalar_arg_unsupported<ExprScalar,T,PromotedType,false,true>
   : promote_scalar_arg_unsupported<ExprScalar,T,ExprScalar>
{};

// Unsafe real-to-integer, let's stop.
template<typename S,typename T, typename PromotedType, bool ConvertibleToLiteral>
struct promote_scalar_arg_unsupported<S,T,PromotedType,ConvertibleToLiteral,false> {};

// T is not even convertible to ExprScalar, let's stop.
template<typename S,typename T>
struct promote_scalar_arg_unsupported<S,T,S,false,true> {};
107
//classes inheriting no_assignment_operator don't generate a default operator=.
class no_assignment_operator
{
  private:
    // declared but never defined: pre-C++11 idiom equivalent to "= delete"
    no_assignment_operator& operator=(const no_assignment_operator&);
  protected:
    EIGEN_DEFAULT_COPY_CONSTRUCTOR(no_assignment_operator)
    EIGEN_DEFAULT_EMPTY_CONSTRUCTOR_AND_DESTRUCTOR(no_assignment_operator)
};
117
/** \internal return the index type with the largest number of bits */
template<typename I1, typename I2>
struct promote_index_type
{
  // pick the wider of the two index types; ties resolve to I1
  typedef typename conditional<(sizeof(I1)<sizeof(I2)), I2, I1>::type type;
};
124
/** \internal If the template parameter Value is Dynamic, this class is just a wrapper around a T variable that
  * can be accessed using value() and setValue().
  * Otherwise, this class is an empty structure and value() just returns the template parameter Value.
  */
template<typename T, int Value> class variable_if_dynamic
{
  public:
    EIGEN_DEFAULT_EMPTY_CONSTRUCTOR_AND_DESTRUCTOR(variable_if_dynamic)
    // the runtime value must agree with the compile-time one
    EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE explicit variable_if_dynamic(T v) { EIGEN_ONLY_USED_FOR_DEBUG(v); eigen_assert(v == T(Value)); }
    EIGEN_DEVICE_FUNC static EIGEN_STRONG_INLINE EIGEN_CONSTEXPR
    T value() { return T(Value); }
    EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE EIGEN_CONSTEXPR
    operator T() const { return T(Value); }
    // no state to store: only assert that the requested value matches the compile-time one
    EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
    void setValue(T v) const { EIGEN_ONLY_USED_FOR_DEBUG(v); eigen_assert(v == T(Value)); }
};
141
// Dynamic case: the value is actually stored at run time.
template<typename T> class variable_if_dynamic<T, Dynamic>
{
    T m_value;
  public:
    EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE explicit variable_if_dynamic(T value = 0) EIGEN_NO_THROW : m_value(value) {}
    EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE T value() const { return m_value; }
    EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE operator T() const { return m_value; }
    EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void setValue(T value) { m_value = value; }
};
151
/** \internal like variable_if_dynamic but for DynamicIndex
  */
template<typename T, int Value> class variable_if_dynamicindex
{
  public:
    EIGEN_EMPTY_STRUCT_CTOR(variable_if_dynamicindex)
    // the runtime value must agree with the compile-time one
    EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE explicit variable_if_dynamicindex(T v) { EIGEN_ONLY_USED_FOR_DEBUG(v); eigen_assert(v == T(Value)); }
    EIGEN_DEVICE_FUNC static EIGEN_STRONG_INLINE EIGEN_CONSTEXPR
    T value() { return T(Value); }
    // fixed value: setting is a no-op
    EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
    void setValue(T) {}
};
164
165 template<typename T> class variable_if_dynamicindex<T, DynamicIndex>
166 {
167 T m_value;
168 EIGEN_DEVICE_FUNC variable_if_dynamicindex() { eigen_assert(false); }
169 public:
170 EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE explicit variable_if_dynamicindex(T value) : m_value(value) {}
171 EIGEN_DEVICE_FUNC T EIGEN_STRONG_INLINE value() const { return m_value; }
172 EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void setValue(T value) { m_value = value; }
173 };
174
/** \internal Default traits for a functor of type T; specializations refine these.
  * Cost is an approximate operation count; PacketAccess tells whether a vectorized
  * packetOp is available; IsRepeatable presumably means repeated evaluation with the
  * same arguments yields the same value — confirm against the functor definitions. */
template<typename T> struct functor_traits
{
  enum
  {
    Cost = 10,
    PacketAccess = false,
    IsRepeatable = false
  };
};
184
template<typename T> struct packet_traits;

template<typename T> struct unpacket_traits;

// Helper for find_best_packet: Stop becomes true when the candidate PacketType fits
// (dynamic size, or Size is a multiple of the packet size), or when no smaller
// half-packet exists (half == PacketType).
template<int Size, typename PacketType,
         bool Stop = Size==Dynamic || (Size%unpacket_traits<PacketType>::size)==0 || is_same<PacketType,typename unpacket_traits<PacketType>::half>::value>
struct find_best_packet_helper;

template< int Size, typename PacketType>
struct find_best_packet_helper<Size,PacketType,true>
{
  typedef PacketType type;
};

// current packet does not evenly divide Size: recurse with the half-sized packet
template<int Size, typename PacketType>
struct find_best_packet_helper<Size,PacketType,false>
{
  typedef typename find_best_packet_helper<Size,typename unpacket_traits<PacketType>::half>::type type;
};

/** \internal Largest packet type for scalar T that evenly fits the compile-time Size. */
template<typename T, int Size>
struct find_best_packet
{
  typedef typename find_best_packet_helper<Size,typename packet_traits<T>::type>::type type;
};
210
#if EIGEN_MAX_STATIC_ALIGN_BYTES>0
// Largest alignment (<= AlignmentBytes) that evenly divides ArrayBytes, obtained by
// halving the candidate down to EIGEN_MIN_ALIGN_BYTES; 0 if none matches.
template<int ArrayBytes, int AlignmentBytes,
         bool Match = bool((ArrayBytes%AlignmentBytes)==0),
         bool TryHalf = bool(EIGEN_MIN_ALIGN_BYTES<AlignmentBytes) >
struct compute_default_alignment_helper
{
  enum { value = 0 };
};

template<int ArrayBytes, int AlignmentBytes, bool TryHalf>
struct compute_default_alignment_helper<ArrayBytes, AlignmentBytes, true, TryHalf> // Match
{
  enum { value = AlignmentBytes };
};

template<int ArrayBytes, int AlignmentBytes>
struct compute_default_alignment_helper<ArrayBytes, AlignmentBytes, false, true> // Try-half
{
  // current packet too large, try with an half-packet
  enum { value = compute_default_alignment_helper<ArrayBytes, AlignmentBytes/2>::value };
};
#else
// If static alignment is disabled, no need to bother.
// This also avoids a division by zero in "bool Match = bool((ArrayBytes%AlignmentBytes)==0)"
template<int ArrayBytes, int AlignmentBytes>
struct compute_default_alignment_helper
{
  enum { value = 0 };
};
#endif

/** \internal Static alignment (in bytes) for a plain array of Size objects of type T. */
template<typename T, int Size> struct compute_default_alignment {
  enum { value = compute_default_alignment_helper<Size*sizeof(T),EIGEN_MAX_STATIC_ALIGN_BYTES>::value };
};

// dynamically sized storage is heap allocated and can use the maximal alignment
template<typename T> struct compute_default_alignment<T,Dynamic> {
  enum { value = EIGEN_MAX_ALIGN_BYTES };
};
249
/** \internal Build a Matrix type whose storage-order option is made consistent with the
  * compile-time vector shape: row-vectors are forced RowMajor, column-vectors ColMajor. */
template<typename _Scalar, int _Rows, int _Cols,
         int _Options = AutoAlign |
                          ( (_Rows==1 && _Cols!=1) ? RowMajor
                          : (_Cols==1 && _Rows!=1) ? ColMajor
                          : EIGEN_DEFAULT_MATRIX_STORAGE_ORDER_OPTION ),
         int _MaxRows = _Rows,
         int _MaxCols = _Cols
> class make_proper_matrix_type
{
    enum {
      IsColVector = _Cols==1 && _Rows!=1,
      IsRowVector = _Rows==1 && _Cols!=1,
      Options = IsColVector ? (_Options | ColMajor) & ~RowMajor
              : IsRowVector ? (_Options | RowMajor) & ~ColMajor
              : _Options
    };
  public:
    typedef Matrix<_Scalar, _Rows, _Cols, Options, _MaxRows, _MaxCols> type;
};
269
/** \internal Expression-level Flags of a plain Matrix/Array with the given template parameters. */
template<typename Scalar, int Rows, int Cols, int Options, int MaxRows, int MaxCols>
class compute_matrix_flags
{
    enum { row_major_bit = Options&RowMajor ? RowMajorBit : 0 };
  public:
    // FIXME currently we still have to handle DirectAccessBit at the expression level to handle DenseCoeffsBase<>
    // and then propagate this information to the evaluator's flags.
    // However, I (Gael) think that DirectAccessBit should only matter at the evaluation stage.
    enum { ret = DirectAccessBit | LvalueBit | NestByRefBit | row_major_bit };
};
280
/** \internal Compile-time size of a _Rows x _Cols matrix; Dynamic if either dimension is Dynamic. */
template<int _Rows, int _Cols> struct size_at_compile_time
{
  enum { ret = (_Rows==Dynamic || _Cols==Dynamic) ? Dynamic : _Rows * _Cols };
};

/** \internal Same as size_at_compile_time, but deduced from an expression type. */
template<typename XprType> struct size_of_xpr_at_compile_time
{
  enum { ret = size_at_compile_time<traits<XprType>::RowsAtCompileTime,traits<XprType>::ColsAtCompileTime>::ret };
};
290
/* plain_matrix_type : the difference from eval is that plain_matrix_type is always a plain matrix type,
 * whereas eval is a const reference in the case of a matrix
 */

template<typename T, typename StorageKind = typename traits<T>::StorageKind> struct plain_matrix_type;
template<typename T, typename BaseClassType, int Flags> struct plain_matrix_type_dense;
template<typename T> struct plain_matrix_type<T,Dense>
{
  typedef typename plain_matrix_type_dense<T,typename traits<T>::XprKind, traits<T>::Flags>::type type;
};
// diagonal expressions evaluate into their own PlainObject type
template<typename T> struct plain_matrix_type<T,DiagonalShape>
{
  typedef typename T::PlainObject type;
};

// Matrix-world expressions: a plain Matrix with matching sizes and storage order.
template<typename T, int Flags> struct plain_matrix_type_dense<T,MatrixXpr,Flags>
{
  typedef Matrix<typename traits<T>::Scalar,
                traits<T>::RowsAtCompileTime,
                traits<T>::ColsAtCompileTime,
                AutoAlign | (Flags&RowMajorBit ? RowMajor : ColMajor),
                traits<T>::MaxRowsAtCompileTime,
                traits<T>::MaxColsAtCompileTime
          > type;
};

// Array-world expressions: a plain Array with matching sizes and storage order.
template<typename T, int Flags> struct plain_matrix_type_dense<T,ArrayXpr,Flags>
{
  typedef Array<typename traits<T>::Scalar,
                traits<T>::RowsAtCompileTime,
                traits<T>::ColsAtCompileTime,
                AutoAlign | (Flags&RowMajorBit ? RowMajor : ColMajor),
                traits<T>::MaxRowsAtCompileTime,
                traits<T>::MaxColsAtCompileTime
          > type;
};
327
/* eval : the return type of eval(). For matrices, this is just a const reference
 * in order to avoid a useless copy
 */

template<typename T, typename StorageKind = typename traits<T>::StorageKind> struct eval;

// generic dense expressions evaluate into their plain matrix/array type
template<typename T> struct eval<T,Dense>
{
  typedef typename plain_matrix_type<T>::type type;
};

template<typename T> struct eval<T,DiagonalShape>
{
  typedef typename plain_matrix_type<T>::type type;
};

// for matrices, no need to evaluate, just use a const reference to avoid a useless copy
template<typename _Scalar, int _Rows, int _Cols, int _Options, int _MaxRows, int _MaxCols>
struct eval<Matrix<_Scalar, _Rows, _Cols, _Options, _MaxRows, _MaxCols>, Dense>
{
  typedef const Matrix<_Scalar, _Rows, _Cols, _Options, _MaxRows, _MaxCols>& type;
};

// likewise for plain arrays
template<typename _Scalar, int _Rows, int _Cols, int _Options, int _MaxRows, int _MaxCols>
struct eval<Array<_Scalar, _Rows, _Cols, _Options, _MaxRows, _MaxCols>, Dense>
{
  typedef const Array<_Scalar, _Rows, _Cols, _Options, _MaxRows, _MaxCols>& type;
};
364
365
/* similar to plain_matrix_type, but using the evaluator's Flags */
template<typename T, typename StorageKind = typename traits<T>::StorageKind> struct plain_object_eval;

template<typename T>
struct plain_object_eval<T,Dense>
{
  // storage order is taken from the evaluator's flags rather than the expression's traits
  typedef typename plain_matrix_type_dense<T,typename traits<T>::XprKind, evaluator<T>::Flags>::type type;
};
374
375
/* plain_matrix_type_column_major : same as plain_matrix_type but guaranteed to be column-major
 */
template<typename T> struct plain_matrix_type_column_major
{
  enum { Rows = traits<T>::RowsAtCompileTime,
         Cols = traits<T>::ColsAtCompileTime,
         MaxRows = traits<T>::MaxRowsAtCompileTime,
         MaxCols = traits<T>::MaxColsAtCompileTime
  };
  // exception: a compile-time row-vector must stay RowMajor
  typedef Matrix<typename traits<T>::Scalar,
                Rows,
                Cols,
                (MaxRows==1&&MaxCols!=1) ? RowMajor : ColMajor,
                MaxRows,
                MaxCols
          > type;
};
393
/* plain_matrix_type_row_major : same as plain_matrix_type but guaranteed to be row-major
 */
template<typename T> struct plain_matrix_type_row_major
{
  enum { Rows = traits<T>::RowsAtCompileTime,
         Cols = traits<T>::ColsAtCompileTime,
         MaxRows = traits<T>::MaxRowsAtCompileTime,
         MaxCols = traits<T>::MaxColsAtCompileTime
  };
  // exception: a compile-time column-vector must stay ColMajor
  typedef Matrix<typename traits<T>::Scalar,
                Rows,
                Cols,
                (MaxCols==1&&MaxRows!=1) ? ColMajor : RowMajor,
                MaxRows,
                MaxCols
          > type;
};
411
/** \internal The reference selector for template expressions. The idea is that we don't
  * need to use references for expressions since they are light weight proxy
  * objects which should generate no copying overhead. */
template <typename T>
struct ref_selector
{
  // objects flagged NestByRefBit (e.g. plain storage) are nested by reference,
  // light-weight proxy expressions by value
  typedef typename conditional<
    bool(traits<T>::Flags & NestByRefBit),
    T const&,
    const T
  >::type type;

  // same selection, without the const qualifier
  typedef typename conditional<
    bool(traits<T>::Flags & NestByRefBit),
    T &,
    T
  >::type non_const_type;
};
430
/** \internal Adds the const qualifier on the value-type of T2 if and only if T1 is a const type */
template<typename T1, typename T2>
struct transfer_constness
{
  // note: the const is added on the value type (e.g. pointee), not on T2 itself
  typedef typename conditional<
    bool(internal::is_const<T1>::value),
    typename internal::add_const_on_value_type<T2>::type,
    T2
  >::type type;
};
441
442
// However, we still need a mechanism to detect whether an expression which is evaluated multiple time
// has to be evaluated into a temporary.
// That's the purpose of this new nested_eval helper:
/** \internal Determines how a given expression should be nested when evaluated multiple times.
  * For example, when you do a * (b+c), Eigen will determine how the expression b+c should be
  * evaluated into the bigger product expression. The choice is between nesting the expression b+c as-is, or
  * evaluating that expression b+c into a temporary variable d, and nest d so that the resulting expression is
  * a*d. Evaluating can be beneficial for example if every coefficient access in the resulting expression causes
  * many coefficient accesses in the nested expressions -- as is the case with matrix product for example.
  *
  * \tparam T the type of the expression being nested.
  * \tparam n the number of coefficient accesses in the nested expression for each coefficient access in the bigger expression.
  * \tparam PlainObject the type of the temporary if needed.
  */
template<typename T, int n, typename PlainObject = typename plain_object_eval<T>::type> struct nested_eval
{
  enum {
    ScalarReadCost = NumTraits<typename traits<T>::Scalar>::ReadCost,
    CoeffReadCost = evaluator<T>::CoeffReadCost,  // NOTE What if an evaluator evaluate itself into a temporary?
                                                  //      Then CoeffReadCost will be small (e.g., 1) but we still have to evaluate, especially if n>1.
                                                  //      This situation is already taken care by the EvalBeforeNestingBit flag, which is turned ON
                                                  //      for all evaluator creating a temporary. This flag is then propagated by the parent evaluators.
                                                  //      Another solution could be to count the number of temps?
    NAsInteger = n == Dynamic ? HugeCost : n,
    // cost if evaluated into a temporary: write once, then read the plain temporary n times
    CostEval   = (NAsInteger+1) * ScalarReadCost + CoeffReadCost,
    // cost if nested as-is: the expression is re-evaluated on each of the n accesses
    CostNoEval =  NAsInteger * CoeffReadCost,
    Evaluate = (int(evaluator<T>::Flags) & EvalBeforeNestingBit) || (int(CostEval) < int(CostNoEval))
  };

  typedef typename conditional<Evaluate, PlainObject, typename ref_selector<T>::type>::type type;
};
474
475 template<typename T>
476 EIGEN_DEVICE_FUNC
477 inline T* const_cast_ptr(const T* ptr)
478 {
479 return const_cast<T*>(ptr);
480 }
481
template<typename Derived, typename XprKind = typename traits<Derived>::XprKind>
struct dense_xpr_base
{
  /* dense_xpr_base should only ever be used on dense expressions, thus falling either into the MatrixXpr or into the ArrayXpr cases */
};

// matrix-world expressions derive from MatrixBase
template<typename Derived>
struct dense_xpr_base<Derived, MatrixXpr>
{
  typedef MatrixBase<Derived> type;
};

// array-world expressions derive from ArrayBase
template<typename Derived>
struct dense_xpr_base<Derived, ArrayXpr>
{
  typedef ArrayBase<Derived> type;
};
499
// Like dense_xpr_base, but dispatching on the storage kind as well;
// the Dense case simply forwards to dense_xpr_base.
template<typename Derived, typename XprKind = typename traits<Derived>::XprKind, typename StorageKind = typename traits<Derived>::StorageKind>
struct generic_xpr_base;

template<typename Derived, typename XprKind>
struct generic_xpr_base<Derived, XprKind, Dense>
{
  typedef typename dense_xpr_base<Derived,XprKind>::type type;
};
508
/** \internal Return type of a cast expression: when the target scalar type equals the
  * current one, the cast is a no-op and a const reference to the source is used instead. */
template<typename XprType, typename CastType> struct cast_return_type
{
  typedef typename XprType::Scalar CurrentScalarType;
  typedef typename remove_all<CastType>::type _CastType;
  typedef typename _CastType::Scalar NewScalarType;
  typedef typename conditional<is_same<CurrentScalarType,NewScalarType>::value,
                              const XprType&,CastType>::type type;
};
517
// Storage type resulting from mixing A and B; only identical types
// (up to constness) are supported.
template <typename A, typename B> struct promote_storage_type;

template <typename A> struct promote_storage_type<A,A>
{
  typedef A ret;
};
// constness does not affect the promoted storage type
template <typename A> struct promote_storage_type<A, const A>
{
  typedef A ret;
};
template <typename A> struct promote_storage_type<const A, A>
{
  typedef A ret;
};
532
/** \internal Specify the "storage kind" of applying a coefficient-wise
  * binary operations between two expressions of kinds A and B respectively.
  * The template parameter Functor permits to specialize the resulting storage kind wrt to
  * the functor.
  * The default rules are as follows:
  * \code
  * A      op A      -> A
  * A      op dense  -> dense
  * dense  op B      -> dense
  * sparse op dense  -> sparse
  * dense  op sparse -> sparse
  * \endcode
  */
template <typename A, typename B, typename Functor> struct cwise_promote_storage_type;

template <typename A, typename Functor> struct cwise_promote_storage_type<A,A,Functor> { typedef A ret; };
template <typename Functor> struct cwise_promote_storage_type<Dense,Dense,Functor> { typedef Dense ret; };
template <typename A, typename Functor> struct cwise_promote_storage_type<A,Dense,Functor> { typedef Dense ret; };
template <typename B, typename Functor> struct cwise_promote_storage_type<Dense,B,Functor> { typedef Dense ret; };
template <typename Functor> struct cwise_promote_storage_type<Sparse,Dense,Functor> { typedef Sparse ret; };
template <typename Functor> struct cwise_promote_storage_type<Dense,Sparse,Functor> { typedef Sparse ret; };

/** \internal Storage order of the result of a coefficient-wise binary operation.
  * By default the left-hand order wins; when one operand is sparse, the sparse
  * operand's order wins. */
template <typename LhsKind, typename RhsKind, int LhsOrder, int RhsOrder> struct cwise_promote_storage_order {
  enum { value = LhsOrder };
};

template <typename LhsKind, int LhsOrder, int RhsOrder> struct cwise_promote_storage_order<LhsKind,Sparse,LhsOrder,RhsOrder> { enum { value = RhsOrder }; };
template <typename RhsKind, int LhsOrder, int RhsOrder> struct cwise_promote_storage_order<Sparse,RhsKind,LhsOrder,RhsOrder> { enum { value = LhsOrder }; };
template <int Order> struct cwise_promote_storage_order<Sparse,Sparse,Order,Order> { enum { value = Order }; };
562
563
/** \internal Specify the "storage kind" of multiplying an expression of kind A with kind B.
  * The template parameter ProductTag permits to specialize the resulting storage kind wrt to
  * some compile-time properties of the product: GemmProduct, GemvProduct, OuterProduct, InnerProduct.
  * The default rules are as follows:
  * \code
  * K * K            -> K
  * dense * K        -> dense
  * K * dense        -> dense
  * diag * K         -> K
  * K * diag         -> K
  * Perm * K         -> K
  * K * Perm         -> K
  * \endcode
  */
template <typename A, typename B, int ProductTag> struct product_promote_storage_type;

template <typename A, int ProductTag> struct product_promote_storage_type<A, A, ProductTag> { typedef A ret;};
template <int ProductTag> struct product_promote_storage_type<Dense, Dense, ProductTag> { typedef Dense ret;};
template <typename A, int ProductTag> struct product_promote_storage_type<A, Dense, ProductTag> { typedef Dense ret; };
template <typename B, int ProductTag> struct product_promote_storage_type<Dense, B, ProductTag> { typedef Dense ret; };

// diagonal operands are transparent: the other operand's kind wins
template <typename A, int ProductTag> struct product_promote_storage_type<A, DiagonalShape, ProductTag> { typedef A ret; };
template <typename B, int ProductTag> struct product_promote_storage_type<DiagonalShape, B, ProductTag> { typedef B ret; };
template <int ProductTag> struct product_promote_storage_type<Dense, DiagonalShape, ProductTag> { typedef Dense ret; };
template <int ProductTag> struct product_promote_storage_type<DiagonalShape, Dense, ProductTag> { typedef Dense ret; };

// permutation operands are transparent as well
template <typename A, int ProductTag> struct product_promote_storage_type<A, PermutationStorage, ProductTag> { typedef A ret; };
template <typename B, int ProductTag> struct product_promote_storage_type<PermutationStorage, B, ProductTag> { typedef B ret; };
template <int ProductTag> struct product_promote_storage_type<Dense, PermutationStorage, ProductTag> { typedef Dense ret; };
template <int ProductTag> struct product_promote_storage_type<PermutationStorage, Dense, ProductTag> { typedef Dense ret; };
594
/** \internal gives the plain matrix or array type to store a row/column/diagonal of a matrix type.
  * \tparam Scalar optional parameter allowing to pass a different scalar type than the one of the MatrixType.
  */
template<typename ExpressionType, typename Scalar = typename ExpressionType::Scalar>
struct plain_row_type
{
  // a row is 1 x Cols and always stored row-major
  typedef Matrix<Scalar, 1, ExpressionType::ColsAtCompileTime,
                 int(ExpressionType::PlainObject::Options) | int(RowMajor), 1, ExpressionType::MaxColsAtCompileTime> MatrixRowType;
  typedef Array<Scalar, 1, ExpressionType::ColsAtCompileTime,
                 int(ExpressionType::PlainObject::Options) | int(RowMajor), 1, ExpressionType::MaxColsAtCompileTime> ArrayRowType;

  // pick Matrix or Array depending on the world the expression lives in
  typedef typename conditional<
    is_same< typename traits<ExpressionType>::XprKind, MatrixXpr >::value,
    MatrixRowType,
    ArrayRowType
  >::type type;
};
612
613 template<typename ExpressionType, typename Scalar = typename ExpressionType::Scalar>
614 struct plain_col_type
615 {
616 typedef Matrix<Scalar, ExpressionType::RowsAtCompileTime, 1,
617 ExpressionType::PlainObject::Options & ~RowMajor, ExpressionType::MaxRowsAtCompileTime, 1> MatrixColType;
618 typedef Array<Scalar, ExpressionType::RowsAtCompileTime, 1,
619 ExpressionType::PlainObject::Options & ~RowMajor, ExpressionType::MaxRowsAtCompileTime, 1> ArrayColType;
620
621 typedef typename conditional<
622 is_same< typename traits<ExpressionType>::XprKind, MatrixXpr >::value,
623 MatrixColType,
624 ArrayColType
625 >::type type;
626 };
627
628 template<typename ExpressionType, typename Scalar = typename ExpressionType::Scalar>
629 struct plain_diag_type
630 {
631 enum { diag_size = EIGEN_SIZE_MIN_PREFER_DYNAMIC(ExpressionType::RowsAtCompileTime, ExpressionType::ColsAtCompileTime),
632 max_diag_size = EIGEN_SIZE_MIN_PREFER_FIXED(ExpressionType::MaxRowsAtCompileTime, ExpressionType::MaxColsAtCompileTime)
633 };
634 typedef Matrix<Scalar, diag_size, 1, ExpressionType::PlainObject::Options & ~RowMajor, max_diag_size, 1> MatrixDiagType;
635 typedef Array<Scalar, diag_size, 1, ExpressionType::PlainObject::Options & ~RowMajor, max_diag_size, 1> ArrayDiagType;
636
637 typedef typename conditional<
638 is_same< typename traits<ExpressionType>::XprKind, MatrixXpr >::value,
639 MatrixDiagType,
640 ArrayDiagType
641 >::type type;
642 };
643
/** \internal Type of a constant expression (CwiseNullaryOp of scalar_constant_op) matching
  * the shape, storage order and world (Matrix vs Array) of the expression Expr. */
template<typename Expr,typename Scalar = typename Expr::Scalar>
struct plain_constant_type
{
  enum { Options = (traits<Expr>::Flags&RowMajorBit)?RowMajor:0 };

  typedef Array<Scalar, traits<Expr>::RowsAtCompileTime, traits<Expr>::ColsAtCompileTime,
                Options, traits<Expr>::MaxRowsAtCompileTime,traits<Expr>::MaxColsAtCompileTime> array_type;

  typedef Matrix<Scalar, traits<Expr>::RowsAtCompileTime, traits<Expr>::ColsAtCompileTime,
                 Options, traits<Expr>::MaxRowsAtCompileTime,traits<Expr>::MaxColsAtCompileTime> matrix_type;

  typedef CwiseNullaryOp<scalar_constant_op<Scalar>, const typename conditional<is_same< typename traits<Expr>::XprKind, MatrixXpr >::value, matrix_type, array_type>::type > type;
};
657
/** \internal An expression is writable (lvalue) when it is non-const and exposes LvalueBit. */
template<typename ExpressionType>
struct is_lvalue
{
  enum { value = (!bool(is_const<ExpressionType>::value)) &&
                 bool(traits<ExpressionType>::Flags & LvalueBit) };
};
664
// Compile-time detection of expressions known to be diagonal.
template<typename T> struct is_diagonal
{ enum { ret = false }; };

template<typename T> struct is_diagonal<DiagonalBase<T> >
{ enum { ret = true }; };

template<typename T> struct is_diagonal<DiagonalWrapper<T> >
{ enum { ret = true }; };

template<typename T, int S> struct is_diagonal<DiagonalMatrix<T,S> >
{ enum { ret = true }; };
676
677
// Compile-time detection of identity expressions (nullary expressions of scalar_identity_op).
template<typename T> struct is_identity
{ enum { value = false }; };

template<typename T> struct is_identity<CwiseNullaryOp<internal::scalar_identity_op<typename T::Scalar>, T> >
{ enum { value = true }; };
683
684
// Shape obtained by merging two shapes, e.g. viewing a dense expression as triangular.
template<typename S1, typename S2> struct glue_shapes;
template<> struct glue_shapes<DenseShape,TriangularShape> { typedef TriangularShape type; };
687
// Whether T1 and T2 could refer to the same dense storage: both must expose direct
// access and share the same scalar type. The actual run-time check is is_same_dense below.
template<typename T1, typename T2>
struct possibly_same_dense {
  enum { value = has_direct_access<T1>::ret && has_direct_access<T2>::ret && is_same<typename T1::Scalar,typename T2::Scalar>::value };
};
692
/** \internal Run-time check that two direct-access expressions refer to the same data
  * with identical inner and outer strides. */
template<typename T1, typename T2>
EIGEN_DEVICE_FUNC
bool is_same_dense(const T1 &mat1, const T2 &mat2, typename enable_if<possibly_same_dense<T1,T2>::value>::type * = 0)
{
  return (mat1.data()==mat2.data()) && (mat1.innerStride()==mat2.innerStride()) && (mat1.outerStride()==mat2.outerStride());
}

// Fallback when the types cannot possibly alias the same storage: always false.
template<typename T1, typename T2>
EIGEN_DEVICE_FUNC
bool is_same_dense(const T1 &, const T2 &, typename enable_if<!possibly_same_dense<T1,T2>::value>::type * = 0)
{
  return false;
}
706
// Internal helper defining the cost of a scalar division for the type T.
// The default heuristic can be specialized for each scalar type and architecture.
template<typename T,bool Vectorized=false,typename EnableIf = void>
struct scalar_div_cost {
  enum { value = 8*NumTraits<T>::MulCost };
};

// complex division: two real divisions plus additional multiplies and adds
template<typename T,bool Vectorized>
struct scalar_div_cost<std::complex<T>, Vectorized> {
  enum { value = 2*scalar_div_cost<T>::value
               + 6*NumTraits<T>::MulCost
               + 3*NumTraits<T>::AddCost
  };
};

// hard-coded costs for long division; the SFINAE condition enables these
// specializations only on platforms where long is a 64-bit type
template<bool Vectorized>
struct scalar_div_cost<signed long,Vectorized,typename conditional<sizeof(long)==8,void,false_type>::type> { enum { value = 24 }; };
template<bool Vectorized>
struct scalar_div_cost<unsigned long,Vectorized,typename conditional<sizeof(long)==8,void,false_type>::type> { enum { value = 21 }; };
727
728
#ifdef EIGEN_DEBUG_ASSIGN
// Human-readable name of a traversal kind, for debugging assignment decisions.
std::string demangle_traversal(int t)
{
  switch(t)
  {
    case DefaultTraversal:          return "DefaultTraversal";
    case LinearTraversal:           return "LinearTraversal";
    case InnerVectorizedTraversal:  return "InnerVectorizedTraversal";
    case LinearVectorizedTraversal: return "LinearVectorizedTraversal";
    case SliceVectorizedTraversal:  return "SliceVectorizedTraversal";
    default:                        return "?";
  }
}
// Human-readable name of an unrolling kind.
std::string demangle_unrolling(int t)
{
  switch(t)
  {
    case NoUnrolling:       return "NoUnrolling";
    case InnerUnrolling:    return "InnerUnrolling";
    case CompleteUnrolling: return "CompleteUnrolling";
    default:                return "?";
  }
}
// Render a Flags bit-field as a " | "-separated list of bit names.
std::string demangle_flags(int f)
{
  std::string res;
  if(f&RowMajorBit)                res += " | RowMajor";
  if(f&PacketAccessBit)            res += " | Packet";
  if(f&LinearAccessBit)            res += " | Linear";
  if(f&LvalueBit)                  res += " | Lvalue";
  if(f&DirectAccessBit)            res += " | Direct";
  if(f&NestByRefBit)               res += " | NestByRef";
  if(f&NoPreferredStorageOrderBit) res += " | NoPreferredStorageOrderBit";

  return res;
}
#endif
760
761 } // end namespace internal
762
763
764 /** \class ScalarBinaryOpTraits
765 * \ingroup Core_Module
766 *
767 * \brief Determines whether the given binary operation of two numeric types is allowed and what the scalar return type is.
768 *
769 * This class permits to control the scalar return type of any binary operation performed on two different scalar types through (partial) template specializations.
770 *
  * For instance, let \c U1, \c U2 and \c U3 be three user-defined scalar types for which most operations between instances of \c U1 and \c U2 return a \c U3.
  * You can let %Eigen know that by defining:
773 \code
774 template<typename BinaryOp>
775 struct ScalarBinaryOpTraits<U1,U2,BinaryOp> { typedef U3 ReturnType; };
776 template<typename BinaryOp>
777 struct ScalarBinaryOpTraits<U2,U1,BinaryOp> { typedef U3 ReturnType; };
778 \endcode
779 * You can then explicitly disable some particular operations to get more explicit error messages:
780 \code
781 template<>
782 struct ScalarBinaryOpTraits<U1,U2,internal::scalar_max_op<U1,U2> > {};
783 \endcode
  * Or customize the return type for an individual operation:
785 \code
786 template<>
787 struct ScalarBinaryOpTraits<U1,U2,internal::scalar_sum_op<U1,U2> > { typedef U1 ReturnType; };
788 \endcode
789 *
790 * By default, the following generic combinations are supported:
791 <table class="manual">
792 <tr><th>ScalarA</th><th>ScalarB</th><th>BinaryOp</th><th>ReturnType</th><th>Note</th></tr>
793 <tr ><td>\c T </td><td>\c T </td><td>\c * </td><td>\c T </td><td></td></tr>
794 <tr class="alt"><td>\c NumTraits<T>::Real </td><td>\c T </td><td>\c * </td><td>\c T </td><td>Only if \c NumTraits<T>::IsComplex </td></tr>
795 <tr ><td>\c T </td><td>\c NumTraits<T>::Real </td><td>\c * </td><td>\c T </td><td>Only if \c NumTraits<T>::IsComplex </td></tr>
796 </table>
797 *
798 * \sa CwiseBinaryOp
799 */
// Primary (unconstrained) template: it intentionally defines no ReturnType of
// its own; whether an arbitrary ScalarA/ScalarB combination is allowed is
// delegated to the deprecated scalar_product_traits hints, so that pre-3.3
// user specializations of that class keep working.
template<typename ScalarA, typename ScalarB, typename BinaryOp=internal::scalar_product_op<ScalarA,ScalarB> >
struct ScalarBinaryOpTraits
#ifndef EIGEN_PARSED_BY_DOXYGEN
  // for backward compatibility, use the hints given by the (deprecated) internal::scalar_product_traits class.
  : internal::scalar_product_traits<ScalarA,ScalarB>
#endif // EIGEN_PARSED_BY_DOXYGEN
{};
807
// Both operands share the same scalar type T: the result is trivially T,
// whatever the binary operation.
template<typename T, typename BinaryOp>
struct ScalarBinaryOpTraits<T,T,BinaryOp>
{
  typedef T ReturnType;
};
813
// Mixing a type T with its real counterpart NumTraits<T>::Real (in either
// order) yields T. The enable_if sits in a non-deduced context: when
// NumTraits<T>::IsComplex is false the nested ::type does not exist, the
// specialization is discarded (SFINAE), and no ambiguity arises with the T,T
// specialization above for real types (where Real and T coincide).
template <typename T, typename BinaryOp>
struct ScalarBinaryOpTraits<T, typename NumTraits<typename internal::enable_if<NumTraits<T>::IsComplex,T>::type>::Real, BinaryOp>
{
  typedef T ReturnType;
};
template <typename T, typename BinaryOp>
struct ScalarBinaryOpTraits<typename NumTraits<typename internal::enable_if<NumTraits<T>::IsComplex,T>::type>::Real, T, BinaryOp>
{
  typedef T ReturnType;
};
824
// For Matrix * Permutation: 'void' here presumably stands for the scalar type
// of coefficient-less expressions such as permutations; the matrix scalar T
// is propagated unchanged.
template<typename T, typename BinaryOp>
struct ScalarBinaryOpTraits<T,void,BinaryOp>
{
  typedef T ReturnType;
};

// For Permutation * Matrix: symmetric to the case above.
template<typename T, typename BinaryOp>
struct ScalarBinaryOpTraits<void,T,BinaryOp>
{
  typedef T ReturnType;
};

// for Permutation*Permutation: the product of two coefficient-less operands
// remains coefficient-less.
template<typename BinaryOp>
struct ScalarBinaryOpTraits<void,void,BinaryOp>
{
  typedef void ReturnType;
};
845
// We require Lhs and Rhs to have "compatible" scalar types.
// It is tempting to always allow mixing different types but remember that this is often impossible in the vectorized paths.
// So allowing mixing different types gives very unexpected errors when enabling vectorization, when the user tries to
// add together a float matrix and a double matrix.
// The check passes iff ScalarBinaryOpTraits<LHS,RHS,BINOP> exposes a ReturnType
// member, i.e. iff the scalar combination has been explicitly allowed (see the
// specializations above).
// NOTE: the historical macro name is missing a 'T' ("COMPATIBILIY"); it is kept
// as-is because external code may invoke it, and a correctly spelled alias is
// provided right below — prefer the alias in new code.
#define EIGEN_CHECK_BINARY_COMPATIBILIY(BINOP,LHS,RHS) \
  EIGEN_STATIC_ASSERT((Eigen::internal::has_ReturnType<ScalarBinaryOpTraits<LHS, RHS,BINOP> >::value), \
    YOU_MIXED_DIFFERENT_NUMERIC_TYPES__YOU_NEED_TO_USE_THE_CAST_METHOD_OF_MATRIXBASE_TO_CAST_NUMERIC_TYPES_EXPLICITLY)

// Correctly spelled, backward-compatible alias of the macro above.
#define EIGEN_CHECK_BINARY_COMPATIBILITY(BINOP,LHS,RHS) EIGEN_CHECK_BINARY_COMPATIBILIY(BINOP,LHS,RHS)
853
854 } // end namespace Eigen
855
856 #endif // EIGEN_XPRHELPER_H
857