// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2017 Gagan Goel <[email protected]>
// Copyright (C) 2017 Benoit Steiner <[email protected]>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.

#ifndef EIGEN_CXX11_TENSOR_TENSOR_TRACE_H
#define EIGEN_CXX11_TENSOR_TENSOR_TRACE_H

namespace Eigen {

/** \class TensorTrace
  * \ingroup CXX11_Tensor_Module
  *
  * \brief Tensor Trace class.
  *
  * Sums the coefficients of a tensor along the generalized diagonal of the
  * specified dimensions, which must all have the same size. The rank of the
  * result is the rank of the input minus the number of traced dimensions.
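  *
  * A minimal usage sketch; the shape, dimension choice, and variable names
  * below are illustrative, not part of this file:
  * \code
  * Eigen::Tensor<float, 3> input(3, 5, 3);
  * input.setRandom();
  * // Trace over dimensions 0 and 2 (both of size 3); the result is a
  * // rank-1 tensor of size 5.
  * Eigen::array<int, 2> trace_dims({0, 2});
  * Eigen::Tensor<float, 1> result = input.trace(trace_dims);
  * \endcode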
  */

namespace internal {
template<typename Dims, typename XprType>
struct traits<TensorTraceOp<Dims, XprType> > : public traits<XprType>
{
  typedef typename XprType::Scalar Scalar;
  typedef traits<XprType> XprTraits;
  typedef typename XprTraits::StorageKind StorageKind;
  typedef typename XprTraits::Index Index;
  typedef typename XprType::Nested Nested;
  typedef typename remove_reference<Nested>::type _Nested;
  static const int NumDimensions = XprTraits::NumDimensions - array_size<Dims>::value;
  static const int Layout = XprTraits::Layout;
};

template<typename Dims, typename XprType>
struct eval<TensorTraceOp<Dims, XprType>, Eigen::Dense>
{
  typedef const TensorTraceOp<Dims, XprType>& type;
};

template<typename Dims, typename XprType>
struct nested<TensorTraceOp<Dims, XprType>, 1, typename eval<TensorTraceOp<Dims, XprType> >::type>
{
  typedef TensorTraceOp<Dims, XprType> type;
};

} // end namespace internal


template<typename Dims, typename XprType>
class TensorTraceOp : public TensorBase<TensorTraceOp<Dims, XprType> >
{
  public:
    typedef typename Eigen::internal::traits<TensorTraceOp>::Scalar Scalar;
    typedef typename Eigen::NumTraits<Scalar>::Real RealScalar;
    typedef typename XprType::CoeffReturnType CoeffReturnType;
    typedef typename Eigen::internal::nested<TensorTraceOp>::type Nested;
    typedef typename Eigen::internal::traits<TensorTraceOp>::StorageKind StorageKind;
    typedef typename Eigen::internal::traits<TensorTraceOp>::Index Index;

    EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorTraceOp(const XprType& expr, const Dims& dims)
      : m_xpr(expr), m_dims(dims) {
    }

    EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
    const Dims& dims() const { return m_dims; }

    EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
    const typename internal::remove_all<typename XprType::Nested>::type& expression() const { return m_xpr; }

  protected:
    typename XprType::Nested m_xpr;
    const Dims m_dims;
};


// Eval as rvalue
template<typename Dims, typename ArgType, typename Device>
struct TensorEvaluator<const TensorTraceOp<Dims, ArgType>, Device>
{
  typedef TensorTraceOp<Dims, ArgType> XprType;
  static const int NumInputDims = internal::array_size<typename TensorEvaluator<ArgType, Device>::Dimensions>::value;
  static const int NumReducedDims = internal::array_size<Dims>::value;
  static const int NumOutputDims = NumInputDims - NumReducedDims;
  typedef typename XprType::Index Index;
  typedef DSizes<Index, NumOutputDims> Dimensions;
  typedef typename XprType::Scalar Scalar;
  typedef typename XprType::CoeffReturnType CoeffReturnType;
  typedef typename PacketType<CoeffReturnType, Device>::type PacketReturnType;
  static const int PacketSize = internal::unpacket_traits<PacketReturnType>::size;
  typedef StorageMemory<CoeffReturnType, Device> Storage;
  typedef typename Storage::Type EvaluatorPointerType;

  enum {
    IsAligned = false,
    PacketAccess = TensorEvaluator<ArgType, Device>::PacketAccess,
    BlockAccess = false,
    PreferBlockAccess = TensorEvaluator<ArgType, Device>::PreferBlockAccess,
    Layout = TensorEvaluator<ArgType, Device>::Layout,
    CoordAccess = false,
    RawAccess = false
  };

  //===- Tensor block evaluation strategy (see TensorBlock.h) -------------===//
  typedef internal::TensorBlockNotImplemented TensorBlock;
  //===--------------------------------------------------------------------===//

  EIGEN_STRONG_INLINE TensorEvaluator(const XprType& op, const Device& device)
    : m_impl(op.expression(), device), m_traceDim(1), m_device(device)
  {
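    // The result cannot have negative rank, and a trace contracts at least
    // two dimensions; the only other valid configuration is a rank-0 input
    // with nothing to reduce.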
    EIGEN_STATIC_ASSERT((NumOutputDims >= 0), YOU_MADE_A_PROGRAMMING_MISTAKE);
    EIGEN_STATIC_ASSERT((NumReducedDims >= 2) || ((NumReducedDims == 0) && (NumInputDims == 0)), YOU_MADE_A_PROGRAMMING_MISTAKE);

    for (int i = 0; i < NumInputDims; ++i) {
      m_reduced[i] = false;
    }

    const Dims& op_dims = op.dims();
    for (int i = 0; i < NumReducedDims; ++i) {
      eigen_assert(op_dims[i] >= 0);
      eigen_assert(op_dims[i] < NumInputDims);
      m_reduced[op_dims[i]] = true;
    }

    // All the trace dimensions must be distinct; a duplicate would have
    // collapsed into a single flag above.
    int num_distinct_reduce_dims = 0;
    for (int i = 0; i < NumInputDims; ++i) {
      if (m_reduced[i]) {
        ++num_distinct_reduce_dims;
      }
    }

    eigen_assert(num_distinct_reduce_dims == NumReducedDims);

    // Compute the dimensions of the result.
    const typename TensorEvaluator<ArgType, Device>::Dimensions& input_dims = m_impl.dimensions();

    int output_index = 0;
    int reduced_index = 0;
    for (int i = 0; i < NumInputDims; ++i) {
      if (m_reduced[i]) {
        m_reducedDims[reduced_index] = input_dims[i];
        if (reduced_index > 0) {
          // All the trace dimensions must have the same size
          eigen_assert(m_reducedDims[0] == m_reducedDims[reduced_index]);
        }
        ++reduced_index;
      }
      else {
        m_dimensions[output_index] = input_dims[i];
        ++output_index;
      }
    }

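    // m_traceDim is the common size of the traced dimensions, i.e. the number
    // of terms summed per output coefficient (it keeps its initial value of 1
    // for a rank-0 input).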
    if (NumReducedDims != 0) {
      m_traceDim = m_reducedDims[0];
    }

    // Compute the output strides
    if (NumOutputDims > 0) {
      if (static_cast<int>(Layout) == static_cast<int>(ColMajor)) {
        m_outputStrides[0] = 1;
        for (int i = 1; i < NumOutputDims; ++i) {
          m_outputStrides[i] = m_outputStrides[i - 1] * m_dimensions[i - 1];
        }
      }
      else {
        m_outputStrides.back() = 1;
        for (int i = NumOutputDims - 2; i >= 0; --i) {
          m_outputStrides[i] = m_outputStrides[i + 1] * m_dimensions[i + 1];
        }
      }
    }

    // Compute the input strides
    if (NumInputDims > 0) {
      array<Index, NumInputDims> input_strides;
      if (static_cast<int>(Layout) == static_cast<int>(ColMajor)) {
        input_strides[0] = 1;
        for (int i = 1; i < NumInputDims; ++i) {
          input_strides[i] = input_strides[i - 1] * input_dims[i - 1];
        }
      }
      else {
        input_strides.back() = 1;
        for (int i = NumInputDims - 2; i >= 0; --i) {
          input_strides[i] = input_strides[i + 1] * input_dims[i + 1];
        }
      }

      output_index = 0;
      reduced_index = 0;
      for (int i = 0; i < NumInputDims; ++i) {
        if (m_reduced[i]) {
          m_reducedStrides[reduced_index] = input_strides[i];
          ++reduced_index;
        }
        else {
          m_preservedStrides[output_index] = input_strides[i];
          ++output_index;
        }
      }
    }
  }

  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Dimensions& dimensions() const {
    return m_dimensions;
  }

  EIGEN_STRONG_INLINE bool evalSubExprsIfNeeded(EvaluatorPointerType /*data*/) {
    m_impl.evalSubExprsIfNeeded(NULL);
    return true;
  }

  EIGEN_STRONG_INLINE void cleanup() {
    m_impl.cleanup();
  }

  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType coeff(Index index) const
  {
    // Initialize the result
    CoeffReturnType result = internal::cast<int, CoeffReturnType>(0);
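    // The sum of the reduced strides is the step between two consecutive
    // entries of the generalized diagonal: adding it to an input index
    // advances every traced dimension by one.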
    Index index_stride = 0;
    for (int i = 0; i < NumReducedDims; ++i) {
      index_stride += m_reducedStrides[i];
    }

    // If the trace is taken along all dimensions, the starting index is 0.
    Index cur_index = 0;
    if (NumOutputDims != 0)
      cur_index = firstInput(index);
    for (Index i = 0; i < m_traceDim; ++i) {
      result += m_impl.coeff(cur_index);
      cur_index += index_stride;
    }

    return result;
  }

  template<int LoadMode>
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE PacketReturnType packet(Index index) const {
    EIGEN_STATIC_ASSERT((PacketSize > 1), YOU_MADE_A_PROGRAMMING_MISTAKE);
    eigen_assert(index + PacketSize - 1 < dimensions().TotalSize());

    EIGEN_ALIGN_MAX typename internal::remove_const<CoeffReturnType>::type values[PacketSize];
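    // There is no vectorized gather along the generalized diagonal, so the
    // packet is assembled from PacketSize independent scalar traces.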
    for (int i = 0; i < PacketSize; ++i) {
      values[i] = coeff(index + i);
    }
    PacketReturnType result = internal::ploadt<PacketReturnType, LoadMode>(values);
    return result;
  }

#ifdef EIGEN_USE_SYCL
  // binding placeholder accessors to a command group handler for SYCL
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void bind(cl::sycl::handler &cgh) const {
    m_impl.bind(cgh);
  }
#endif

 protected:
  // Given the output index, finds the first index in the input tensor used to compute the trace
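  // The output index is peeled into per-dimension coordinates by dividing
  // through the output strides, and each coordinate is then re-encoded with
  // the stride that the corresponding dimension has in the input tensor.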
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Index firstInput(Index index) const {
    Index startInput = 0;
    if (static_cast<int>(Layout) == static_cast<int>(ColMajor)) {
      for (int i = NumOutputDims - 1; i > 0; --i) {
        const Index idx = index / m_outputStrides[i];
        startInput += idx * m_preservedStrides[i];
        index -= idx * m_outputStrides[i];
      }
      startInput += index * m_preservedStrides[0];
    }
    else {
      for (int i = 0; i < NumOutputDims - 1; ++i) {
        const Index idx = index / m_outputStrides[i];
        startInput += idx * m_preservedStrides[i];
        index -= idx * m_outputStrides[i];
      }
      startInput += index * m_preservedStrides[NumOutputDims - 1];
    }
    return startInput;
  }

  Dimensions m_dimensions;
  TensorEvaluator<ArgType, Device> m_impl;
  // Common size of the traced dimensions (the number of summed terms).
  Index m_traceDim;
  const Device EIGEN_DEVICE_REF m_device;
  array<bool, NumInputDims> m_reduced;
  array<Index, NumReducedDims> m_reducedDims;
  array<Index, NumOutputDims> m_outputStrides;
  array<Index, NumReducedDims> m_reducedStrides;
  array<Index, NumOutputDims> m_preservedStrides;
};


} // End namespace Eigen

#endif // EIGEN_CXX11_TENSOR_TENSOR_TRACE_H