// Tensor.h
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2014 Benoit Steiner <benoit.steiner.goog@gmail.com>
// Copyright (C) 2013 Christian Seiler <christian@iwakd.de>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.

#ifndef EIGEN_CXX11_TENSOR_TENSOR_H
#define EIGEN_CXX11_TENSOR_TENSOR_H

namespace Eigen {
62 template<typename Scalar_, int NumIndices_, int Options_, typename IndexType_>
63 class Tensor : public TensorBase<Tensor<Scalar_, NumIndices_, Options_, IndexType_> >
64 {
65  public:
68  typedef typename Eigen::internal::nested<Self>::type Nested;
69  typedef typename internal::traits<Self>::StorageKind StorageKind;
70  typedef typename internal::traits<Self>::Index Index;
71  typedef Scalar_ Scalar;
72  typedef typename internal::packet_traits<Scalar>::type Packet;
73  typedef typename NumTraits<Scalar>::Real RealScalar;
74  typedef typename Base::CoeffReturnType CoeffReturnType;
75  typedef typename Base::PacketReturnType PacketReturnType;
76 
77  enum {
78  IsAligned = bool(EIGEN_MAX_ALIGN_BYTES>0) & !(Options_&DontAlign),
79  PacketAccess = (internal::packet_traits<Scalar>::size > 1),
80  Layout = Options_ & RowMajor ? RowMajor : ColMajor,
81  CoordAccess = true
82  };
83 
84  static const int Options = Options_;
85  static const int NumIndices = NumIndices_;
86  typedef DSizes<Index, NumIndices_> Dimensions;
87 
88  protected:
89  TensorStorage<Scalar, Dimensions, Options> m_storage;
90 
91 #ifdef EIGEN_HAS_SFINAE
92  template<typename CustomIndices>
93  struct isOfNormalIndex{
94  static const bool is_array = internal::is_base_of<array<Index, NumIndices>, CustomIndices>::value;
95  static const bool is_int = NumTraits<CustomIndices>::IsInteger;
96  static const bool value = is_array | is_int;
97  };
98 #endif
99 
100  public:
101  // Metadata
102  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Index rank() const { return NumIndices; }
103  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Index dimension(std::size_t n) const { return m_storage.dimensions()[n]; }
104  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Dimensions& dimensions() const { return m_storage.dimensions(); }
105  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Index size() const { return m_storage.size(); }
106  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar *data() { return m_storage.data(); }
107  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Scalar *data() const { return m_storage.data(); }
108 
109  // This makes EIGEN_INITIALIZE_COEFFS_IF_THAT_OPTION_IS_ENABLED
110  // work, because that uses base().coeffRef() - and we don't yet
111  // implement a similar class hierarchy
112  inline Self& base() { return *this; }
113  inline const Self& base() const { return *this; }
114 
115 #ifdef EIGEN_HAS_VARIADIC_TEMPLATES
116  template<typename... IndexTypes>
117  EIGEN_DEVICE_FUNC inline const Scalar& coeff(Index firstIndex, Index secondIndex, IndexTypes... otherIndices) const
118  {
119  // The number of indices used to access a tensor coefficient must be equal to the rank of the tensor.
120  EIGEN_STATIC_ASSERT(sizeof...(otherIndices) + 2 == NumIndices, YOU_MADE_A_PROGRAMMING_MISTAKE)
121  return coeff(array<Index, NumIndices>{{firstIndex, secondIndex, otherIndices...}});
122  }
123 #endif
124 
125  // normal indices
126  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Scalar& coeff(const array<Index, NumIndices>& indices) const
127  {
128  eigen_internal_assert(checkIndexRange(indices));
129  return m_storage.data()[linearizedIndex(indices)];
130  }
131 
132  // custom indices
133 #ifdef EIGEN_HAS_SFINAE
134  template<typename CustomIndices,
135  EIGEN_SFINAE_ENABLE_IF( !(isOfNormalIndex<CustomIndices>::value) )
136  >
137  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Scalar& coeff(CustomIndices& indices) const
138  {
139  return coeff(internal::customIndices2Array<Index,NumIndices>(indices));
140  }
141 #endif
142 
143  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Scalar& coeff() const
144  {
145  EIGEN_STATIC_ASSERT(NumIndices == 0, YOU_MADE_A_PROGRAMMING_MISTAKE);
146  return m_storage.data()[0];
147  }
148 
149  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Scalar& coeff(Index index) const
150  {
151  eigen_internal_assert(index >= 0 && index < size());
152  return m_storage.data()[index];
153  }
154 
155 #ifdef EIGEN_HAS_VARIADIC_TEMPLATES
156  template<typename... IndexTypes>
157  inline Scalar& coeffRef(Index firstIndex, Index secondIndex, IndexTypes... otherIndices)
158  {
159  // The number of indices used to access a tensor coefficient must be equal to the rank of the tensor.
160  EIGEN_STATIC_ASSERT(sizeof...(otherIndices) + 2 == NumIndices, YOU_MADE_A_PROGRAMMING_MISTAKE)
161  return coeffRef(array<Index, NumIndices>{{firstIndex, secondIndex, otherIndices...}});
162  }
163 #endif
164 
165  // normal indices
166  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar& coeffRef(const array<Index, NumIndices>& indices)
167  {
168  eigen_internal_assert(checkIndexRange(indices));
169  return m_storage.data()[linearizedIndex(indices)];
170  }
171 
172  // custom indices
173 #ifdef EIGEN_HAS_SFINAE
174  template<typename CustomIndices,
175  EIGEN_SFINAE_ENABLE_IF( !(isOfNormalIndex<CustomIndices>::value) )
176  >
177  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar& coeffRef(CustomIndices& indices)
178  {
179  return coeffRef(internal::customIndices2Array<Index,NumIndices>(indices));
180  }
181 #endif
182 
183  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar& coeffRef()
184  {
185  EIGEN_STATIC_ASSERT(NumIndices == 0, YOU_MADE_A_PROGRAMMING_MISTAKE);
186  return m_storage.data()[0];
187  }
188 
189  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar& coeffRef(Index index)
190  {
191  eigen_internal_assert(index >= 0 && index < size());
192  return m_storage.data()[index];
193  }
194 
195 #ifdef EIGEN_HAS_VARIADIC_TEMPLATES
196  template<typename... IndexTypes>
197  inline const Scalar& operator()(Index firstIndex, Index secondIndex, IndexTypes... otherIndices) const
198  {
199  // The number of indices used to access a tensor coefficient must be equal to the rank of the tensor.
200  EIGEN_STATIC_ASSERT(sizeof...(otherIndices) + 2 == NumIndices, YOU_MADE_A_PROGRAMMING_MISTAKE)
201  return this->operator()(array<Index, NumIndices>{{firstIndex, secondIndex, otherIndices...}});
202  }
203 #else
204  EIGEN_DEVICE_FUNC
205  EIGEN_STRONG_INLINE const Scalar& operator()(Index i0, Index i1) const
206  {
207  return coeff(array<Index, 2>(i0, i1));
208  }
209  EIGEN_DEVICE_FUNC
210  EIGEN_STRONG_INLINE const Scalar& operator()(Index i0, Index i1, Index i2) const
211  {
212  return coeff(array<Index, 3>(i0, i1, i2));
213  }
214  EIGEN_DEVICE_FUNC
215  EIGEN_STRONG_INLINE const Scalar& operator()(Index i0, Index i1, Index i2, Index i3) const
216  {
217  return coeff(array<Index, 4>(i0, i1, i2, i3));
218  }
219  EIGEN_DEVICE_FUNC
220  EIGEN_STRONG_INLINE const Scalar& operator()(Index i0, Index i1, Index i2, Index i3, Index i4) const
221  {
222  return coeff(array<Index, 5>(i0, i1, i2, i3, i4));
223  }
224 #endif
225 
226  // custom indices
227 #ifdef EIGEN_HAS_SFINAE
228  template<typename CustomIndices,
229  EIGEN_SFINAE_ENABLE_IF( !(isOfNormalIndex<CustomIndices>::value) )
230  >
231  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Scalar& operator()(CustomIndices& indices) const
232  {
233  return coeff(internal::customIndices2Array<Index,NumIndices>(indices));
234  }
235 #endif
236 
237  // normal indices
238  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Scalar& operator()(const array<Index, NumIndices>& indices) const
239  {
240  return coeff(indices);
241  }
242 
243  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Scalar& operator()(Index index) const
244  {
245  eigen_internal_assert(index >= 0 && index < size());
246  return coeff(index);
247  }
248 
249  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Scalar& operator()() const
250  {
251  EIGEN_STATIC_ASSERT(NumIndices == 0, YOU_MADE_A_PROGRAMMING_MISTAKE);
252  return coeff();
253  }
254 
255  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Scalar& operator[](Index index) const
256  {
257  // The bracket operator is only for vectors, use the parenthesis operator instead.
258  EIGEN_STATIC_ASSERT(NumIndices == 1, YOU_MADE_A_PROGRAMMING_MISTAKE);
259  return coeff(index);
260  }
261 
262 #ifdef EIGEN_HAS_VARIADIC_TEMPLATES
263  template<typename... IndexTypes>
264  inline Scalar& operator()(Index firstIndex, Index secondIndex, IndexTypes... otherIndices)
265  {
266  // The number of indices used to access a tensor coefficient must be equal to the rank of the tensor.
267  EIGEN_STATIC_ASSERT(sizeof...(otherIndices) + 2 == NumIndices, YOU_MADE_A_PROGRAMMING_MISTAKE)
268  return operator()(array<Index, NumIndices>{{firstIndex, secondIndex, otherIndices...}});
269  }
270 #else
271  EIGEN_DEVICE_FUNC
272  EIGEN_STRONG_INLINE Scalar& operator()(Index i0, Index i1)
273  {
274  return coeffRef(array<Index, 2>(i0, i1));
275  }
276  EIGEN_DEVICE_FUNC
277  EIGEN_STRONG_INLINE Scalar& operator()(Index i0, Index i1, Index i2)
278  {
279  return coeffRef(array<Index, 3>(i0, i1, i2));
280  }
281  EIGEN_DEVICE_FUNC
282  EIGEN_STRONG_INLINE Scalar& operator()(Index i0, Index i1, Index i2, Index i3)
283  {
284  return coeffRef(array<Index, 4>(i0, i1, i2, i3));
285  }
286  EIGEN_DEVICE_FUNC
287  EIGEN_STRONG_INLINE Scalar& operator()(Index i0, Index i1, Index i2, Index i3, Index i4)
288  {
289  return coeffRef(array<Index, 5>(i0, i1, i2, i3, i4));
290  }
291 #endif
292 
293  // normal indices
294  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar& operator()(const array<Index, NumIndices>& indices)
295  {
296  return coeffRef(indices);
297  }
298 
299  // custom indices
300 #ifdef EIGEN_HAS_SFINAE
301  template<typename CustomIndices,
302  EIGEN_SFINAE_ENABLE_IF( !(isOfNormalIndex<CustomIndices>::value) )
303  >
304  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar& operator()(CustomIndices& indices)
305  {
306  return coeffRef(internal::customIndices2Array<Index,NumIndices>(indices));
307  }
308 #endif
309 
310  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar& operator()(Index index)
311  {
312  eigen_assert(index >= 0 && index < size());
313  return coeffRef(index);
314  }
315 
316  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar& operator()()
317  {
318  EIGEN_STATIC_ASSERT(NumIndices == 0, YOU_MADE_A_PROGRAMMING_MISTAKE);
319  return coeffRef();
320  }
321 
322  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar& operator[](Index index)
323  {
324  // The bracket operator is only for vectors, use the parenthesis operator instead
325  EIGEN_STATIC_ASSERT(NumIndices == 1, YOU_MADE_A_PROGRAMMING_MISTAKE)
326  return coeffRef(index);
327  }
328 
329  EIGEN_DEVICE_FUNC
330  EIGEN_STRONG_INLINE Tensor()
331  : m_storage()
332  {
333  }
334 
335  EIGEN_DEVICE_FUNC
336  EIGEN_STRONG_INLINE Tensor(const Self& other)
337  : m_storage(other.m_storage)
338  {
339  }
340 
341 #ifdef EIGEN_HAS_VARIADIC_TEMPLATES
342  template<typename... IndexTypes>
343  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Tensor(Index firstDimension, IndexTypes... otherDimensions)
344  : m_storage(internal::array_prod(array<Index, NumIndices>{{firstDimension, otherDimensions...}}), array<Index, NumIndices>{{firstDimension, otherDimensions...}})
345  {
346  // The number of dimensions used to construct a tensor must be equal to the rank of the tensor.
347  EIGEN_STATIC_ASSERT(sizeof...(otherDimensions) + 1 == NumIndices, YOU_MADE_A_PROGRAMMING_MISTAKE)
348  }
349 #else
350  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE explicit Tensor(Index dim1)
351  : m_storage(dim1, array<Index, 1>(dim1))
352  {
353  EIGEN_STATIC_ASSERT(1 == NumIndices, YOU_MADE_A_PROGRAMMING_MISTAKE)
354  }
355  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE explicit Tensor(Index dim1, Index dim2)
356  : m_storage(dim1*dim2, array<Index, 2>(dim1, dim2))
357  {
358  EIGEN_STATIC_ASSERT(2 == NumIndices, YOU_MADE_A_PROGRAMMING_MISTAKE)
359  }
360  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE explicit Tensor(Index dim1, Index dim2, Index dim3)
361  : m_storage(dim1*dim2*dim3, array<Index, 3>(dim1, dim2, dim3))
362  {
363  EIGEN_STATIC_ASSERT(3 == NumIndices, YOU_MADE_A_PROGRAMMING_MISTAKE)
364  }
365  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE explicit Tensor(Index dim1, Index dim2, Index dim3, Index dim4)
366  : m_storage(dim1*dim2*dim3*dim4, array<Index, 4>(dim1, dim2, dim3, dim4))
367  {
368  EIGEN_STATIC_ASSERT(4 == NumIndices, YOU_MADE_A_PROGRAMMING_MISTAKE)
369  }
370  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE explicit Tensor(Index dim1, Index dim2, Index dim3, Index dim4, Index dim5)
371  : m_storage(dim1*dim2*dim3*dim4*dim5, array<Index, 5>(dim1, dim2, dim3, dim4, dim5))
372  {
373  EIGEN_STATIC_ASSERT(5 == NumIndices, YOU_MADE_A_PROGRAMMING_MISTAKE)
374  }
375 #endif
376 
378  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE explicit Tensor(const array<Index, NumIndices>& dimensions)
379  : m_storage(internal::array_prod(dimensions), dimensions)
380  {
381  EIGEN_INITIALIZE_COEFFS_IF_THAT_OPTION_IS_ENABLED
382  }
383 
384  template<typename OtherDerived>
385  EIGEN_DEVICE_FUNC
386  EIGEN_STRONG_INLINE Tensor(const TensorBase<OtherDerived, ReadOnlyAccessors>& other)
387  {
388  typedef TensorAssignOp<Tensor, const OtherDerived> Assign;
389  Assign assign(*this, other.derived());
390  resize(TensorEvaluator<const Assign, DefaultDevice>(assign, DefaultDevice()).dimensions());
391  internal::TensorExecutor<const Assign, DefaultDevice>::run(assign, DefaultDevice());
392  }
393  template<typename OtherDerived>
394  EIGEN_DEVICE_FUNC
395  EIGEN_STRONG_INLINE Tensor(const TensorBase<OtherDerived, WriteAccessors>& other)
396  {
397  typedef TensorAssignOp<Tensor, const OtherDerived> Assign;
398  Assign assign(*this, other.derived());
399  resize(TensorEvaluator<const Assign, DefaultDevice>(assign, DefaultDevice()).dimensions());
400  internal::TensorExecutor<const Assign, DefaultDevice>::run(assign, DefaultDevice());
401  }
402 
403  EIGEN_DEVICE_FUNC
404  EIGEN_STRONG_INLINE Tensor& operator=(const Tensor& other)
405  {
406  typedef TensorAssignOp<Tensor, const Tensor> Assign;
407  Assign assign(*this, other);
408  resize(TensorEvaluator<const Assign, DefaultDevice>(assign, DefaultDevice()).dimensions());
409  internal::TensorExecutor<const Assign, DefaultDevice>::run(assign, DefaultDevice());
410  return *this;
411  }
412  template<typename OtherDerived>
413  EIGEN_DEVICE_FUNC
414  EIGEN_STRONG_INLINE Tensor& operator=(const OtherDerived& other)
415  {
416  typedef TensorAssignOp<Tensor, const OtherDerived> Assign;
417  Assign assign(*this, other);
418  resize(TensorEvaluator<const Assign, DefaultDevice>(assign, DefaultDevice()).dimensions());
419  internal::TensorExecutor<const Assign, DefaultDevice>::run(assign, DefaultDevice());
420  return *this;
421  }
422 
423 #ifdef EIGEN_HAS_VARIADIC_TEMPLATES
424  template<typename... IndexTypes> EIGEN_DEVICE_FUNC
425  void resize(Index firstDimension, IndexTypes... otherDimensions)
426  {
427  // The number of dimensions used to resize a tensor must be equal to the rank of the tensor.
428  EIGEN_STATIC_ASSERT(sizeof...(otherDimensions) + 1 == NumIndices, YOU_MADE_A_PROGRAMMING_MISTAKE)
429  resize(array<Index, NumIndices>{{firstDimension, otherDimensions...}});
430  }
431 #endif
432 
434  EIGEN_DEVICE_FUNC void resize(const array<Index, NumIndices>& dimensions)
435  {
436  int i;
437  Index size = Index(1);
438  for (i = 0; i < NumIndices; i++) {
439  internal::check_rows_cols_for_overflow<Dynamic>::run(size, dimensions[i]);
440  size *= dimensions[i];
441  }
442  #ifdef EIGEN_INITIALIZE_COEFFS
443  bool size_changed = size != this->size();
444  m_storage.resize(size, dimensions);
445  if(size_changed) EIGEN_INITIALIZE_COEFFS_IF_THAT_OPTION_IS_ENABLED
446  #else
447  m_storage.resize(size, dimensions);
448  #endif
449  }
450 
451  // Why this overload, DSizes is derived from array ??? //
452  EIGEN_DEVICE_FUNC void resize(const DSizes<Index, NumIndices>& dimensions) {
453  array<Index, NumIndices> dims;
454  for (int i = 0; i < NumIndices; ++i) {
455  dims[i] = dimensions[i];
456  }
457  resize(dims);
458  }
459 
460  EIGEN_DEVICE_FUNC
461  void resize()
462  {
463  EIGEN_STATIC_ASSERT(NumIndices == 0, YOU_MADE_A_PROGRAMMING_MISTAKE);
464  // Nothing to do: rank 0 tensors have fixed size
465  }
466 
468 #ifdef EIGEN_HAS_SFINAE
469  template<typename CustomDimension,
470  EIGEN_SFINAE_ENABLE_IF( !(isOfNormalIndex<CustomDimension>::value) )
471  >
472  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void resize(CustomDimension& dimensions)
473  {
474  resize(internal::customIndices2Array<Index,NumIndices>(dimensions));
475  }
476 #endif
477 
478 #ifndef EIGEN_EMULATE_CXX11_META_H
479  template <typename std::ptrdiff_t... Indices>
480  EIGEN_DEVICE_FUNC
481  void resize(const Sizes<Indices...>& dimensions) {
482  array<Index, NumIndices> dims;
483  for (int i = 0; i < NumIndices; ++i) {
484  dims[i] = static_cast<Index>(dimensions[i]);
485  }
486  resize(dims);
487  }
488 #else
489  template <std::size_t V1, std::size_t V2, std::size_t V3, std::size_t V4, std::size_t V5>
490  EIGEN_DEVICE_FUNC
491  void resize(const Sizes<V1, V2, V3, V4, V5>& dimensions) {
492  array<Index, NumIndices> dims;
493  for (int i = 0; i < NumIndices; ++i) {
494  dims[i] = static_cast<Index>(dimensions[i]);
495  }
496  resize(dims);
497  }
498 #endif
499 
500  protected:
501 
502  bool checkIndexRange(const array<Index, NumIndices>& indices) const
503  {
504  using internal::array_apply_and_reduce;
505  using internal::array_zip_and_reduce;
506  using internal::greater_equal_zero_op;
507  using internal::logical_and_op;
508  using internal::lesser_op;
509 
510  return
511  // check whether the indices are all >= 0
512  array_apply_and_reduce<logical_and_op, greater_equal_zero_op>(indices) &&
513  // check whether the indices fit in the dimensions
514  array_zip_and_reduce<logical_and_op, lesser_op>(indices, m_storage.dimensions());
515  }
516 
517  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Index linearizedIndex(const array<Index, NumIndices>& indices) const
518  {
519  if (Options&RowMajor) {
520  return m_storage.dimensions().IndexOfRowMajor(indices);
521  } else {
522  return m_storage.dimensions().IndexOfColMajor(indices);
523  }
524  }
525 };
} // end namespace Eigen

#endif // EIGEN_CXX11_TENSOR_TENSOR_H
// --- Doxygen cross-reference residue (not part of the original source) ---
// void resize(const array<Index, NumIndices>& dimensions)   — Tensor.h:434
// Namespace containing all symbols from the Eigen library.   — CXX11Meta.h:13
// The tensor evaluator classes.                              — TensorEvaluator.h:28
// Tensor(const array<Index, NumIndices>& dimensions)         — Tensor.h:378
// The tensor base class.                                     — TensorForwardDeclarations.h:19
// void resize(const Sizes<Indices...>& dimensions)           — Tensor.h:481
// The tensor class.                                          — Tensor.h:63