// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2013 Christian Seiler
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.

#ifndef EIGEN_CXX11_TENSORSYMMETRY_SYMMETRY_H
#define EIGEN_CXX11_TENSORSYMMETRY_SYMMETRY_H

namespace Eigen {

// Flags describing a single symmetry transformation: exchanging the two
// covered indices may negate and/or conjugate the coefficient.
enum {
  NegationFlag           = 0x01,
  ConjugationFlag        = 0x02
};

// Flags describing constraints on a coefficient whose index tuple is mapped
// onto itself by some group element. Note that
// GlobalZeroFlag == GlobalRealFlag | GlobalImagFlag: a value that must be both
// real and purely imaginary is forced to zero.
enum {
  GlobalRealFlag         = 0x01,
  GlobalImagFlag         = 0x02,
  GlobalZeroFlag         = 0x03
};

namespace internal {

template<std::size_t NumIndices, typename... Sym>                   struct tensor_symmetry_pre_analysis;
template<std::size_t NumIndices, typename... Sym>                   struct tensor_static_symgroup;
template<bool instantiate, std::size_t NumIndices, typename... Sym> struct tensor_static_symgroup_if;
template<typename Tensor_> struct tensor_symmetry_calculate_flags;
template<typename Tensor_> struct tensor_symmetry_assign_value;
template<typename... Sym> struct tensor_symmetry_num_indices;

} // end namespace internal

template<int One_, int Two_>
struct Symmetry
{
  static_assert(One_ != Two_, "Symmetries must cover distinct indices.");
  constexpr static int One = One_;
  constexpr static int Two = Two_;
  constexpr static int Flags = 0;
};

template<int One_, int Two_>
struct AntiSymmetry
{
  static_assert(One_ != Two_, "Symmetries must cover distinct indices.");
  constexpr static int One = One_;
  constexpr static int Two = Two_;
  constexpr static int Flags = NegationFlag;
};

template<int One_, int Two_>
struct Hermiticity
{
  static_assert(One_ != Two_, "Symmetries must cover distinct indices.");
  constexpr static int One = One_;
  constexpr static int Two = Two_;
  constexpr static int Flags = ConjugationFlag;
};

template<int One_, int Two_>
struct AntiHermiticity
{
  static_assert(One_ != Two_, "Symmetries must cover distinct indices.");
  constexpr static int One = One_;
  constexpr static int Two = Two_;
  constexpr static int Flags = ConjugationFlag | NegationFlag;
};
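
// Illustration (editorial, not part of the API): in terms of tensor coefficients,
// a generator acting on indices 0 and 1 encodes the following relations; the
// flags above select whether exchanging the two indices negates and/or
// conjugates the coefficient:
//
//   Symmetry<0,1>        :  t(i,j,...) ==        t(j,i,...)
//   AntiSymmetry<0,1>    :  t(i,j,...) ==       -t(j,i,...)
//   Hermiticity<0,1>     :  t(i,j,...) ==  conj(t(j,i,...))
//   AntiHermiticity<0,1> :  t(i,j,...) == -conj(t(j,i,...))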

/** \class DynamicSGroup
  * \ingroup TensorSymmetry_Module
  *
  * \brief Dynamic symmetry group
  *
  * The %DynamicSGroup class represents a symmetry group that need not be known at
  * compile time. It is useful if one wants to support arbitrary run-time definable
  * symmetries for tensors, but it is also instantiated if a symmetry group is defined
  * at compile time that would either be too large for the compiler to reasonably
  * generate (using templates to calculate this at compile time is very inefficient)
  * or be one the compiler could generate but for which unrolling the coefficient
  * assignment loop would no longer make sense.
  */
class DynamicSGroup;

/** \internal
  *
  * \class DynamicSGroupFromTemplateArgs
  * \ingroup TensorSymmetry_Module
  *
  * \brief Dynamic symmetry group, initialized from template arguments
  *
  * This class is a child class of DynamicSGroup. It uses the template arguments
  * specified to initialize itself.
  */
template<typename... Gen>
class DynamicSGroupFromTemplateArgs;

/** \class StaticSGroup
  * \ingroup TensorSymmetry_Module
  *
  * \brief Static symmetry group
  *
  * This class represents a symmetry group that is known and resolved completely
  * at compile time. Ideally, no run-time penalty is incurred compared to the
  * manual unrolling of the symmetry.
  *
  * CAUTION:
  *
  * Do not use this class directly for large symmetry groups. The compiler
  * may run into an internal limit, segfault, or at the very least take a very,
  * very long time to compile the code. Use the SGroup class instead if you
  * want a static group. That class contains logic that will automatically
  * select the DynamicSGroup class instead if the symmetry group becomes too
  * large. (In that case, unrolling may not even be beneficial.)
  */
template<typename... Gen>
class StaticSGroup;

/** \class SGroup
  * \ingroup TensorSymmetry_Module
  *
  * \brief Symmetry group, initialized from template arguments
  *
  * This class represents a symmetry group whose generators are already
  * known at compile time. It may or may not be resolved at compile time,
  * depending on the estimated size of the group.
  *
  * \sa StaticSGroup
  * \sa DynamicSGroup
  */
template<typename... Gen>
class SGroup : public internal::tensor_symmetry_pre_analysis<internal::tensor_symmetry_num_indices<Gen...>::value, Gen...>::root_type
{
  public:
    constexpr static std::size_t NumIndices = internal::tensor_symmetry_num_indices<Gen...>::value;
    typedef typename internal::tensor_symmetry_pre_analysis<NumIndices, Gen...>::root_type Base;

    // make standard constructors + assignment operators public
    inline SGroup() : Base() { }
    inline SGroup(const SGroup<Gen...>& other) : Base(other) { }
    inline SGroup(SGroup<Gen...>&& other) : Base(other) { }
    inline SGroup<Gen...>& operator=(const SGroup<Gen...>& other) { Base::operator=(other); return *this; }
    inline SGroup<Gen...>& operator=(SGroup<Gen...>&& other) { Base::operator=(other); return *this; }

    // all else is defined in the base class
};
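
// Usage sketch (illustrative only; it assumes the operator() interface provided
// by the StaticSGroup / DynamicSGroup base classes and the Tensor class from the
// Tensor module). Assigning through the group writes all symmetry-related
// coefficients at once:
//
//   SGroup<AntiSymmetry<0,1>, AntiSymmetry<1,2>> sym;
//   Eigen::Tensor<int, 3> epsilon(3,3,3);
//   epsilon.setZero();
//   sym(epsilon, 0, 1, 2) = 1;   // also sets the other permutations, with signs
//
// Whether the group is unrolled at compile time or enumerated at run time is
// decided by internal::tensor_symmetry_pre_analysis below.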

namespace internal {

template<typename... Sym> struct tensor_symmetry_num_indices
{
  constexpr static std::size_t value = 1;
};

template<int One_, int Two_, typename... Sym> struct tensor_symmetry_num_indices<Symmetry<One_, Two_>, Sym...>
{
private:
  constexpr static std::size_t One = static_cast<std::size_t>(One_);
  constexpr static std::size_t Two = static_cast<std::size_t>(Two_);
  constexpr static std::size_t Three = tensor_symmetry_num_indices<Sym...>::value;

  // don't use std::max, since it's not constexpr until C++14...
  constexpr static std::size_t maxOneTwoPlusOne = ((One > Two) ? One : Two) + 1;
public:
  constexpr static std::size_t value = (maxOneTwoPlusOne > Three) ? maxOneTwoPlusOne : Three;
};

template<int One_, int Two_, typename... Sym> struct tensor_symmetry_num_indices<AntiSymmetry<One_, Two_>, Sym...>
  : public tensor_symmetry_num_indices<Symmetry<One_, Two_>, Sym...> {};
template<int One_, int Two_, typename... Sym> struct tensor_symmetry_num_indices<Hermiticity<One_, Two_>, Sym...>
  : public tensor_symmetry_num_indices<Symmetry<One_, Two_>, Sym...> {};
template<int One_, int Two_, typename... Sym> struct tensor_symmetry_num_indices<AntiHermiticity<One_, Two_>, Sym...>
  : public tensor_symmetry_num_indices<Symmetry<One_, Two_>, Sym...> {};
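
// For illustration: the deduced number of indices is one more than the largest
// index referenced by any generator (and at least 1), e.g.
//   tensor_symmetry_num_indices<Symmetry<0,1>>::value                    == 2
//   tensor_symmetry_num_indices<AntiSymmetry<2,3>, Symmetry<0,1>>::value == 4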

/** \internal
  *
  * \class tensor_symmetry_pre_analysis
  * \ingroup TensorSymmetry_Module
  *
  * \brief Pre-select whether to use a static or dynamic symmetry group
  *
  * When a symmetry group could in principle be determined at compile time,
  * this template implements the logic whether to actually do that or whether
  * to rather defer that to runtime.
  *
  * The logic is as follows:
  * <dl>
  * <dt><b>No generators (trivial symmetry):</b></dt>
  * <dd>Use a trivial static group. Ideally, this has no performance impact
  *     compared to not using symmetry at all. In practice, this might not
  *     be the case.</dd>
  * <dt><b>More than 4 generators:</b></dt>
  * <dd>Calculate the group at run time; it is likely far too large for the
  *     compiler to be able to properly generate it in a realistic time.</dd>
  * <dt><b>Up to and including 4 generators:</b></dt>
  * <dd>Actually enumerate all group elements, but then check how many there
  *     are. If there are 16 or more, it is unlikely that unrolling the
  *     loop (as is done in the static compile-time case) is sensible, so
  *     use a dynamic group instead. If there are fewer than 16 elements,
  *     actually use that static group. Note that the largest group with 4
  *     generators still compiles with reasonable resources.</dd>
  * </dl>
  *
  * Note: Example compile-time performance with g++-4.6 on an Intel Core i5-3470
  *       with 16 GiB RAM (all generators non-redundant and the subgroups don't
  *       factorize):
  *
  *          # Generators          -O0 -ggdb               -O2
  *          -------------------------------------------------------------------
  *          1                 0.5 s  /   250 MiB     0.45s /  230 MiB
  *          2                 0.5 s  /   260 MiB     0.5 s /  250 MiB
  *          3                 0.65s  /   310 MiB     0.62s /  310 MiB
  *          4                 2.2 s  /   860 MiB     1.7 s /  770 MiB
  *          5               130 s    / 13000 MiB   120 s   / 11000 MiB
  *
  * Everything is still very efficient up to 4 generators; beyond that, the
  * memory and CPU requirements become unreasonable. Thus we only instantiate
  * the compile-time group theory logic if the number of generators supplied
  * is 4 or fewer; otherwise the group is computed at run time, where the
  * algorithm is reasonably fast.
  */
template<std::size_t NumIndices>
struct tensor_symmetry_pre_analysis<NumIndices>
{
  typedef StaticSGroup<> root_type;
};

template<std::size_t NumIndices, typename Gen_, typename... Gens_>
struct tensor_symmetry_pre_analysis<NumIndices, Gen_, Gens_...>
{
  constexpr static std::size_t max_static_generators = 4;
  constexpr static std::size_t max_static_elements = 16;
  typedef tensor_static_symgroup_if<(sizeof...(Gens_) + 1 <= max_static_generators), NumIndices, Gen_, Gens_...> helper;
  constexpr static std::size_t possible_size = helper::size;

  typedef typename conditional<
    possible_size == 0 || possible_size >= max_static_elements,
    DynamicSGroupFromTemplateArgs<Gen_, Gens_...>,
    typename helper::type
  >::type root_type;
};
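
// For illustration, assuming non-redundant generators as above:
//   SGroup<Symmetry<0,1>>                         -> 2 elements,   static group, unrolled
//   SGroup<AntiSymmetry<0,1>, AntiSymmetry<2,3>>  -> 4 elements,   static group, unrolled
//   SGroup<Symmetry<0,1>, Symmetry<1,2>, Symmetry<2,3>, Symmetry<3,4>, Symmetry<4,5>>
//                                                 -> 5 generators, DynamicSGroupFromTemplateArgs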

template<bool instantiate, std::size_t NumIndices, typename... Gens>
struct tensor_static_symgroup_if
{
  constexpr static std::size_t size = 0;
  typedef void type;
};

template<std::size_t NumIndices, typename... Gens>
struct tensor_static_symgroup_if<true, NumIndices, Gens...> : tensor_static_symgroup<NumIndices, Gens...> {};

template<typename Tensor_>
struct tensor_symmetry_assign_value
{
  typedef typename Tensor_::Index Index;
  typedef typename Tensor_::Scalar Scalar;
  constexpr static std::size_t NumIndices = Tensor_::NumIndices;

  static inline int run(const std::array<Index, NumIndices>& transformed_indices, int transformation_flags, int dummy, Tensor_& tensor, const Scalar& value_)
  {
    Scalar value(value_);
    if (transformation_flags & ConjugationFlag)
      value = numext::conj(value);
    if (transformation_flags & NegationFlag)
      value = -value;
    tensor.coeffRef(transformed_indices) = value;
    return dummy;
  }
};

template<typename Tensor_>
struct tensor_symmetry_calculate_flags
{
  typedef typename Tensor_::Index Index;
  constexpr static std::size_t NumIndices = Tensor_::NumIndices;

  static inline int run(const std::array<Index, NumIndices>& transformed_indices, int transform_flags, int current_flags, const std::array<Index, NumIndices>& orig_indices)
  {
    if (transformed_indices == orig_indices) {
      // the anti-hermitian case requires both flags; testing the combined mask
      // with a plain & would also swallow the two cases handled below
      if ((transform_flags & (ConjugationFlag | NegationFlag)) == (ConjugationFlag | NegationFlag))
        return current_flags | GlobalImagFlag; // anti-hermitian diagonal
      else if (transform_flags & ConjugationFlag)
        return current_flags | GlobalRealFlag; // hermitian diagonal
      else if (transform_flags & NegationFlag)
        return current_flags | GlobalZeroFlag; // anti-symmetric diagonal
    }
    return current_flags;
  }
};
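
// For illustration: a group element that maps an index tuple onto itself
// constrains the coefficient stored there. With AntiSymmetry<0,1>, the tuple
// (i,i,...) is its own image under negation, so t(i,i,...) == -t(i,i,...) and
// the diagonal is forced to zero (GlobalZeroFlag). Likewise, Hermiticity forces
// a real diagonal (GlobalRealFlag) and AntiHermiticity a purely imaginary one
// (GlobalImagFlag).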

template<typename Tensor_, typename Symmetry_, int Flags = 0>
class tensor_symmetry_value_setter
{
  public:
    typedef typename Tensor_::Index Index;
    typedef typename Tensor_::Scalar Scalar;
    constexpr static std::size_t NumIndices = Tensor_::NumIndices;

    inline tensor_symmetry_value_setter(Tensor_& tensor, Symmetry_ const& symmetry, std::array<Index, NumIndices> const& indices)
      : m_tensor(tensor), m_symmetry(symmetry), m_indices(indices) { }

    inline tensor_symmetry_value_setter<Tensor_, Symmetry_, Flags>& operator=(Scalar const& value)
    {
      doAssign(value);
      return *this;
    }
  private:
    Tensor_& m_tensor;
    Symmetry_ m_symmetry;
    std::array<Index, NumIndices> m_indices;

    inline void doAssign(Scalar const& value)
    {
      #ifdef EIGEN_TENSOR_SYMMETRY_CHECK_VALUES
        int value_flags = m_symmetry.template apply<internal::tensor_symmetry_calculate_flags<Tensor_>, int>(m_indices, m_symmetry.globalFlags(), m_indices);
        if (value_flags & GlobalRealFlag)
          eigen_assert(numext::imag(value) == 0);
        if (value_flags & GlobalImagFlag)
          eigen_assert(numext::real(value) == 0);
      #endif
      m_symmetry.template apply<internal::tensor_symmetry_assign_value<Tensor_>, int>(m_indices, 0, m_tensor, value);
    }
};

} // end namespace internal

} // end namespace Eigen

#endif // EIGEN_CXX11_TENSORSYMMETRY_SYMMETRY_H

/*
 * kate: space-indent on; indent-width 2; mixedindent off;
 *       indent-mode cstyle;
 */