-- Hoogle documentation, generated by Haddock -- See Hoogle, http://www.haskell.org/hoogle/ -- | Native, complete, matrix-free linear algebra. -- -- The term numerical linear algebra is often used almost -- synonymously with matrix manipulations. However, what's -- interesting for most applications is really just points in some -- vector space and linear mappings between them, not matrices (which -- represent points or mappings, but inherently depend on a particular -- choice of basis / coordinate system). -- -- This library implements the crucial LA operations like solving linear -- equations and eigenvalue problems, without requiring that the vectors -- are represented in some particular basis. Apart from conceptual -- elegance (only operations that are actually geometrically sensible -- will typecheck – this is far stronger than just confirming that the -- dimensions match, as some other libraries do), this also opens up good -- optimisation possibilities: the vectors can be unboxed, use dedicated -- sparse compression, possibly carry out the computations on accelerated -- hardware (GPU etc.). The spaces can even be infinite-dimensional (e.g. -- function spaces). -- -- The linear algebra algorithms in this package only require the vectors -- to support fundamental operations like addition, scalar products, -- double-dual-space coercion and tensor products; none of this requires -- a basis representation. @package linearmap-category @version 0.3.2.0 module Math.VectorSpace.ZeroDimensional data ZeroDim s :: * -> * Origin :: ZeroDim s -- | Warning: These lenses will probably change their domain in the -- future. module Math.LinearMap.Category.Derivatives (/∂) :: (Num' s, LinearSpace x, LinearSpace y, LinearSpace v, LinearSpace q, s ~ Scalar x, s ~ Scalar y, s ~ Scalar v, s ~ Scalar q) => Lens' y v -> Lens' x q -> Lens' (LinearMap s x y) (LinearMap s q v) (*∂) :: (Num' s, OneDimensional q, LinearSpace q, LinearSpace v, s ~ Scalar a, s ~ Scalar q, s ~ Scalar v) => q -> Lens' a (LinearMap s q v) -> Lens' a v (.∂) :: (Fractional' s, LinearSpace x, s ~ Scalar x, LinearSpace z, s ~ Scalar z) => (forall w. (LinearSpace w, Scalar w ~ s) => Lens' (TensorProduct x w) w) -> Lens' x z -> Lens' (SymmetricTensor s x) z module Math.LinearMap.Category -- | A linear map, represented simply as a Haskell function tagged with the -- type of scalar with respect to which it is linear. Many (sparse) -- linear mappings can actually be calculated much more efficiently if -- you don't represent them with any kind of matrix, but just as a -- function (which is, after all, mathematically speaking, what a linear -- map first and foremost is). -- -- However, if you sum up many LinearFunctions – which you can -- simply do with the VectorSpace instance – they will become ever -- slower to calculate, because the summand-functions are actually -- computed individually and only the results summed. That's where -- LinearMap is generally preferable. You can always convert -- between these equivalent categories using arr. newtype LinearFunction s v w LinearFunction :: (v -> w) -> LinearFunction s v w [getLinearFunction] :: LinearFunction s v w -> v -> w -- | Infix synonym of LinearFunction, without explicit mention of -- the scalar type. type (-+>) v w = LinearFunction (Scalar w) v w -- | A bilinear function is a linear function mapping to a linear function, -- or equivalently a 2-argument function that's linear in each argument -- independently.
Note that this cannot be uncurried to a linear -- function with a tuple argument (this would not be linear but -- quadratic). type Bilinear v w y = LinearFunction (Scalar v) v (LinearFunction (Scalar v) w y) -- | Use a function as a linear map. This is only well-defined if the -- function is linear (this condition is not checked). lfun :: (EnhancedCat f (LinearFunction s), LinearSpace u, TensorSpace v, Scalar u ~ s, Scalar v ~ s, Object f u, Object f v) => (u -> v) -> f u v -- | The tensor product between one space's dual space and another space is -- the space spanned by vector–dual-vector pairs, in bra-ket -- notation written as -- --
--   m = ∑ |w⟩⟨v|
--   
-- -- Any linear mapping can be written as such a (possibly infinite) sum. -- The TensorProduct data structure only stores the linearly -- independent parts though; for simple finite-dimensional spaces this -- means e.g. LinearMap ℝ ℝ³ ℝ³ effectively boils down to -- an ordinary matrix type, namely an array of column-vectors -- |w⟩. -- -- (The ⟨v| dual-vectors are then simply assumed to come from -- the canonical basis.) -- -- For bigger spaces, the tensor product may be implemented in a more -- efficient sparse structure; this can be defined in the -- TensorSpace instance. newtype LinearMap s v w LinearMap :: TensorProduct (DualVector v) w -> LinearMap s v w [getLinearMap] :: LinearMap s v w -> TensorProduct (DualVector v) w -- | Infix synonym for LinearMap, without explicit mention of the -- scalar type. type (+>) v w = LinearMap (Scalar v) v w -- | The dual operation to the tuple constructor, or rather to the -- &&& fanout operation: evaluate two (linear) -- functions in parallel and sum up the results. The typical use is to -- concatenate “row vectors” in a matrix definition. (⊕) :: (u +> w) -> (v +> w) -> (u, v) +> w -- | ASCII version of '⊕' (>+<) :: (u +> w) -> (v +> w) -> (u, v) +> w -- | For real matrices, this boils down to transpose. For free -- complex spaces it also incurs complex conjugation. -- -- The signature can also be understood as -- --
--   adjoint :: (v +> w) -> (DualVector w +> DualVector v)
--   
-- -- Or -- --
--   adjoint :: (DualVector v +> DualVector w) -> (w +> v)
--   
-- -- But not (v+>w) -> (w+>v), in general (though -- in a Hilbert space, this too is equivalent, via the riesz -- isomorphism). adjoint :: (LinearSpace v, LinearSpace w, Scalar v ~ Scalar w) => (v +> DualVector w) -+> (w +> DualVector v) (<.>^) :: LinearSpace v => DualVector v -> v -> Scalar v -- | A linear map that simply projects from a dual vector in u to -- a vector in v. -- --
--   (du -+|> v) u  ≡  v ^* (du <.>^ u)
--   
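-- -- For instance, with made-up numbers (a sketch, assuming the pair-space -- instances this package provides, where DualVector (Double, Double) ~ -- (Double, Double)): -- --
--   ((1,0) -+|> (0,5)) (3,4)  ≡  (0,5) ^* 3  ≡  (0,15)
--   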
(-+|>) :: (EnhancedCat f (LinearFunction s), LSpace u, LSpace v, Scalar u ~ s, Scalar v ~ s, Object f u, Object f v) => DualVector u -> v -> f u v -- | Tensor products are most interesting because they can be used to -- implement linear mappings, but they also form a useful vector space in -- their own right. newtype Tensor s v w Tensor :: TensorProduct v w -> Tensor s v w [getTensorProduct] :: Tensor s v w -> TensorProduct v w -- | Infix synonym for Tensor, without explicit mention of the -- scalar type. type (⊗) v w = Tensor (Scalar v) v w -- | Infix version of tensorProduct. (⊗) :: (TensorSpace v, TensorSpace w, Scalar w ~ Scalar v, Num' (Scalar v)) => v -> w -> v ⊗ w newtype SymmetricTensor s v SymTensor :: Tensor s v v -> SymmetricTensor s v [getSymmetricTensor] :: SymmetricTensor s v -> Tensor s v v squareV :: (Num' s, s ~ Scalar v) => TensorSpace v => v -> SymmetricTensor s v squareVs :: (Num' s, s ~ Scalar v) => TensorSpace v => [v] -> SymmetricTensor s v type ⊗〃+> v w = LinearMap (Scalar v) (SymmetricTensor (Scalar v) v) w currySymBilin :: LinearSpace v => (v `⊗〃+>` w) -+> (v +> (v +> w)) -- | A positive (semi)definite symmetric bilinear form. This gives rise to -- a norm thus: -- --
--   Norm n |$| v = √(n v <.>^ v)
--   
-- -- Strictly speaking, this type is neither strong enough nor general -- enough to deserve the name Norm: it includes proper -- Seminorms (i.e. m|$|v ≡ 0 does not guarantee v == -- zeroV), but not actual norms such as the ℓ₁-norm on ℝⁿ (Taxicab -- norm) or the supremum norm. However, 𝐿₂-like norms are the only ones -- that can really be formulated without any basis reference; and -- guaranteeing positive definiteness through the type system is scarcely -- practical. newtype Norm v Norm :: v -+> DualVector v -> Norm v [applyNorm] :: Norm v -> v -+> DualVector v -- | A “norm” that may explicitly be degenerate, with m|$|v ⩵ 0 -- for some v ≠ zeroV. type Seminorm v = Norm v -- | A seminorm defined by -- --
--   ‖v‖ = √(∑ᵢ ⟨dᵢ|v⟩²)
--   
-- -- for some dual vectors dᵢ. If given a complete basis of the -- dual space, this generates a proper Norm. -- -- If the dᵢ are a complete orthonormal system, you get the -- euclideanNorm (in an inefficient form). spanNorm :: LSpace v => [DualVector v] -> Seminorm v -- | The canonical standard norm (2-norm) on inner-product / Hilbert -- spaces. euclideanNorm :: HilbertSpace v => Norm v -- | Use a Norm to measure the length / norm of a vector. -- --
--   euclideanNorm |$| v  ≡  √(v <.> v)
--   
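-- -- For example, a seminorm that measures only the first component of a -- pair (a sketch, assuming the pair-space instances, where DualVector -- (Double, Double) ~ (Double, Double)): -- --
--   n :: Seminorm (Double, Double)
--   n = spanNorm [(1,0)]
--   
--   n |$| (3,4)  ≡  3
--   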
(|$|) :: (LSpace v, Floating (Scalar v)) => Seminorm v -> v -> Scalar v -- | The squared norm. More efficient than |$| because that needs to -- take the square root. normSq :: LSpace v => Seminorm v -> v -> Scalar v -- | “Partially apply” a norm, yielding a dual vector (i.e. a linear form -- that accepts the second argument of the scalar product). -- --
--   (euclideanNorm <$| v) <.>^ w  ≡  v <.> w
--   
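-- -- Concretely, with made-up numbers (a sketch, assuming the pair-space -- instances): -- --
--   (euclideanNorm <$| (1,2)) <.>^ (3,4)  ≡  1*3 + 2*4  ≡  11
--   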
-- -- See also |&>. (<$|) :: LSpace v => Norm v -> v -> DualVector v -- | Scale the result of a norm with the absolute of the given number. -- --
--   scaleNorm μ n |$| v = abs μ * (n|$|v)
--   
-- -- Equivalently, this scales the norm's unit ball by the reciprocal of -- that factor. scaleNorm :: LSpace v => Scalar v -> Norm v -> Norm v normSpanningSystem :: SimpleSpace v => Seminorm v -> [DualVector v] normSpanningSystem' :: (FiniteDimensional v, IEEE (Scalar v)) => Seminorm v -> [v] -- | A multidimensional variance of points v with some -- distribution can be considered a norm on the dual space, quantifying -- for a dual vector dv the expectation value of -- (dv <.>^ v)^2. type Variance v = Norm (DualVector v) spanVariance :: LSpace v => [v] -> Variance v -- | Flipped, “ket” version of <$|. -- --
--   v <.>^ (w |&> euclideanNorm)  ≡  v <.> w
--   
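-- -- Concretely, with made-up numbers (a sketch, assuming the pair-space -- instances; euclideanNorm can act as a Variance here because -- DualVector (Double, Double) ~ (Double, Double)): -- --
--   (1,0) <.>^ ((3,4) |&> euclideanNorm)  ≡  (1,0) <.> (3,4)  ≡  3
--   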
(|&>) :: LSpace v => DualVector v -> Variance v -> v -- | Inverse of spanVariance. Equivalent to -- normSpanningSystem on the dual space. varianceSpanningSystem :: SimpleSpace v => Variance v -> [v] -- | A proper norm induces a norm on the dual space – the “reciprocal -- norm”. (The orthonormal systems of the norm and its dual are mutually -- conjugate.) The dual norm of a seminorm is undefined. dualNorm :: SimpleSpace v => Norm v -> Variance v -- | dualNorm in the opposite direction. This is actually -- self-inverse; with dualSpaceWitness you can replace each with -- the other direction. dualNorm' :: SimpleSpace v => Variance v -> Norm v -- | Interpret a variance as a covariance between two subspaces, and -- normalise it by the variance on u. The result is effectively -- the linear regression coefficient of a simple regression of the -- vectors spanning the variance. dependence :: (SimpleSpace u, SimpleSpace v, Scalar u ~ Scalar v) => Variance (u, v) -> (u +> v) -- | spanNorm / spanVariance are inefficient if the number of -- vectors is similar to the dimension of the space, or even larger than -- it. Use this function to optimise the underlying operator to a dense -- matrix representation. densifyNorm :: LSpace v => Norm v -> Norm v -- | Like densifyNorm, but also performs a “sanity check” to -- eliminate NaN etc. problems. wellDefinedNorm :: LinearSpace v => Norm v -> Maybe (Norm v) -- | Inverse function application, aka solving a linear system: -- --
--   f \$ f $ v  ≡  v
--   
--   f $ f \$ u  ≡  u
--   
-- -- If f does not have full rank, the behaviour is undefined. -- However, it does not need to be a proper isomorphism: the first of the -- above equations is still fulfilled if only f is -- injective (overdetermined system) and the second if it is -- surjective. -- -- If you want to solve for multiple RHS vectors, be sure to partially -- apply this operator to the linear map, like -- --
--   map (f \$) [v₁, v₂, ...]
--   
-- -- Since most of the work is actually done in triangularising the -- operator, this may be much faster than -- --
--   [f \$ v₁, f \$ v₂, ...]
--   
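-- -- As a concrete sketch (assuming the pair-space instances; defining the -- map via lfun is legitimate only because the function is linear): -- --
--   m :: (Double, Double) +> (Double, Double)
--   m = lfun $ \(x,y) -> (2*x + y, x + y)
--   
--   m \$ (3,2)  ≡  (1,1)
--   
-- -- (Indeed, m applied to (1,1) yields (3,2).)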
(\$) :: (SimpleSpace u, SimpleSpace v, Scalar u ~ Scalar v) => (u +> v) -> v -> u pseudoInverse :: (SimpleSpace u, SimpleSpace v, Scalar u ~ Scalar v) => (u +> v) -> v +> u -- | Approximation of the determinant. roughDet :: (FiniteDimensional v, IEEE (Scalar v)) => (v +> v) -> Scalar v linearRegressionW :: (LinearSpace x, FiniteDimensional y, SimpleSpace m, Scalar x ~ s, Scalar y ~ s, Scalar m ~ s, RealFrac' s) => Norm y -> (x -> (m +> y)) -> [(x, y)] -> m linearRegressionWVar :: (LinearSpace x, FiniteDimensional y, SimpleSpace m, Scalar x ~ s, Scalar y ~ s, Scalar m ~ s, RealFrac' s) => (x -> (m +> y)) -> [(x, (y, Norm y))] -> (m, [DualVector m]) -- | Simple automatic finding of the eigenvalues and -vectors of a -- Hermitian operator, in reasonable approximation. -- -- This works by spanning a QR-stabilised Krylov basis with -- constructEigenSystem until it is complete -- (roughEigenSystem), and then properly decoupling the system -- with finishEigenSystem (based on two iterations of shifted -- Givens rotations). -- -- This function is a tradeoff in performance vs. accuracy. Use -- constructEigenSystem and finishEigenSystem directly to -- compute a (perhaps incomplete) approximation more quickly, or to -- obtain more precise results. eigen :: (FiniteDimensional v, HilbertSpace v, IEEE (Scalar v)) => (v +> v) -> [(Scalar v, v)] -- | Lazily compute the eigenbasis of a linear map. The algorithm is -- essentially a hybrid of Lanczos/Arnoldi style Krylov-spanning and -- QR-diagonalisation, which we don't do separately but interleave -- at each step. -- -- The size of the eigen-subbasis increases with each step until the -- space's dimension is reached. (But the algorithm can also be used for -- infinite-dimensional spaces.) constructEigenSystem :: (LSpace v, RealFloat (Scalar v)) => Norm v -> Scalar v -> (v -+> v) -> [v] -> [[Eigenvector v]] -- | Find a system of vectors that approximate the eigensystem, in the sense -- that each true eigenvalue is represented by an approximate one, which -- is closer to the true value than all the other approximate EVs. -- -- This function does not make any guarantees as to how well a single -- eigenvalue is approximated, though. roughEigenSystem :: (FiniteDimensional v, IEEE (Scalar v)) => Norm v -> (v +> v) -> [Eigenvector v] finishEigenSystem :: (LSpace v, RealFloat (Scalar v)) => Norm v -> [Eigenvector v] -> [Eigenvector v] data Eigenvector v Eigenvector :: Scalar v -> v -> v -> v -> Scalar v -> Eigenvector v -- | The estimated eigenvalue λ. [ev_Eigenvalue] :: Eigenvector v -> Scalar v -- | Normalised vector v that gets mapped to a multiple, namely: [ev_Eigenvector] :: Eigenvector v -> v -- | f $ v ≡ λ *^ v. [ev_FunctionApplied] :: Eigenvector v -> v -- | Deviation of v from (f$v)^/λ. Ideally, the two would of -- course be equal. [ev_Deviation] :: Eigenvector v -> v -- | Squared norm of the deviation. [ev_Badness] :: Eigenvector v -> Scalar v -- | The workhorse of this package: most functions here work on vector -- spaces that fulfill the LSpace v constraint. -- -- In summary, this is a VectorSpace with an implementation for -- TensorProduct v w, for any other space w, and -- with a DualVector space. This fulfills DualVector -- (DualVector v) ~ v (this constraint is encapsulated in -- DualSpaceWitness). -- -- To make a new space of yours an LSpace, you must define -- instances of TensorSpace and LinearSpace. In fact, -- LSpace is equivalent to LinearSpace, but makes the -- condition explicit that the scalar and dual vectors also form a linear -- space.
LinearSpace only stores that constraint in -- dualSpaceWitness (to avoid UndecidableSuperclasses). type LSpace v = (LinearSpace v, LinearSpace (Scalar v), LinearSpace (DualVector v), Num' (Scalar v)) class (VectorSpace v, PseudoAffine v) => TensorSpace v where type family TensorProduct v w :: * subtractTensors m n = addTensors m (getLinearFunction negateTensor n) tensorProducts vws = sumV [getLinearFunction (getLinearFunction tensorProduct v) w | (v, w) <- vws] wellDefinedVector v = if v == v then Just v else Nothing scalarSpaceWitness :: TensorSpace v => ScalarSpaceWitness v linearManifoldWitness :: TensorSpace v => LinearManifoldWitness v zeroTensor :: (TensorSpace v, TensorSpace w, Scalar w ~ Scalar v) => v ⊗ w toFlatTensor :: TensorSpace v => v -+> (v ⊗ Scalar v) fromFlatTensor :: TensorSpace v => (v ⊗ Scalar v) -+> v addTensors :: (TensorSpace v, TensorSpace w, Scalar w ~ Scalar v) => (v ⊗ w) -> (v ⊗ w) -> v ⊗ w subtractTensors :: (TensorSpace v, TensorSpace v, TensorSpace w, Scalar w ~ Scalar v) => (v ⊗ w) -> (v ⊗ w) -> v ⊗ w scaleTensor :: (TensorSpace v, TensorSpace w, Scalar w ~ Scalar v) => Bilinear (Scalar v) (v ⊗ w) (v ⊗ w) negateTensor :: (TensorSpace v, TensorSpace w, Scalar w ~ Scalar v) => (v ⊗ w) -+> (v ⊗ w) tensorProduct :: (TensorSpace v, TensorSpace w, Scalar w ~ Scalar v) => Bilinear v w (v ⊗ w) tensorProducts :: (TensorSpace v, TensorSpace w, Scalar w ~ Scalar v) => [(v, w)] -> (v ⊗ w) transposeTensor :: (TensorSpace v, TensorSpace w, Scalar w ~ Scalar v) => (v ⊗ w) -+> (w ⊗ v) fmapTensor :: (TensorSpace v, TensorSpace w, TensorSpace x, Scalar w ~ Scalar v, Scalar x ~ Scalar v) => Bilinear (w -+> x) (v ⊗ w) (v ⊗ x) fzipTensorWith :: (TensorSpace v, TensorSpace u, TensorSpace w, TensorSpace x, Scalar u ~ Scalar v, Scalar w ~ Scalar v, Scalar x ~ Scalar v) => Bilinear ((w, x) -+> u) (v ⊗ w, v ⊗ x) (v ⊗ u) coerceFmapTensorProduct :: (TensorSpace v, Functor p) => p v -> Coercion a b -> Coercion (TensorProduct v a) (TensorProduct v b) -- | “Sanity-check” a vector. This typically amounts to detecting any NaN -- components, which should trigger a Nothing result. Otherwise, -- the result should be Just the input, but may also be -- optimised / memoised if applicable (i.e. for function spaces). wellDefinedVector :: TensorSpace v => v -> Maybe v wellDefinedTensor :: (TensorSpace v, TensorSpace w, Scalar w ~ Scalar v) => v ⊗ w -> Maybe (v ⊗ w) -- | The class of vector spaces v for which LinearMap s -- v w is well-implemented. 
class (TensorSpace v, Num (Scalar v)) => LinearSpace v where type family DualVector v :: * idTensor = case dualSpaceWitness :: DualSpaceWitness v of { DualSpaceWitness -> transposeTensor -+$> asTensor $ linearId } sampleLinearFunction = case (scalarSpaceWitness :: ScalarSpaceWitness v, dualSpaceWitness :: DualSpaceWitness v) of { (ScalarSpaceWitness, DualSpaceWitness) -> LinearFunction $ \ f -> getLinearFunction (fmap f) id } toLinearForm = case (scalarSpaceWitness :: ScalarSpaceWitness v, dualSpaceWitness :: DualSpaceWitness v) of { (ScalarSpaceWitness, DualSpaceWitness) -> toFlatTensor >>> arr fromTensor } fromLinearForm = case (scalarSpaceWitness :: ScalarSpaceWitness v, dualSpaceWitness :: DualSpaceWitness v) of { (ScalarSpaceWitness, DualSpaceWitness) -> arr asTensor >>> fromFlatTensor } coerceDoubleDual = case dualSpaceWitness :: DualSpaceWitness v of { DualSpaceWitness -> Coercion } trace = case scalarSpaceWitness :: ScalarSpaceWitness v of { ScalarSpaceWitness -> flipBilin contractLinearMapAgainst -+$> id } contractTensorMap = case scalarSpaceWitness :: ScalarSpaceWitness v of { ScalarSpaceWitness -> arr deferLinearMap >>> transposeTensor >>> fmap trace >>> fromFlatTensor } contractMapTensor = case (scalarSpaceWitness :: ScalarSpaceWitness v, dualSpaceWitness :: DualSpaceWitness v) of { (ScalarSpaceWitness, DualSpaceWitness) -> arr (coUncurryLinearMap >>> asTensor) >>> transposeTensor >>> fmap (arr asLinearMap >>> trace) >>> fromFlatTensor } contractTensorFn = LinearFunction $ getLinearFunction sampleLinearFunction >>> getLinearFunction contractTensorMap contractLinearMapAgainst = case (scalarSpaceWitness :: ScalarSpaceWitness v, dualSpaceWitness :: DualSpaceWitness v) of { (ScalarSpaceWitness, DualSpaceWitness) -> arr asTensor >>> transposeTensor >>> applyDualVector >>> LinearFunction (. 
sampleLinearFunction) } composeLinear = case scalarSpaceWitness :: ScalarSpaceWitness v of { ScalarSpaceWitness -> LinearFunction $ \ f -> fmap (applyLinear -+$> f) } dualSpaceWitness :: LinearSpace v => DualSpaceWitness v linearId :: LinearSpace v => v +> v idTensor :: LinearSpace v => v ⊗ DualVector v sampleLinearFunction :: (LinearSpace v, TensorSpace w, Scalar v ~ Scalar w) => (v -+> w) -+> (v +> w) toLinearForm :: LinearSpace v => DualVector v -+> (v +> Scalar v) fromLinearForm :: LinearSpace v => (v +> Scalar v) -+> DualVector v coerceDoubleDual :: LinearSpace v => Coercion v (DualVector (DualVector v)) trace :: LinearSpace v => (v +> v) -+> Scalar v contractTensorMap :: (LinearSpace v, TensorSpace w, Scalar w ~ Scalar v) => (v +> (v ⊗ w)) -+> w contractMapTensor :: (LinearSpace v, TensorSpace w, Scalar w ~ Scalar v) => (v ⊗ (v +> w)) -+> w contractTensorFn :: (LinearSpace v, TensorSpace w, Scalar w ~ Scalar v) => (v -+> (v ⊗ w)) -+> w contractLinearMapAgainst :: (LinearSpace v, LinearSpace w, Scalar w ~ Scalar v) => Bilinear (v +> w) (w -+> v) (Scalar v) applyDualVector :: (LinearSpace v, LinearSpace v) => Bilinear (DualVector v) v (Scalar v) applyLinear :: (LinearSpace v, TensorSpace w, Scalar w ~ Scalar v) => Bilinear (v +> w) v w composeLinear :: (LinearSpace v, LinearSpace w, TensorSpace x, Scalar w ~ Scalar v, Scalar x ~ Scalar v) => Bilinear (w +> x) (v +> w) (v +> x) tensorId :: (LinearSpace v, LinearSpace w, Scalar w ~ Scalar v) => (v ⊗ w) +> (v ⊗ w) applyTensorFunctional :: (LinearSpace v, LinearSpace u, Scalar u ~ Scalar v) => Bilinear (DualVector (v ⊗ u)) (v ⊗ u) (Scalar v) applyTensorLinMap :: (LinearSpace v, LinearSpace u, TensorSpace w, Scalar u ~ Scalar v, Scalar w ~ Scalar v) => Bilinear ((v ⊗ u) +> w) (v ⊗ u) w -- | SemiInner is the class of vector spaces with finite subspaces -- in which you can define a basis that can be used to project from the -- whole space into the subspace. The usual application is to use a -- kind of Galerkin method to give an approximate solution (see -- \$) to a linear equation in a possibly infinite-dimensional -- space. -- -- Of course, this also works for spaces which are already -- finite-dimensional themselves. class LinearSpace v => SemiInner v where symTensorTensorDualBasisCandidates = case (dualSpaceWitness :: DualSpaceWitness v, dualSpaceWitness :: DualSpaceWitness w, scalarSpaceWitness :: ScalarSpaceWitness v) of { (DualSpaceWitness, DualSpaceWitness, ScalarSpaceWitness) -> map (second $ getLinearFunction transposeTensor) >>> dualBasisCandidates >>> fmap (fmap . second $ arr asTensor >>> arr transposeTensor >>> arr fromTensor) } -- | Lazily enumerate choices of a basis of functionals that can be made -- dual to the given vectors, in order of preference (which roughly -- means: large in the normal direction). I.e., if the vector 𝑣 -- is assigned early to the dual vector 𝑣', then (𝑣' $ -- 𝑣) should be large and all the other products comparably small. -- -- The purpose is that we should be able to make this basis orthonormal -- with a ~Gaussian-elimination approach, in a way that stays numerically -- stable. This is otherwise known as the choice of a pivot -- element. -- -- For simple finite-dimensional array-vectors, you can easily define -- this method using cartesianDualBasisCandidates.
dualBasisCandidates :: SemiInner v => [(Int, v)] -> Forest (Int, DualVector v) tensorDualBasisCandidates :: (SemiInner v, SemiInner w, Scalar w ~ Scalar v) => [(Int, v ⊗ w)] -> Forest (Int, DualVector (v ⊗ w)) symTensorDualBasisCandidates :: SemiInner v => [(Int, SymmetricTensor (Scalar v) v)] -> Forest (Int, SymmetricTensor (Scalar v) (DualVector v)) symTensorTensorDualBasisCandidates :: (SemiInner v, SemiInner w, Scalar w ~ Scalar v) => [(Int, SymmetricTensor (Scalar v) v ⊗ w)] -> Forest (Int, SymmetricTensor (Scalar v) v +> DualVector w) cartesianDualBasisCandidates :: [DualVector v] -> (v -> [ℝ]) -> ([(Int, v)] -> Forest (Int, DualVector v)) embedFreeSubspace :: (SemiInner v, RealFrac' (Scalar v), Traversable t) => t v -> Maybe (ReifiedLens' v (t (Scalar v))) class (LSpace v) => FiniteDimensional v where data family SubBasis v :: * subbasisDimension = length . enumerateSubBasis entireBasis :: FiniteDimensional v => SubBasis v enumerateSubBasis :: FiniteDimensional v => SubBasis v -> [v] subbasisDimension :: FiniteDimensional v => SubBasis v -> Int -- | Split up a linear map into “column vectors” WRT some suitable basis. decomposeLinMap :: (FiniteDimensional v, LSpace w, Scalar w ~ Scalar v) => (v +> w) -> (SubBasis v, DList w) -- | Expand in the given basis, if possible. Else yield a superbasis of the -- given one, in which this is possible, and the decomposition -- therein. decomposeLinMapWithin :: (FiniteDimensional v, LSpace w, Scalar w ~ Scalar v) => SubBasis v -> (v +> w) -> Either (SubBasis v, DList w) (DList w) -- | Assemble a vector from coefficients in some basis. Return any excess -- coefficients. recomposeSB :: FiniteDimensional v => SubBasis v -> [Scalar v] -> (v, [Scalar v]) recomposeSBTensor :: (FiniteDimensional v, FiniteDimensional w, Scalar w ~ Scalar v) => SubBasis v -> SubBasis w -> [Scalar v] -> (v ⊗ w, [Scalar v]) recomposeLinMap :: (FiniteDimensional v, LSpace w, Scalar w ~ Scalar v) => SubBasis v -> [w] -> (v +> w, [w]) -- | Given a function that interprets a coefficient-container as a vector -- representation, build a linear function mapping to that space. recomposeContraLinMap :: (FiniteDimensional v, LinearSpace w, Scalar w ~ Scalar v, Functor f) => (f (Scalar w) -> w) -> f (DualVector v) -> v +> w recomposeContraLinMapTensor :: (FiniteDimensional v, FiniteDimensional u, LinearSpace w, Scalar u ~ Scalar v, Scalar w ~ Scalar v, Functor f) => (f (Scalar w) -> w) -> f (v +> DualVector u) -> (v ⊗ u) +> w -- | The existence of a finite basis gives us an isomorphism between a -- space and its dual space. Note that this isomorphism is not natural -- (i.e. it depends on the actual choice of basis, unlike everything else -- in this library). uncanonicallyFromDual :: FiniteDimensional v => DualVector v -+> v uncanonicallyToDual :: FiniteDimensional v => v -+> DualVector v addV :: AdditiveGroup w => LinearFunction s (w, w) w scale :: VectorSpace v => Bilinear (Scalar v) v v inner :: InnerSpace v => Bilinear v v (Scalar v) flipBilin :: Bilinear v w y -> Bilinear w v y bilinearFunction :: (v -> w -> y) -> Bilinear v w y (.⊗) :: (TensorSpace v, HasBasis v, TensorSpace w, Num' (Scalar v), Scalar v ~ Scalar w) => Basis v -> w -> v ⊗ w type DualSpace v = v +> Scalar v -- | The Riesz representation theorem provides an isomorphism -- between a Hilbert space and its (continuous) dual space.
riesz :: (FiniteDimensional v, InnerSpace v) => DualVector v -+> v coRiesz :: (LSpace v, InnerSpace v) => v -+> DualVector v -- | Functions are generally a pain to display, but since linear -- functionals in a Hilbert space can be represented by vectors in -- that space, this can be used for implementing a Show instance. showsPrecAsRiesz :: (FiniteDimensional v, InnerSpace v, Show v, HasBasis (Scalar v), Basis (Scalar v) ~ ()) => Int -> DualSpace v -> ShowS -- | Outer product of a general v-vector and a basis element from -- w. Note that this operation is in general pretty inefficient; -- it is provided mostly to lay out matrix definitions neatly. (.<) :: (FiniteDimensional v, Num' (Scalar v), InnerSpace v, LSpace w, HasBasis w, Scalar v ~ Scalar w) => Basis w -> v -> v +> w type HilbertSpace v = (LSpace v, InnerSpace v, DualVector v ~ v) type SimpleSpace v = (FiniteDimensional v, FiniteDimensional (DualVector v), SemiInner v, SemiInner (DualVector v), RealFrac' (Scalar v)) class (Num s, LinearSpace s) => Num' s closedScalarWitness :: Num' s => ClosedScalarWitness s type Fractional' s = (Num' s, Fractional s, Eq s, VectorSpace s) type RealFrac' s = (Fractional' s, IEEE s, InnerSpace s) type RealFloat' s = (RealFrac' s, Floating s) type LinearShowable v = (Show v, RieszDecomposable v) data ClosedScalarWitness s ClosedScalarWitness :: ClosedScalarWitness s data ScalarSpaceWitness v ScalarSpaceWitness :: ScalarSpaceWitness v data DualSpaceWitness v DualSpaceWitness :: DualSpaceWitness v data LinearManifoldWitness v LinearManifoldWitness :: BoundarylessWitness v -> LinearManifoldWitness v -- | Modify a norm in such a way that the given vectors lie within its unit -- ball. (Not optimally – the unit ball may be bigger than -- necessary.) relaxNorm :: SimpleSpace v => Norm v -> [v] -> Norm v transformNorm :: (LSpace v, LSpace w, Scalar v ~ Scalar w) => (v +> w) -> Norm w -> Norm v transformVariance :: (LSpace v, LSpace w, Scalar v ~ Scalar w) => (v +> w) -> Variance v -> Variance w -- | The unique positive number whose norm is 1 (if the norm is not -- constant zero). findNormalLength :: RealFrac' s => Norm s -> Maybe s -- | Unsafe version of findNormalLength; only works reliably if the -- norm is actually positive definite. normalLength :: RealFrac' s => Norm s -> s summandSpaceNorms :: (SimpleSpace u, SimpleSpace v, Scalar u ~ Scalar v) => Norm (u, v) -> (Norm u, Norm v) sumSubspaceNorms :: (LSpace u, LSpace v, Scalar u ~ Scalar v) => Norm u -> Norm v -> Norm (u, v) -- | For any two norms, one can find a system of co-vectors that, with -- suitable coefficients, spans either of them: if shSys = -- sharedNormSpanningSystem n₀ n₁, then -- --
--   n₀ = spanNorm $ fst<$>shSys
--   
-- -- and -- --
--   n₁ = spanNorm [dv^*η | (dv,η)<-shSys]
--   
-- -- A rather crude approximation (roughEigenSystem) is used in this -- function, so do not expect the above equations to hold with great -- accuracy. sharedNormSpanningSystem :: SimpleSpace v => Norm v -> Seminorm v -> [(DualVector v, Scalar v)] -- | Like 'sharedNormSpanningSystem n₀ n₁', but allows either of the -- norms to be singular. -- --
--   n₀ = spanNorm [dv | (dv, Just _)<-shSys]
--   
-- -- and -- --
--   n₁ = spanNorm $ [dv^*η | (dv, Just η)<-shSys]
--                   ++ [ dv | (dv, Nothing)<-shSys]
--   
-- -- You may also interpret a Nothing here as an “infinite -- eigenvalue”, i.e. it is so small as a spanning vector of n₀ -- that you would need to scale it by ∞ to use it for spanning -- n₁. sharedSeminormSpanningSystem :: SimpleSpace v => Seminorm v -> Seminorm v -> [(DualVector v, Maybe (Scalar v))] -- | A system of vectors which are orthogonal with respect to both of the -- given seminorms. (In general they are not orthonormal to either -- of them.) sharedSeminormSpanningSystem' :: SimpleSpace v => Seminorm v -> Seminorm v -> [v] convexPolytopeHull :: SimpleSpace v => [v] -> [DualVector v] convexPolytopeRepresentatives :: SimpleSpace v => [DualVector v] -> [v] instance (GHC.Show.Show v, GHC.Show.Show (Data.VectorSpace.Scalar v)) => GHC.Show.Show (Math.LinearMap.Category.Eigenvector v) instance Math.LinearMap.Category.Class.LSpace v => Data.Semigroup.Semigroup (Math.LinearMap.Category.Norm v) instance Math.LinearMap.Category.Class.LSpace v => GHC.Base.Monoid (Math.LinearMap.Category.Seminorm v) instance (Math.VectorSpace.Docile.SimpleSpace v, GHC.Show.Show (Math.LinearMap.Category.Class.DualVector v)) => GHC.Show.Show (Math.LinearMap.Category.Norm v)