hasktorch-indef-0.0.1.0: Core Hasktorch abstractions wrapping FFI bindings

Copyright    (c) Sam Stites 2017
License      BSD3
Maintainer   sam@stites.io
Stability    experimental
Portability  non-portable
Safe Haskell None
Language     Haskell2010

Torch.Indef.Static.NN.Linear

Description

Linear layers

Synopsis

Documentation

newtype Linear i o Source #

datatype representing a linear layer with bias; computes y = Ax + b.

Constructors

Linear 

Fields

getTensors :: (Tensor '[i, o], Tensor '[o])

the weight matrix and the bias vector of the layer

Instances
Eq (Linear i o) Source # 
Instance details

Defined in Torch.Indef.Static.NN.Linear

Methods

(==) :: Linear i o -> Linear i o -> Bool #

(/=) :: Linear i o -> Linear i o -> Bool #

(KnownDim i, KnownDim o) => Num (Linear i o) Source # 
Instance details

Defined in Torch.Indef.Static.NN.Linear

Methods

(+) :: Linear i o -> Linear i o -> Linear i o #

(-) :: Linear i o -> Linear i o -> Linear i o #

(*) :: Linear i o -> Linear i o -> Linear i o #

negate :: Linear i o -> Linear i o #

abs :: Linear i o -> Linear i o #

signum :: Linear i o -> Linear i o #

fromInteger :: Integer -> Linear i o #

(KnownDim i, KnownDim o) => Show (Linear i o) Source # 
Instance details

Defined in Torch.Indef.Static.NN.Linear

Methods

showsPrec :: Int -> Linear i o -> ShowS #

show :: Linear i o -> String #

showList :: [Linear i o] -> ShowS #

Generic (Linear i o) Source # 
Instance details

Defined in Torch.Indef.Static.NN.Linear

Associated Types

type Rep (Linear i o) :: Type -> Type #

Methods

from :: Linear i o -> Rep (Linear i o) x #

to :: Rep (Linear i o) x -> Linear i o #

(KnownDim i, KnownDim o) => Backprop (Linear i o) Source # 
Instance details

Defined in Torch.Indef.Static.NN.Linear

Methods

zero :: Linear i o -> Linear i o

add :: Linear i o -> Linear i o -> Linear i o

one :: Linear i o -> Linear i o

(KnownDim i, KnownDim o) => Pairwise (Linear i o) HsReal Source # 
Instance details

Defined in Torch.Indef.Static.NN.Linear

Methods

(^+) :: Linear i o -> HsReal -> Linear i o Source #

(^-) :: Linear i o -> HsReal -> Linear i o Source #

(^*) :: Linear i o -> HsReal -> Linear i o Source #

(^/) :: Linear i o -> HsReal -> Linear i o Source #

type Rep (Linear i o) Source # 
Instance details

Defined in Torch.Indef.Static.NN.Linear

type Rep (Linear i o) = D1 (MetaData "Linear" "Torch.Indef.Static.NN.Linear" "hasktorch-indef-0.0.1.0-inplace-2pb660JGMmy35M7wFEyvsQ" True) (C1 (MetaCons "Linear" PrefixI True) (S1 (MetaSel (Just "getTensors") NoSourceUnpackedness NoSourceStrictness DecidedLazy) (Rec0 (Tensor (i ': (o ': ([] :: [Nat]))), Tensor (o ': ([] :: [Nat]))))))
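The Num and Pairwise instances allow layers to be combined and scaled directly. A minimal sketch, assuming the Pairwise operators apply the scalar elementwise to both the weight matrix and the bias vector (the helper name is illustrative, not part of this module):

-- Average two layers, e.g. as a crude parameter-averaging step.
-- Uses (+) from the Num instance and (^/) from the Pairwise instance.
averageLayers :: (KnownDim i, KnownDim o) => Linear i o -> Linear i o -> Linear i o
averageLayers a b = (a + b) ^/ 2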

update_ Source #

Arguments

:: (KnownDim i, KnownDim o) 
=> Linear i o

layer to update

-> HsReal

learning rate

-> Linear i o

gradient

-> IO () 

update a Linear layer in-place

update Source #

Arguments

:: (KnownDim i, KnownDim o) 
=> Linear i o

layer to update

-> HsReal

learning rate

-> Linear i o

gradient

-> Linear i o

updated layer

update a Linear layer, returning the updated layer
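Together these support a plain gradient-descent step. A minimal sketch, assuming the gradient comes from a backward pass such as the one returned by linearBatchIO below (sgdStep is a hypothetical helper, not part of this module):

-- One parameter update: apply the gradient, scaled by the learning
-- rate, to the current parameters via `update`.
sgdStep :: (KnownDim i, KnownDim o) => HsReal -> Linear i o -> Linear i o -> Linear i o
sgdStep lr layer gradient = update layer lr gradient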

weights :: Linear i o -> Tensor '[i, o] Source #

the dense weight matrix of a linear layer

bias :: Linear i o -> Tensor '[o] Source #

the bias vector of a linear layer

inputSize :: forall i o. KnownDim i => Linear i o -> Int Source #

The input size of a linear layer

outputSize :: forall i o. KnownDim o => Linear i o -> Int Source #

The output size of a linear layer

mkLinear :: (KnownDim i, KnownDim o) => (forall d. Dimensions d => IO (Tensor d)) -> IO (Linear i o) Source #

build a Linear layer, initializing both the weight matrix and the bias vector with the given tensor-construction action
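A minimal construction sketch. Here initTensor is a hypothetical stand-in for whatever initialization scheme you prefer (zeros, uniform noise, ...); any action that can produce a tensor at every statically-known shape fits the rank-2 argument of mkLinear:

{-# LANGUAGE DataKinds, RankNTypes #-}

-- Build a 10-input, 5-output layer from a single initializer, which
-- is used for both the '[10, 5] weights and the '[5] bias.
mkLayer10x5 :: (forall d. Dimensions d => IO (Tensor d)) -> IO (Linear 10 5)
mkLayer10x5 initTensor = mkLinear initTensor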

linear :: forall s i o. Reifies s W => All KnownDim '[i, o] => BVar s (Linear i o) -> BVar s (Tensor '[i]) -> BVar s (Tensor '[o]) Source #

Linear

The following usage notes are adapted from the Lua Torch nn.Linear documentation:

module = nn.Linear(inputDimension, outputDimension, [bias = true])

Applies a linear transformation to the incoming data, i.e. y = Ax + b. The input tensor given in forward(input) must be either a vector (1D tensor) or a matrix (2D tensor). If the input is a matrix, then each row is assumed to be an input sample of the given batch. The layer can be used without bias by setting bias = false.

You can create a layer in the following way:

module = nn.Linear(10, 5) -- 10 inputs, 5 outputs

Usually this would be added to a network of some kind, e.g.:

mlp = nn.Sequential()
mlp:add(module)

The weights and biases (A and b) can be viewed with:

print(module.weight)
print(module.bias)

The gradients for these weights can be seen with:

print(module.gradWeight)
print(module.gradBias)

As usual with nn modules, applying the linear transformation is performed with:

x = torch.Tensor(10) -- 10 inputs
y = module:forward(x)
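In this Haskell API the same transformation is expressed through the backprop library. A sketch of a forward pass and of taking gradients, assuming the standard evalBP2 and gradBP2 combinators from Numeric.Backprop and a Backprop instance for the tensor type (the Linear instance is listed above):

import Numeric.Backprop (evalBP2, gradBP2)

-- Forward pass only: y = Ax + b for a 10-input, 5-output layer.
forwardLinear :: Linear 10 5 -> Tensor '[10] -> Tensor '[5]
forwardLinear = evalBP2 linear

-- Gradients with respect to the layer parameters and the input,
-- seeded with `one` of the output type (gradBP2's convention).
gradLinear :: Linear 10 5 -> Tensor '[10] -> (Linear 10 5, Tensor '[10])
gradLinear = gradBP2 linear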

linearBatch :: forall s i o b. Reifies s W => All KnownDim '[b, i, o] => BVar s (Linear i o) -> BVar s (Tensor '[b, i]) -> BVar s (Tensor '[b, o]) Source #

linear with a batch dimension

linearBatchIO :: forall i o b. All KnownDim '[b, i, o] => Linear i o -> Tensor '[b, i] -> IO (Tensor '[b, o], Tensor '[b, o] -> IO (Linear i o, Tensor '[b, i])) Source #

linear with a batch dimension, returning the output together with a continuation that maps a gradient of the output to the gradients of the layer and of the input

linearBatchWithIO :: forall i o b. All KnownDim '[b, i, o] => Maybe (Tensor '[b, o]) -> Maybe (Tensor '[b, i]) -> Maybe (Linear i o) -> Linear i o -> Tensor '[b, i] -> IO (Tensor '[b, o], Tensor '[b, o] -> IO (Linear i o, Tensor '[b, i])) Source #

linear with a batch dimension, taking optional preallocated tensors for the output, the input gradient, and the layer gradient
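Putting the pieces together, the continuation returned by linearBatchIO can feed update to train the layer. A minimal sketch; gradOut stands for the gradient of the loss with respect to the layer's output, which would come from the rest of the network and is not computed here (trainStep is a hypothetical helper):

-- One illustrative training step: forward pass, backward pass through
-- the returned continuation, then a parameter update.
trainStep
  :: All KnownDim '[b, i, o]
  => HsReal          -- learning rate
  -> Linear i o      -- current layer
  -> Tensor '[b, i]  -- input batch
  -> Tensor '[b, o]  -- gradient of the loss w.r.t. the output
  -> IO (Linear i o)
trainStep lr layer input gradOut = do
  (_output, backward) <- linearBatchIO layer input
  (gradLayer, _gradInput) <- backward gradOut
  pure (update layer lr gradLayer)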