rl-satton-0.1.2.4: Collection of Reinforcement Learning algorithms

Safe Haskell: None
Language: Haskell2010

RL.DP

Synopsis

Documentation

type Probability = Rational Source #

Probability, a value in the range [0..1]

type P s a = HashMap s (Set (a, Probability)) Source #

Policy

type V s num = HashMap s num Source #

diffV :: (Eq s, Hashable s, Num num) => V s num -> V s num -> num Source #

class (Ord s, Ord a, Fractional num, Ord num, Hashable s) => DP_Problem pr s a num | pr -> s, pr -> a, pr -> num where Source #

Dynamic Programming Problem. Parameters have the following meaning: num - Type of Numbers; pr - the problem; s - State; a - Action

Methods

dp_states :: pr -> Set s Source #

dp_actions :: pr -> s -> Set a Source #

dp_transitions :: pr -> s -> a -> Set (s, Probability) Source #

dp_reward :: pr -> s -> a -> s -> num Source #

dp_terminal_states :: pr -> Set s Source #

action :: DP_Problem pr s a num => pr -> P s a -> s -> Set (a, Probability) Source #

initV :: DP_Problem pr s a num => pr -> num -> V s num Source #

invariant_probable_actions :: (DP_Problem pr s a num, Show s, Show a) => pr -> Bool Source #

For a given state, the probabilities of all possible actions should sum up to 1

invariant_closed_transition :: (DP_Problem pr s a num, Show s, Show a) => pr -> Bool Source #

No action leads to an unlisted state

invariant_no_dead_states :: (DP_Problem pr s a num, Show s, Show a) => pr -> Bool Source #

Terminal states are dead ends and non-terminal states are not

invariant_terminal :: (DP_Problem pr s a num, Show s, Show a) => pr -> Bool Source #

invariant_policy_actions :: (DP_Problem pr s a num, Ord a, Show s, Show a) => P s a -> pr -> Bool Source #

invariant_policy_prob :: (DP_Problem pr s a num, Ord a, Show s, Show a) => P s a -> pr -> Bool Source #

invariant :: (DP_Problem pr s a num, Show s, Show a, Ord a) => pr -> Bool Source #

policy_eq :: (Eq a, DP_Problem pr s a num) => pr -> P s a -> P s a -> Bool Source #

uniformPolicy :: (Ord a, DP_Problem pr s a num) => pr -> P s a Source #

data Opts num s a Source #

Constructors

Opts 

Fields

Instances

Show num => Show (Opts num s a) Source # 

Methods

showsPrec :: Int -> Opts num s a -> ShowS #

show :: Opts num s a -> String #

showList :: [Opts num s a] -> ShowS #

defaultOpts :: Fractional num => Opts num s a Source #

data EvalState num s Source #

Constructors

EvalState 

Fields

Instances

(Show num, Show s) => Show (EvalState num s) Source # 

Methods

showsPrec :: Int -> EvalState num s -> ShowS #

show :: EvalState num s -> String #

showList :: [EvalState num s] -> ShowS #

es_v' :: forall num s. Lens' (EvalState num s) (V s num) Source #

es_v :: forall num s. Lens' (EvalState num s) (V s num) Source #

es_iter :: forall num s. Lens' (EvalState num s) Int Source #

es_delta :: forall num s. Lens' (EvalState num s) num Source #

initEvalState :: Fractional num => V s num -> EvalState num s Source #

policy_eval :: (Monad m, DP_Problem pr s a num) => Opts num s a -> P s a -> V s num -> DP pr m s a num -> m (V s num) Source #

Iterative policy evaluation algorithm; see Figure 4.1, pg. 86.

policy_action_value :: DP_Problem pr s a num => Opts num s a -> s -> a -> V s num -> pr -> num Source #

policy_improve :: (Monad m, DP_Problem pr s a num) => Opts num s a -> V s num -> DP pr m s a num -> m (P s a) Source #

data DP pr m s a num Source #

Constructors

DP 

Fields

policy_iteration :: (Monad m, DP_Problem pr s a num, Ord a) => Opts num s a -> P s a -> V s num -> DP pr m s a num -> m (V s num, P s a) Source #