---------------------------------------------------------------------------
-- | Module    : Numeric.Optimization.Algorithms.HagerZhang05
-- Copyright   : (c) 2009-2011 Felipe Lessa
-- License     : GPL
--
-- Maintainer  : felipe.lessa@gmail.com
-- Stability   : experimental
-- Portability : portable
--
-- This module implements the algorithms described by Hager and
-- Zhang [1].  We use bindings to the @CG_DESCENT@ library by the
-- same authors, version 3.0 from 18\/05\/2008 [2].  The library
-- code is also licensed under the terms of the GPL.
--
-- * [1] Hager, W. W. and Zhang, H.  /A new conjugate gradient/
--   /method with guaranteed descent and an efficient line/
--   /search./ Society for Industrial and Applied Mathematics
--   Journal on Optimization, 16 (2005), 170-192.
--
-- * [2] <http://www.math.ufl.edu/~hager/papers/CG/CG_DESCENT-C-3.0.tar.gz>
--
--------------------------------------------------------------------------


module Numeric.Optimization.Algorithms.HagerZhang05
    ( -- * Main function
      -- $mainFunction
      optimize
      -- ** User-defined function types
    , Function(..)
    , Gradient(..)
    , Combined(..)
    , PointMVector
    , GradientMVector
      -- ** Kinds of function types
    , Simple
    , Mutable
      -- * Result and statistics
    , Result(..)
    , Statistics(..)
      -- * Options
    , defaultParameters
    , Parameters(..)
    , Verbose(..)
    , LineSearch(..)
    , StopRules(..)
    , EstimateError(..)
      -- * Technical parameters
    , TechParameters(..)
    ) where

import qualified Data.Vector.Generic as G
import qualified Data.Vector.Generic.Mutable as GM
import qualified Data.Vector.Storable as S
import qualified Data.Vector.Storable.Mutable as SM
import Control.Applicative
import Control.Exception (bracket)
import Control.Monad.Primitive (PrimMonad(..))
import Foreign
import Foreign.C
import qualified System.IO.Unsafe as Unsafe


trace :: String -> a -> a
trace _ x = x


-- $mainFunction
-- Please pay close attention to the types of @Vector@s and
-- @MVector@s being used below.  They may come from
-- "Data.Vector.Generic"/"Data.Vector.Generic.Mutable" or from
-- "Data.Vector.Storable"/"Data.Vector.Storable.Mutable".  The
-- rule of thumb is that input pure vectors are @Generic@ and
-- everything else is @Storable@.


-- | Run the @CG_DESCENT@ optimizer and try to minimize the
-- function.
optimize :: (G.Vector v Double)
         => Parameters          -- ^ How we should optimize.
         -> Double              -- ^ @grad_tol@, see 'stopRules'.
         -> v Double            -- ^ Initial guess.
         -> Function t1         -- ^ Function to be minimized.
         -> Gradient t2         -- ^ Gradient of the function.
         -> Maybe (Combined t3) -- ^ (Optional) Combined function computing
                                --   both the function and its gradient.
         -> IO (S.Vector Double, Result, Statistics)
optimize params grad_tol initial f g c = do
  -- Mutable vector used for initial guess and final solution.
  let n = G.length initial
  x <- GM.unstream $ G.stream initial

  -- Convert user-provided functions.
  let mf = mutableF f
      mg = mutableG g
      mc = maybe (combine mf mg) mutableC c
      cf = prepareF mf
      cg = prepareG mg
      cc = prepareC mc

  -- Allocate everything.
  (ret, stats) <-
    SM.unsafeWith x                            $ \x_ptr     ->
    allocaSet (Statistics 0 0 0 0 0)           $ \stats_ptr ->
    allocaSet params                           $ \param_ptr ->
    bracket (mkCFunction cf) freeHaskellFunPtr $ \cf_ptr    ->
    bracket (mkCGradient cg) freeHaskellFunPtr $ \cg_ptr    ->
    bracket (mkCCombined cc) freeHaskellFunPtr $ \cc_ptr    ->
    allocateWorkSpace n                        $ \work_ptr  -> do
      -- Go to C land.
      ret <- cg_descent x_ptr (fromIntegral n)
               stats_ptr param_ptr grad_tol
               cf_ptr cg_ptr cc_ptr work_ptr
      stats <- peek stats_ptr
      return (intToResult ret, stats)

  -- Retrieve solution and return.
  x' <- G.unsafeFreeze x
  return $ ret `seq` (x', ret, stats)

-- | Allocates as 'alloca' does, then initializes the memory
-- area with the given value.
allocaSet :: Storable a => a -> (Ptr a -> IO b) -> IO b
allocaSet x f = alloca $ \x_ptr -> do
                  poke x_ptr x
                  f x_ptr

-- | Allocates enough work space for @CG_DESCENT@.  If the number
-- of dimensions is small enough, we allocate temporarily via
-- 'allocaBytes'; otherwise we fall back to 'mallocBytes' and
-- 'free'.
allocateWorkSpace :: Int -> (Ptr Double -> IO a) -> IO a
allocateWorkSpace n
    | size < threshold = allocaBytes size
    | otherwise        = bracket (mallocBytes size) free
    where
      size = 4 * n * sizeOf (undefined :: Double)
      threshold = 4096 -- 4 * 128 * 8 bytes: room for 128 dimensions

type CFunction = Ptr Double ->               CInt -> IO Double
type CGradient = Ptr Double -> Ptr Double -> CInt -> IO ()
type CCombined = Ptr Double -> Ptr Double -> CInt -> IO Double
foreign import ccall safe "cg_user.h"
    cg_descent :: Ptr Double
               -> CInt
               -> Ptr Statistics
               -> Ptr Parameters
               -> Double
               -> FunPtr CFunction
               -> FunPtr CGradient
               -> FunPtr CCombined
               -> Ptr Double
               -> IO CInt
foreign import ccall "wrapper" mkCFunction :: CFunction -> IO (FunPtr CFunction)
foreign import ccall "wrapper" mkCGradient :: CGradient -> IO (FunPtr CGradient)
foreign import ccall "wrapper" mkCCombined :: CCombined -> IO (FunPtr CCombined)


-- | Phantom type for simple pure functions.
data Simple
-- | Phantom type for functions using mutable data.
data Mutable

-- | Mutable vector representing the point where the
-- function\/gradient is being evaluated.  This vector /should/
-- /not/ be modified.
type PointMVector m = SM.MVector (PrimState m) Double

-- | Mutable vector representing where the gradient should be
-- /written/.
type GradientMVector m = SM.MVector (PrimState m) Double

-- | Function calculating the value of the objective function @f@
-- at a point @x@.
data Function t where
    VFunction :: G.Vector v Double
              => (v Double -> Double)
              -> Function Simple
    MFunction :: (forall m. (PrimMonad m, Functor m)
                  => PointMVector m
                  -> m Double)
              -> Function Mutable
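
-- A minimal sketch (illustrative, not exported by this module)
-- of the same objective written both ways, using this module's
-- @S@\/@SM@ qualified imports: 'VFunction' consumes a pure
-- vector, while 'MFunction' reads the point in place without
-- copying it.
--
-- > sumSquaresV :: Function Simple
-- > sumSquaresV = VFunction (S.sum . S.map (^2))
-- >
-- > sumSquaresM :: Function Mutable
-- > sumSquaresM = MFunction $ \mx ->
-- >   let n = SM.length mx
-- >       go !acc i | i >= n    = return acc
-- >                 | otherwise = do xi <- SM.unsafeRead mx i
-- >                                  go (acc + xi*xi) (i+1)
-- >   in go 0 0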



-- | Copies the input array from a mutable storable vector to any
-- pure vector.  Used to convert pure functions into mutable
-- ones.
copyInput :: (PrimMonad m, G.Vector v Double)
          => SM.MVector (PrimState m) Double
          -> m (v Double)
copyInput mx = do
  let s = trace "    copyInput start" $ GM.length mx
  mz <- GM.new s
  let go i | i >= s    = return ()
           | otherwise = GM.unsafeRead mx i >>=
                         GM.unsafeWrite mz i >> go (i+1)
  go 0
  trace "              stop" $ G.unsafeFreeze mz

-- | Copies the output array from any pure vector to a mutable
-- storable array.  Used to convert pure functions that return
-- the gradient into mutable ones.
copyOutput :: (PrimMonad m, G.Vector v Double)
           => SM.MVector (PrimState m) Double
           -> v Double
           -> m ()
copyOutput mret r = go $ trace "    copyOutput start" $ 0
  where
    s = min (GM.length mret) (G.length r)
    go i | i >= s    = trace "               stop" $ return ()
         | otherwise = let !x = G.unsafeIndex r i
                       in GM.unsafeWrite mret i x >> go (i+1)



mutableF :: Function t -> Function Mutable
mutableF (VFunction f) = MFunction (\mx -> f <$> copyInput mx)
mutableF (MFunction f) = MFunction f

prepareF :: Function Mutable -> CFunction
prepareF (MFunction f) =
    \x_ptr n -> do
      let n' = fromIntegral n
      x_fptr <- newForeignPtr_ x_ptr
      let x = SM.unsafeFromForeignPtr x_fptr 0 n'
      f x


-- | Function calculating the value of the gradient of the
-- objective function @f@ at a point @x@.
--
-- The 'MGradient' constructor uses a function receiving as
-- parameters the point @x@ being evaluated (should not be
-- modified) and the vector where the gradient should be written.
data Gradient t where
    VGradient :: G.Vector v Double
              => (v Double -> v Double)
              -> Gradient Simple
    MGradient :: (forall m. (PrimMonad m, Functor m)
                  => PointMVector m
                  -> GradientMVector m
                  -> m ())
              -> Gradient Mutable
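
-- A minimal sketch (illustrative, not exported by this module)
-- of a mutable gradient for @f(x) = sum x_i^2@: it reads each
-- coordinate of the point and writes @2 x_i@ into the output
-- vector.
--
-- > sumSquaresGrad :: Gradient Mutable
-- > sumSquaresGrad = MGradient $ \mx mret ->
-- >   let n = min (SM.length mx) (SM.length mret)
-- >       go i | i >= n    = return ()
-- >            | otherwise = do xi <- SM.unsafeRead mx i
-- >                             SM.unsafeWrite mret i (2 * xi)
-- >                             go (i+1)
-- >   in go 0
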
mutableG :: Gradient t -> Gradient Mutable
mutableG (VGradient f) = MGradient f'
    where
      f' :: (PrimMonad m, Functor m) =>
            PointMVector m
         -> GradientMVector m
         -> m ()
      f' mx mret = f <$> copyInput mx >>= copyOutput mret
mutableG (MGradient f) = MGradient f


prepareG :: Gradient Mutable -> CGradient
prepareG (MGradient f) =
    \ret_ptr x_ptr n -> do
      let n' = fromIntegral n
      x_fptr   <- newForeignPtr_ x_ptr
      ret_fptr <- newForeignPtr_ ret_ptr
      let x = SM.unsafeFromForeignPtr x_fptr   0 n'
          r = SM.unsafeFromForeignPtr ret_fptr 0 n'
      f x r


-- | Function calculating both the value of the objective
-- function @f@ and its gradient at a point @x@.
data Combined t where
    VCombined :: G.Vector v Double
              => (v Double -> (Double, v Double))
              -> Combined Simple
    MCombined :: (forall m. (PrimMonad m, Functor m)
                  => PointMVector m
                  -> GradientMVector m
                  -> m Double)
              -> Combined Mutable
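
-- A minimal sketch (illustrative, not exported by this module)
-- of a pure combined function returning both the value and the
-- gradient of @f(x) = sum x_i^2@ in a single callback:
--
-- > sumSquaresBoth :: Combined Simple
-- > sumSquaresBoth = VCombined $ \v ->
-- >     (S.sum (S.map (^2) v), S.map (2*) v)
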
mutableC :: Combined t -> Combined Mutable
mutableC (VCombined f) = MCombined f'
    where
      f' :: (PrimMonad m, Functor m) =>
            PointMVector m
         -> GradientMVector m
         -> m Double
      f' mx mret = do
        (v,r) <- f <$> copyInput mx
        copyOutput mret r
        return v
mutableC (MCombined f) = MCombined f

prepareC :: Combined Mutable -> CCombined
prepareC (MCombined f) =
    \ret_ptr x_ptr n -> do
      let n' = fromIntegral n
      x_fptr   <- newForeignPtr_ x_ptr
      ret_fptr <- newForeignPtr_ ret_ptr
      let x = SM.unsafeFromForeignPtr x_fptr   0 n'
          r = SM.unsafeFromForeignPtr ret_fptr 0 n'
      f x r

-- | Combine two separate functions into a single, combined one.
-- This is always a win for us, since it saves one jump from C
-- to Haskell land on every evaluation.
combine :: Function Mutable -> Gradient Mutable -> Combined Mutable
combine (MFunction f) (MGradient g) =
    MCombined $ \mx mret -> g mx mret >> f mx




data Result =
      ToleranceStatisfied
      -- ^ Convergence tolerance was satisfied.
    | FunctionChange
      -- ^ Change in function value was less than @funcEpsilon *
      -- |f|@.
    | MaxTotalIter
      -- ^ Total iterations exceeded @maxItersFac * n@.
    | NegativeSlope
      -- ^ Slope was always negative in line search.
    | MaxSecantIter
      -- ^ Number of secant iterations exceeded 'nsecant'.
    | NotDescent
      -- ^ Search direction not a descent direction.
    | LineSearchFailsInitial
      -- ^ Line search fails in initial interval.
    | LineSearchFailsBisection
      -- ^ Line search fails during bisection.
    | LineSearchFailsUpdate
      -- ^ Line search fails during interval update.
    | DebugTol
      -- ^ Debug tolerance was on and the test failed (see 'debugTol').
    | FunctionValueNaN
      -- ^ Function value became @NaN@.
    | StartFunctionValueNaN
      -- ^ Initial function value was @NaN@.
    deriving (Eq, Ord, Show, Read, Enum)

intToResult :: CInt -> Result
intToResult (-2) = FunctionValueNaN
intToResult (-1) = StartFunctionValueNaN
intToResult   0  = ToleranceStatisfied
intToResult   1  = FunctionChange
intToResult   2  = MaxTotalIter
intToResult   3  = NegativeSlope
intToResult   4  = MaxSecantIter
intToResult   5  = NotDescent
intToResult   6  = LineSearchFailsInitial
intToResult   7  = LineSearchFailsBisection
intToResult   8  = LineSearchFailsUpdate
intToResult   9  = DebugTol
intToResult  10  = error "HagerZhang05.intToResult: out of memory"
intToResult   x  = error $ "HagerZhang05.intToResult: unknown value " ++ show x

-- | Statistics given after the process finishes.
data Statistics = Statistics {
    finalValue :: Double
    -- ^ Value of the function at the solution.
    ,gradNorm :: Double
    -- ^ Maximum absolute component of the gradient at the
    -- solution.
    ,totalIters :: CInt
    -- ^ Total number of iterations.
    ,funcEvals :: CInt
    -- ^ Total number of function evaluations.
    ,gradEvals :: CInt
    -- ^ Total number of gradient evaluations.
    } deriving (Eq, Ord, Show, Read)

instance Storable Statistics where
    sizeOf _    = 40
    alignment _ = alignment (undefined :: Double)
    peek ptr = do
      v_finalValue <- peekByteOff ptr 0
      v_gradNorm   <- peekByteOff ptr 8
      v_totalIters <- peekByteOff ptr 16
      v_funcEvals  <- peekByteOff ptr 24
      v_gradEvals  <- peekByteOff ptr 32
      return Statistics {finalValue = v_finalValue
                        ,gradNorm   = v_gradNorm
                        ,totalIters = v_totalIters
                        ,funcEvals  = v_funcEvals
                        ,gradEvals  = v_gradEvals}
    poke ptr s = do
      pokeByteOff ptr 0  (finalValue s)
      pokeByteOff ptr 8  (gradNorm s)
      pokeByteOff ptr 16 (totalIters s)
      pokeByteOff ptr 24 (funcEvals s)
      pokeByteOff ptr 32 (gradEvals s)



-- | Default parameters.  See the documentation for 'Parameters'
-- and 'TechParameters' to see what the defaults are.
defaultParameters :: Parameters
defaultParameters =
    Unsafe.unsafePerformIO $ do
      alloca $ \ptr -> do
        cg_default ptr
        peek ptr
{-# NOINLINE defaultParameters #-}
foreign import ccall unsafe "cg_user.h"
  cg_default :: Ptr Parameters -> IO ()
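
-- Since 'Parameters' is a plain record, options are usually
-- changed with a record update on 'defaultParameters'.  A sketch
-- (the field values below are illustrative only):
--
-- > quietParams :: Parameters
-- > quietParams = defaultParameters
-- >     { printFinal = False
-- >     , verbose    = Quiet
-- >     , lineSearch = AutoSwitch 1e-3
-- >     }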


-- | Parameters given to the optimizer.
data Parameters = Parameters {
    printFinal :: Bool
    -- ^ Print final statistics to @stdout@.  Defaults to @True@.

    ,printParams :: Bool
    -- ^ Print parameters to @stdout@ before starting.  Defaults to @False@.

    ,verbose :: Verbose
    -- ^ How verbose we should be while computing.  Everything is
    -- printed to @stdout@. Defaults to 'Quiet'.

    ,lineSearch :: LineSearch
    -- ^ What kind of line search should be used.  Defaults to
    -- @AutoSwitch 1e-3@.

    ,qdecay :: Double
    -- ^ Factor in @[0, 1]@ used to compute average cost
    -- magnitude @C_k@ as follows:
    --
    -- > Q_k = 1 + (qdecay)Q_{k-1},   Q_0 = 0
    -- > C_k = C_{k-1} + (|f_k| - C_{k-1})/Q_k
    --
    -- Defaults to @0.7@.

    ,stopRules :: StopRules
    -- ^ Stop rules that define when the iterations should end.
    -- Defaults to @DefaultStopRule 0@.

    ,estimateError :: EstimateError
    -- ^ How to calculate the estimated error in the function
    -- value.  Defaults to @RelativeEpsilon 1e-6@.

    ,quadraticStep :: Maybe Double
    -- ^ When to attempt quadratic interpolation in line search.
    -- If @Nothing@ then never try a quadratic interpolation
    -- step.  If @Just cutoff@, then attempt quadratic
    -- interpolation in line search when @|f_{k+1} - f_k| / f_k
    -- <= cutoff@.  Defaults to @Just 1e-12@.

    ,debugTol :: Maybe Double
    -- ^ If @Just tol@, then always check that @f_{k+1} - f_k <=
    -- tol * C_k@. Otherwise, if @Nothing@ then no checking of
    -- function values is done.  Defaults to @Nothing@.

    ,initialStep :: Maybe Double
    -- ^ If @Just step@, then use @step@ as the initial step of
    -- the line search.  Otherwise, if @Nothing@ then the initial
    -- step is programmatically calculated.  Defaults to
    -- @Nothing@.

    ,maxItersFac :: Double
    -- ^ Defines the maximum number of iterations.  The process
    -- is aborted when @maxItersFac * n@ iterations are done, where
    -- @n@ is the number of dimensions.  Defaults to infinity.

    ,nexpand :: CInt
    -- ^ Maximum number of times the bracketing interval grows or
    -- shrinks in the line search.  Defaults to @50@.

    ,nsecant :: CInt
    -- ^ Maximum number of secant iterations in line search.
    -- Defaults to @50@.

    ,restartFac :: Double
    -- ^ Restart the conjugate gradient method after @restartFac
    -- * n@ iterations. Defaults to @1@.

    ,funcEpsilon :: Double
    -- ^ Stop when @-alpha * dphi0@, the estimated change in
    -- function value, is less than @funcEpsilon * |f|@.
    -- Defaults to @0@.

    ,nanRho :: Double
    -- ^ Growth factor used when searching for a bracketing
    -- interval after a @NaN@ was encountered while calculating
    -- the step length.  Defaults to @1.3@.

    ,techParameters :: TechParameters
    -- ^ Technical parameters which you probably should not
    -- touch.
    } deriving (Eq, Ord, Show, Read)

instance Storable Parameters where
    sizeOf _    = 208
    alignment _ = alignment (undefined :: Double)
    peek ptr    = do
      v_printFinal    <- peekByteOff ptr 0
      v_printParams   <- peekByteOff ptr 8
      v_verbose       <- peekByteOff ptr 4
      v_awolfe        <- peekByteOff ptr 12
      v_awolfefac     <- peekByteOff ptr 16
      v_qdecay        <- peekByteOff ptr 24
      v_stopRule      <- peekByteOff ptr 32
      v_stopRuleFac   <- peekByteOff ptr 40
      v_estimateError <- peekByteOff ptr 48
      v_estimateEps   <- peekByteOff ptr 56
      v_quadraticStep <- peekByteOff ptr 64
      v_quadraticCut  <- peekByteOff ptr 72
      v_debug         <- peekByteOff ptr 80
      v_debugTol      <- peekByteOff ptr 88
      v_initialStep   <- peekByteOff ptr 96
      v_maxItersFac   <- peekByteOff ptr 104
      v_nexpand       <- peekByteOff ptr 112
      v_nsecant       <- peekByteOff ptr 116
      v_restartFac    <- peekByteOff ptr 120
      v_funcEpsilon   <- peekByteOff ptr 128
      v_nanRho        <- peekByteOff ptr 136

      v_delta         <- peekByteOff ptr 144
      v_sigma         <- peekByteOff ptr 152
      v_gamma         <- peekByteOff ptr 160
      v_rho           <- peekByteOff ptr 168
      v_eta           <- peekByteOff ptr 176
      v_psi0          <- peekByteOff ptr 184
      v_psi1          <- peekByteOff ptr 192
      v_psi2          <- peekByteOff ptr 200

      let tech = TechParameters {techDelta = v_delta
                                ,techSigma = v_sigma
                                ,techGamma = v_gamma
                                ,techRho   = v_rho
                                ,techEta   = v_eta
                                ,techPsi0  = v_psi0
                                ,techPsi1  = v_psi1
                                ,techPsi2  = v_psi2}

      let b :: CInt -> Bool; b = (/= 0)

      return Parameters {printFinal     = b v_printFinal
                        ,printParams    = b v_printParams
                        ,verbose        = case v_verbose :: CInt of
                                            0 -> Quiet
                                            1 -> Verbose
                                            _ -> VeryVerbose
                        ,lineSearch     = if b v_awolfe
                                          then ApproximateWolfe
                                          else AutoSwitch v_awolfefac
                        ,qdecay         = v_qdecay
                        ,stopRules      = if b v_stopRule
                                          then DefaultStopRule v_stopRuleFac
                                          else AlternativeStopRule
                        ,estimateError  = if b v_estimateError
                                          then RelativeEpsilon v_estimateEps
                                          else AbsoluteEpsilon v_estimateEps
                        ,quadraticStep  = if b v_quadraticStep
                                          then Just v_quadraticCut
                                          else Nothing
                        ,debugTol       = if b v_debug
                                          then Just v_debugTol
                                          else Nothing
                        ,initialStep    = case v_initialStep of
                                            0 -> Nothing
                                            x -> Just x
                        ,maxItersFac    = v_maxItersFac
                        ,nexpand        = v_nexpand
                        ,nsecant        = v_nsecant
                        ,restartFac     = v_restartFac
                        ,funcEpsilon    = v_funcEpsilon
                        ,nanRho         = v_nanRho
                        ,techParameters = tech}
    poke ptr p = do
      let i b = if b p then 1 else (0 :: CInt)
          m b = maybe (0 :: CInt) (const 1) (b p)
      pokeByteOff ptr 0 (i printFinal)
      pokeByteOff ptr 8 (i printParams)
      pokeByteOff ptr 4 (case verbose p of
                           Quiet       -> 0 :: CInt
                           Verbose     -> 1
                           VeryVerbose -> 3)
      let (awolfe, awolfefac) = case lineSearch p of
                                  ApproximateWolfe -> (1, 0)
                                  AutoSwitch x     -> (0, x)
      pokeByteOff ptr 12 (awolfe :: CInt)
      pokeByteOff ptr 16 awolfefac
      pokeByteOff ptr 24 (qdecay p)
      let (stopRule, stopRuleFac) = case stopRules p of
                                      DefaultStopRule x   -> (1, x)
                                      AlternativeStopRule -> (0, 0)
      pokeByteOff ptr 32 (stopRule :: CInt)
      pokeByteOff ptr 40 stopRuleFac
      let (pertRule, eps) = case estimateError p of
                              RelativeEpsilon x -> (1, x)
                              AbsoluteEpsilon x -> (0, x)
      pokeByteOff ptr 48 (pertRule :: CInt)
      pokeByteOff ptr 56 eps
      pokeByteOff ptr 64 (m quadraticStep)
      pokeByteOff ptr 72 (maybe 0 id $ quadraticStep p)
      pokeByteOff ptr 80 (m debugTol)
      pokeByteOff ptr 88 (maybe 0 id $ debugTol p)
      pokeByteOff ptr 96 (maybe 0 id $ initialStep p)
      pokeByteOff ptr 104 (maxItersFac p)
      pokeByteOff ptr 112 (nexpand p)
      pokeByteOff ptr 116 (nsecant p)
      pokeByteOff ptr 120 (restartFac p)
      pokeByteOff ptr 128 (funcEpsilon p)
      pokeByteOff ptr 136 (nanRho p)

      pokeByteOff ptr 144 (techDelta $ techParameters p)
      pokeByteOff ptr 152 (techSigma $ techParameters p)
      pokeByteOff ptr 160 (techGamma $ techParameters p)
      pokeByteOff ptr 168 (techRho   $ techParameters p)
      pokeByteOff ptr 176 (techEta   $ techParameters p)
      pokeByteOff ptr 184 (techPsi0  $ techParameters p)
      pokeByteOff ptr 192 (techPsi1  $ techParameters p)
      pokeByteOff ptr 200 (techPsi2  $ techParameters p)




-- | Technical parameters which you probably should not touch.
-- You should read the @CG_DESCENT@ papers to understand how to
-- tune these parameters.
data TechParameters = TechParameters {
    techDelta :: Double
    -- ^ Wolfe line search parameter.  Defaults to @0.1@.
    ,techSigma :: Double
    -- ^ Wolfe line search parameter.  Defaults to @0.9@.
    ,techGamma :: Double
    -- ^ Decay factor for bracket interval width.  Defaults to
    -- @0.66@.
    ,techRho :: Double
    -- ^ Growth factor when searching for initial bracketing
    -- interval.  Defaults to @5@.
    ,techEta :: Double
    -- ^ Lower bound for the conjugate gradient update parameter
    -- @beta_k@ is @techEta * ||d||_2@.  Defaults to @0.01@.
    ,techPsi0 :: Double
    -- ^ Factor used in starting guess for iteration 1.  Defaults
    -- to @0.01@.
    ,techPsi1 :: Double
    -- ^ In performing a QuadStep, we evaluate the function at
    -- @psi1 * previous step@.  Defaults to @0.1@.
    ,techPsi2 :: Double
    -- ^ When starting a new CG iteration, our initial guess for
    -- the line search stepsize is @psi2 * previous step@.
    -- Defaults to @2@.
    } deriving (Eq, Ord, Show, Read)



-- | How verbose we should be.
data Verbose =
      Quiet
      -- ^ Do not output anything to @stdout@, which is usually
      -- what you want.
    | Verbose
      -- ^ Print what work is being done on each iteration.
    | VeryVerbose
      -- ^ Print information about every step; may be useful for
      -- troubleshooting.
      deriving (Eq, Ord, Show, Read, Enum)

-- | Line search methods that may be used.
data LineSearch =
      ApproximateWolfe
      -- ^ Use approximate Wolfe line search.
    | AutoSwitch Double
      -- ^ Use ordinary Wolfe line search, switch to approximate
      -- Wolfe when
      --
      -- > |f_{k+1} - f_k| < AWolfeFac * C_k
      --
      -- where @C_k@ is the average cost magnitude and
      -- @AWolfeFac@ is the parameter to this constructor.
      deriving (Eq, Ord, Show, Read)

-- | Stop rules used to decide when to stop iterating.
data StopRules =
      DefaultStopRule Double
      -- ^ @DefaultStopRule stop_fac@ stops when
      --
      -- > |g_k|_infty <= max(grad_tol, |g_0|_infty * stop_fac)
      --
      -- where @|g_i|_infty@ is the maximum absolute component of
      -- the gradient at the @i@-th step.
    | AlternativeStopRule
      -- ^ @AlternativeStopRule@ stops when
      --
      -- > |g_k|_infty <= grad_tol * (1 + |f_k|)
      deriving (Eq, Ord, Show, Read)

-- | How to calculate the estimated error in the function value.
data EstimateError =
      AbsoluteEpsilon Double
      -- ^ @AbsoluteEpsilon eps@ estimates the error as @eps@.
    | RelativeEpsilon Double
      -- ^ @RelativeEpsilon eps@ estimates the error as @eps * C_k@.
      deriving (Eq, Ord, Show, Read)