-- Hoogle documentation, generated by Haddock -- See Hoogle, http://www.haskell.org/hoogle/ -- | Streaming, parsers, arrays, serialization and more -- -- For upgrading to streamly-0.9.0+ please read the Streamly-0.9.0 -- upgrade guide. -- -- Streamly is a standard library for Haskell that focuses on C-like -- performance, modular combinators, and streaming data flow model. -- Streamly consists of two packages: "streamly-core" and "streamly". -- streamly-core provides basic features, and depends only on GHC -- boot libraries (see note below), while streamly provides -- higher-level features like concurrency, time, lifted exceptions, and -- networking. For documentation, visit the Streamly website. -- -- The streamly-core package provides the following functionality: -- -- -- -- This package covers some or all of the functionality covered by -- streaming, pipes, conduit, list-t, logic-t, foldl, attoparsec, -- array, primitive, vector, vector-algorithms, binary, cereal, store, -- bytestring, text, stringsearch, interpolate. Streamly provides a -- consistent, concise, modular and performant interface for all this -- functionality. -- -- Note: The dependencies "heaps" and "monad-control" are included in the -- package solely for backward compatibility, and will be removed in -- future versions. @package streamly-core @version 0.2.2 -- | Compatibility functions for "base" package. module Streamly.Internal.BaseCompat (#.) :: Coercible b c => (b -> c) -> (a -> b) -> a -> c unsafeWithForeignPtr :: ForeignPtr a -> (Ptr a -> IO b) -> IO b -- | Additional Control.Exception utilities. module Streamly.Internal.Control.Exception -- | Like assert but is not removed by the compiler, it is always -- present in production code. -- -- Pre-release verify :: Bool -> a -> a verifyM :: Applicative f => Bool -> f () module Streamly.Internal.Control.ForkIO -- | Stolen from the async package. The perf improvement is modest, 2% on a -- thread heavy benchmark (parallel composition using noop computations). -- A version of forkIO that does not include the outer exception handler: -- saves a bit of time when we will be installing our own exception -- handler. rawForkIO :: IO () -> IO ThreadId -- | Fork a thread that is automatically killed as soon as the reference to -- the returned threadId is garbage collected. forkIOManaged :: IO () -> IO ThreadId -- | Fork a thread that is automatically killed as soon as the reference to -- the returned threadId is garbage collected. forkManagedWith :: MonadIO m => (m () -> m ThreadId) -> m () -> m ThreadId -- | Additional Control.Monad utilities. module Streamly.Internal.Control.Monad -- | Discard any exceptions or value returned by an effectful action. -- -- Pre-release discard :: MonadCatch m => m b -> m () module Streamly.Internal.Data.Builder -- | A simple stateful function composing monad that chains state passing -- functions. This can be considered as a simplified version of the State -- monad or even a Fold. Unlike fold the step function is one-shot and -- not called in a loop. newtype Builder s m a Builder :: (s -> m (a, s)) -> Builder s m a instance GHC.Base.Functor m => GHC.Base.Functor (Streamly.Internal.Data.Builder.Builder s m) instance GHC.Base.Monad m => GHC.Base.Applicative (Streamly.Internal.Data.Builder.Builder a m) instance GHC.Base.Monad m => GHC.Base.Monad (Streamly.Internal.Data.Builder.Builder a m) -- | | Strict data types to be used as accumulator for strict left folds -- and scans. 
For more comprehensive strict data types see -- https://hackage.haskell.org/package/strict-base-types . The -- names have been suffixed by a prime so that programmers can easily -- distinguish the strict versions from the lazy ones. -- -- One major advantage of strict data structures as accumulators in folds -- and scans is that it helps the compiler optimize the code much better -- by unboxing. In a big tight loop the difference could be huge. module Streamly.Internal.Data.Either.Strict -- | A strict Either data Either' a b Left' :: !a -> Either' a b Right' :: !b -> Either' a b -- | Return True if the given value is a Left', False -- otherwise. isLeft' :: Either' a b -> Bool -- | Return True if the given value is a Right', False -- otherwise. isRight' :: Either' a b -> Bool -- | Return the contents of a Left'-value or errors out. fromLeft' :: Either' a b -> a -- | Return the contents of a Right'-value or errors out. fromRight' :: Either' a b -> b instance (GHC.Show.Show a, GHC.Show.Show b) => GHC.Show.Show (Streamly.Internal.Data.Either.Strict.Either' a b) -- | A value associated with an IO action that is automatically called -- whenever the value is garbage collected. module Streamly.Internal.Data.IOFinalizer -- | An IOFinalizer has an associated IO action that is -- automatically called whenever the finalizer is garbage collected. The -- action can be run and cleared prematurely. -- -- You can hold a reference to the finalizer in your data structure; if -- the data structure gets garbage collected, the finalizer will be -- called. -- -- It is implemented using mkWeakIORef. -- -- Pre-release newtype IOFinalizer IOFinalizer :: IORef (Maybe (IO ())) -> IOFinalizer -- | Create a finalizer that calls the supplied function automatically when -- it is garbage collected. -- -- /The finalizer is always run using the state of the monad that is -- captured at the time of calling newFinalizer./ -- -- Note: To run it on garbage collection we have no option but to use the -- monad state captured at some earlier point of time. For the case when -- the finalizer is run manually before GC we could run it with the -- current state of the monad but we want to keep both the cases -- consistent. -- -- Pre-release newIOFinalizer :: MonadIO m => IO a -> m IOFinalizer -- | Run the action associated with the finalizer and deactivate it so that -- it never runs again. Note that the finalizing action runs with async -- exceptions masked. -- -- If this function is called multiple times, the action is guaranteed to -- run once and only once. -- -- Pre-release runIOFinalizer :: MonadIO m => IOFinalizer -> m () -- | Run an action clearing the finalizer atomically wrt async exceptions. -- The action is run with async exceptions masked. -- -- This function can be called at most once after setting the finalizer. -- If the finalizer is not set it is considered a bug. 
-- -- Pre-release clearingIOFinalizer :: MonadIO m => IOFinalizer -> IO a -> m a module Streamly.Internal.Data.IsMap class IsMap f where { type family Key f :: Type; } mapEmpty :: IsMap f => f a mapAlterF :: (IsMap f, Functor g) => (Maybe a -> g (Maybe a)) -> Key f -> f a -> g (f a) mapLookup :: IsMap f => Key f -> f a -> Maybe a mapInsert :: IsMap f => Key f -> a -> f a -> f a mapDelete :: IsMap f => Key f -> f a -> f a mapUnion :: IsMap f => f a -> f a -> f a mapNull :: IsMap f => f a -> Bool mapTraverseWithKey :: (IsMap f, Applicative t) => (Key f -> a -> t b) -> f a -> t (f b) instance GHC.Classes.Ord k => Streamly.Internal.Data.IsMap.IsMap (Data.Map.Internal.Map k) instance Streamly.Internal.Data.IsMap.IsMap Data.IntMap.Internal.IntMap -- | | Strict data types to be used as accumulator for strict left folds -- and scans. For more comprehensive strict data types see -- https://hackage.haskell.org/package/strict-base-types . The -- names have been suffixed by a prime so that programmers can easily -- distinguish the strict versions from the lazy ones. -- -- One major advantage of strict data structures as accumulators in folds -- and scans is that it helps the compiler optimize the code much better -- by unboxing. In a big tight loop the difference could be huge. module Streamly.Internal.Data.Maybe.Strict -- | A strict Maybe data Maybe' a Just' :: !a -> Maybe' a Nothing' :: Maybe' a -- | Convert strict Maybe' to lazy Maybe toMaybe :: Maybe' a -> Maybe a -- | Returns True iff its argument is of the form "Just' _". isJust' :: Maybe' a -> Bool -- | Extract the element out of a Just' and throws an error if its argument -- is Nothing'. fromJust' :: Maybe' a -> a instance GHC.Show.Show a => GHC.Show.Show (Streamly.Internal.Data.Maybe.Strict.Maybe' a) -- | The Fold type embeds a default initial value, therefore, it -- is like a Monoid whereas the Refold type has to be -- supplied with an initial value, therefore, it is more like a -- Semigroup operation. -- -- Refolds can be appended to each other or to a fold to build the fold -- incrementally. This is useful in incremental builder-like use cases. -- -- See the file splitting example in the streamly-examples -- repository for an application of the Refold type. The -- Fold type does not perform as well in this situation. -- -- The Refold type is to Fold as the Unfold type is to -- Stream. Unfold provides better optimization than -- Stream in nested operations; similarly, Refold provides better -- optimization than Fold. module Streamly.Internal.Data.Refold.Type -- | Like Fold except that the initial state of the accumulator can -- be generated using a dynamically supplied input. This affords better -- stream fusion optimization in nested fold operations where the initial -- fold state is determined based on a dynamic value. -- -- Internal data Refold m c a b -- | Fold step inject extract Refold :: (s -> a -> m (Step s b)) -> (c -> m (Step s b)) -> (s -> m b) -> Refold m c a b -- | Make a consumer from a left fold style pure step function. -- -- If your Fold returns only Partial (i.e. never returns -- a Done) then you can use foldl'* constructors. -- -- See also: Streamly.Data.Fold.foldl' -- -- Internal foldl' :: Monad m => (b -> a -> b) -> Refold m b a b -- | Append the elements of an input stream to a provided starting value. -- --
--   >>> stream = fmap Data.Monoid.Sum $ Stream.enumerateFromTo 1 10
--   
--   >>> Stream.fold (Fold.fromRefold Refold.sconcat 10) stream
--   Sum {getSum = 65}
--   
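-- The same pattern works with the other Refold combinators. A minimal sketch using Refold.foldl' and Refold.take, assuming the qualified Stream, Fold and Refold imports used above (Refold.take limits the refold to its first three inputs): -- --
--   >>> Stream.fold (Fold.fromRefold (Refold.take 3 (Refold.foldl' (+))) 100) (Stream.enumerateFromTo 1 10)
--   106
--   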
-- --
--   >>> sconcat = Refold.foldl' (<>)
--   
-- -- Internal sconcat :: (Monad m, Semigroup a) => Refold m a a a -- | Internal drainBy :: Monad m => (c -> a -> m b) -> Refold m c a () -- | Keep running the same consumer over and over again on the input, -- feeding the output of the previous run to the next. -- -- Internal iterate :: Monad m => Refold m b a b -> Refold m b a b -- | lmapM f fold maps the monadic function f on the -- input of the fold. -- -- Internal lmapM :: Monad m => (a -> m b) -> Refold m c b r -> Refold m c a r -- | Map a monadic function on the output of a fold. -- -- Internal rmapM :: Monad m => (b -> m c) -> Refold m x a b -> Refold m x a c -- | Supply the output of the first consumer as input to the second -- consumer. -- -- Internal append :: Monad m => Refold m x a b -> Refold m b a b -> Refold m x a b -- | Take at most n input elements and fold them using the -- supplied fold. A negative count is treated as 0. -- -- Internal take :: Monad m => Int -> Refold m x a b -> Refold m x a b instance (GHC.Show.Show a, GHC.Show.Show b) => GHC.Show.Show (Streamly.Internal.Data.Refold.Type.Tuple'Fused a b) module Streamly.Internal.Data.Time.TimeSpec -- | Data type to represent practically large quantities of time -- efficiently. It can represent time up to ~292 billion years at -- nanosecond resolution. data TimeSpec TimeSpec :: {-# UNPACK #-} !Int64 -> {-# UNPACK #-} !Int64 -> TimeSpec -- | seconds [sec] :: TimeSpec -> {-# UNPACK #-} !Int64 -- | nanoseconds [nsec] :: TimeSpec -> {-# UNPACK #-} !Int64 instance GHC.Show.Show Streamly.Internal.Data.Time.TimeSpec.TimeSpec instance GHC.Read.Read Streamly.Internal.Data.Time.TimeSpec.TimeSpec instance GHC.Classes.Eq Streamly.Internal.Data.Time.TimeSpec.TimeSpec instance GHC.Classes.Ord Streamly.Internal.Data.Time.TimeSpec.TimeSpec instance GHC.Num.Num Streamly.Internal.Data.Time.TimeSpec.TimeSpec instance Foreign.Storable.Storable Streamly.Internal.Data.Time.TimeSpec.TimeSpec -- | | Strict data types to be used as accumulator for strict left folds -- and scans. For more comprehensive strict data types see -- https://hackage.haskell.org/package/strict-base-types . The -- names have been suffixed by a prime so that programmers can easily -- distinguish the strict versions from the lazy ones. -- -- One major advantage of strict data structures as accumulators in folds -- and scans is that it helps the compiler optimize the code much better -- by unboxing. In a big tight loop the difference could be huge. module Streamly.Internal.Data.Tuple.Strict -- | A strict (,) data Tuple' a b Tuple' :: !a -> !b -> Tuple' a b -- | A strict (,,) data Tuple3' a b c Tuple3' :: !a -> !b -> !c -> Tuple3' a b c -- | A strict (,,) data Tuple3Fused' a b c Tuple3Fused' :: !a -> !b -> !c -> Tuple3Fused' a b c -- | A strict (,,,) data Tuple4' a b c d Tuple4' :: !a -> !b -> !c -> !d -> Tuple4' a b c d instance (GHC.Show.Show a, GHC.Show.Show b) => GHC.Show.Show (Streamly.Internal.Data.Tuple.Strict.Tuple' a b) instance (GHC.Show.Show a, GHC.Show.Show b, GHC.Show.Show c) => GHC.Show.Show (Streamly.Internal.Data.Tuple.Strict.Tuple3' a b c) instance (GHC.Show.Show a, GHC.Show.Show b, GHC.Show.Show c) => GHC.Show.Show (Streamly.Internal.Data.Tuple.Strict.Tuple3Fused' a b c) instance (GHC.Show.Show a, GHC.Show.Show b, GHC.Show.Show c, GHC.Show.Show d) => GHC.Show.Show (Streamly.Internal.Data.Tuple.Strict.Tuple4' a b c d) -- | There are three fundamental types in streamly. They are streams -- (Streamly.Data.Stream), pipes -- (Streamly.Internal.Data.Pipe) and folds -- (Streamly.Data.Fold). 
Streams are sources or producers of -- values; multiple sources can be merged into a single source but a -- source cannot be split into multiple stream sources. Folds are sinks -- or consumers; a stream can be split and distributed to multiple folds -- but the results cannot be merged back into a stream source again. -- Pipes are transformations; a stream source can be split and -- distributed to multiple pipes; each pipe can apply its own transform on -- the stream, and the results can be merged back into a single pipe. -- Pipes can be attached to a source to produce a source, or they can be -- attached to a fold to produce a fold, or multiple pipes can be merged -- or zipped into a single pipe. -- --
--   import qualified Streamly.Internal.Data.Pipe as Pipe
--   
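-- A minimal construction sketch using the combinators documented below; the names addOne, double, chained and merged are illustrative bindings only, and no runner is shown here. chained feeds the input through addOne and then double, while merged distributes the input to both pipes and merges their outputs: -- --
--   >>> addOne = Pipe.map (+ 1) :: Pipe.Pipe IO Int Int
--   
--   >>> double = Pipe.map (* 2) :: Pipe.Pipe IO Int Int
--   
--   >>> chained = Pipe.compose double addOne
--   
--   >>> merged = Pipe.tee addOne double
--   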
module Streamly.Internal.Data.Pipe data Step s a Yield :: a -> s -> Step s a Continue :: s -> Step s a data Pipe m a b Pipe :: (s1 -> a -> m (Step (PipeState s1 s2) b)) -> (s2 -> m (Step (PipeState s1 s2) b)) -> s1 -> Pipe m a b -- | Represents a stateful transformation over an input stream of values of -- type a to outputs of type b in Monad -- m. data PipeState s1 s2 Consume :: s1 -> PipeState s1 s2 Produce :: s2 -> PipeState s1 s2 -- | The composed pipe distributes the input to both the constituent pipes -- and zips the output of the two using a supplied zipping function. zipWith :: Monad m => (a -> b -> c) -> Pipe m i a -> Pipe m i b -> Pipe m i c -- | The composed pipe distributes the input to both the constituent pipes -- and merges the outputs of the two. tee :: Monad m => Pipe m a b -> Pipe m a b -> Pipe m a b -- | Lift a pure function to a Pipe. map :: Monad m => (a -> b) -> Pipe m a b -- | Compose two pipes such that the output of the second pipe is attached -- to the input of the first pipe. compose :: Monad m => Pipe m b c -> Pipe m a b -> Pipe m a c -- | Lift a monadic function to a Pipe. mapM :: Monad m => (a -> m b) -> Pipe m a b module Streamly.Internal.Data.Time.Units -- | A type class for converting between time units using Integer as -- the intermediate and the widest representation with a nanosecond -- resolution. This system of units can represent arbitrarily large times -- but provides least efficient arithmetic operations due to -- Integer arithmetic. -- -- NOTE: Converting to and from units may truncate the value depending on -- the original value and the size and resolution of the destination -- unit. -- -- A type class for converting between units of time using -- TimeSpec as the intermediate representation. This system of -- units can represent up to ~292 billion years at nanosecond resolution -- with reasonably efficient arithmetic operations. -- -- NOTE: Converting to and from units may truncate the value depending on -- the original value and the size and resolution of the destination -- unit. class TimeUnit a -- | A type class for converting between units of time using Int64 -- as the intermediate representation with a nanosecond resolution. This -- system of units can represent up to ~292 years at nanosecond -- resolution with fast arithmetic operations. -- -- NOTE: Converting to and from units may truncate the value depending on -- the original value and the size and resolution of the destination -- unit. class TimeUnit64 a -- | Data type to represent practically large quantities of time -- efficiently. It can represent time up to ~292 billion years at -- nanosecond resolution. data TimeSpec TimeSpec :: {-# UNPACK #-} !Int64 -> {-# UNPACK #-} !Int64 -> TimeSpec -- | seconds [sec] :: TimeSpec -> {-# UNPACK #-} !Int64 -- | nanoseconds [nsec] :: TimeSpec -> {-# UNPACK #-} !Int64 -- | An Int64 time representation with a nanosecond resolution. It -- can represent time up to ~292 years. newtype NanoSecond64 NanoSecond64 :: Int64 -> NanoSecond64 -- | An Int64 time representation with a microsecond resolution. It -- can represent time up to ~292,000 years. newtype MicroSecond64 MicroSecond64 :: Int64 -> MicroSecond64 -- | An Int64 time representation with a millisecond resolution. It -- can represent time up to ~292 million years. newtype MilliSecond64 MilliSecond64 :: Int64 -> MilliSecond64 -- | Convert nanoseconds to a string showing time in an appropriate unit. 
showNanoSecond64 :: NanoSecond64 -> String -- | Absolute times are relative to a predefined epoch in time. -- AbsTime represents times using TimeSpec which can -- represent times up to ~292 billion years at a nanosecond resolution. newtype AbsTime AbsTime :: TimeSpec -> AbsTime -- | Convert a TimeUnit to an absolute time. toAbsTime :: TimeUnit a => a -> AbsTime -- | Convert absolute time to a TimeUnit. fromAbsTime :: TimeUnit a => AbsTime -> a data RelTime toRelTime :: TimeUnit a => a -> RelTime fromRelTime :: TimeUnit a => RelTime -> a diffAbsTime :: AbsTime -> AbsTime -> RelTime addToAbsTime :: AbsTime -> RelTime -> AbsTime -- | Relative times are relative to some arbitrary point of time. Unlike -- AbsTime they are not relative to a predefined epoch. data RelTime64 -- | Convert a TimeUnit to a relative time. toRelTime64 :: TimeUnit64 a => a -> RelTime64 -- | Convert relative time to a TimeUnit. fromRelTime64 :: TimeUnit64 a => RelTime64 -> a -- | Difference between two absolute points of time. diffAbsTime64 :: AbsTime -> AbsTime -> RelTime64 addToAbsTime64 :: AbsTime -> RelTime64 -> AbsTime showRelTime64 :: RelTime64 -> String instance Streamly.Internal.Data.Unbox.Unbox Streamly.Internal.Data.Time.Units.NanoSecond64 instance Foreign.Storable.Storable Streamly.Internal.Data.Time.Units.NanoSecond64 instance GHC.Classes.Ord Streamly.Internal.Data.Time.Units.NanoSecond64 instance GHC.Real.Integral Streamly.Internal.Data.Time.Units.NanoSecond64 instance GHC.Real.Real Streamly.Internal.Data.Time.Units.NanoSecond64 instance GHC.Num.Num Streamly.Internal.Data.Time.Units.NanoSecond64 instance GHC.Enum.Bounded Streamly.Internal.Data.Time.Units.NanoSecond64 instance GHC.Enum.Enum Streamly.Internal.Data.Time.Units.NanoSecond64 instance GHC.Show.Show Streamly.Internal.Data.Time.Units.NanoSecond64 instance GHC.Read.Read Streamly.Internal.Data.Time.Units.NanoSecond64 instance GHC.Classes.Eq Streamly.Internal.Data.Time.Units.NanoSecond64 instance Streamly.Internal.Data.Unbox.Unbox Streamly.Internal.Data.Time.Units.MicroSecond64 instance Foreign.Storable.Storable Streamly.Internal.Data.Time.Units.MicroSecond64 instance GHC.Classes.Ord Streamly.Internal.Data.Time.Units.MicroSecond64 instance GHC.Real.Integral Streamly.Internal.Data.Time.Units.MicroSecond64 instance GHC.Real.Real Streamly.Internal.Data.Time.Units.MicroSecond64 instance GHC.Num.Num Streamly.Internal.Data.Time.Units.MicroSecond64 instance GHC.Enum.Bounded Streamly.Internal.Data.Time.Units.MicroSecond64 instance GHC.Enum.Enum Streamly.Internal.Data.Time.Units.MicroSecond64 instance GHC.Show.Show Streamly.Internal.Data.Time.Units.MicroSecond64 instance GHC.Read.Read Streamly.Internal.Data.Time.Units.MicroSecond64 instance GHC.Classes.Eq Streamly.Internal.Data.Time.Units.MicroSecond64 instance Streamly.Internal.Data.Unbox.Unbox Streamly.Internal.Data.Time.Units.MilliSecond64 instance Foreign.Storable.Storable Streamly.Internal.Data.Time.Units.MilliSecond64 instance GHC.Classes.Ord Streamly.Internal.Data.Time.Units.MilliSecond64 instance GHC.Real.Integral Streamly.Internal.Data.Time.Units.MilliSecond64 instance GHC.Real.Real Streamly.Internal.Data.Time.Units.MilliSecond64 instance GHC.Num.Num Streamly.Internal.Data.Time.Units.MilliSecond64 instance GHC.Enum.Bounded Streamly.Internal.Data.Time.Units.MilliSecond64 instance GHC.Enum.Enum Streamly.Internal.Data.Time.Units.MilliSecond64 instance GHC.Show.Show Streamly.Internal.Data.Time.Units.MilliSecond64 instance GHC.Read.Read Streamly.Internal.Data.Time.Units.MilliSecond64 instance GHC.Classes.Eq 
Streamly.Internal.Data.Time.Units.MilliSecond64 instance GHC.Show.Show Streamly.Internal.Data.Time.Units.AbsTime instance GHC.Classes.Ord Streamly.Internal.Data.Time.Units.AbsTime instance GHC.Classes.Eq Streamly.Internal.Data.Time.Units.AbsTime instance GHC.Classes.Ord Streamly.Internal.Data.Time.Units.RelTime64 instance GHC.Real.Integral Streamly.Internal.Data.Time.Units.RelTime64 instance GHC.Real.Real Streamly.Internal.Data.Time.Units.RelTime64 instance GHC.Num.Num Streamly.Internal.Data.Time.Units.RelTime64 instance GHC.Enum.Bounded Streamly.Internal.Data.Time.Units.RelTime64 instance GHC.Enum.Enum Streamly.Internal.Data.Time.Units.RelTime64 instance GHC.Show.Show Streamly.Internal.Data.Time.Units.RelTime64 instance GHC.Read.Read Streamly.Internal.Data.Time.Units.RelTime64 instance GHC.Classes.Eq Streamly.Internal.Data.Time.Units.RelTime64 instance GHC.Classes.Ord Streamly.Internal.Data.Time.Units.RelTime instance GHC.Num.Num Streamly.Internal.Data.Time.Units.RelTime instance GHC.Show.Show Streamly.Internal.Data.Time.Units.RelTime instance GHC.Read.Read Streamly.Internal.Data.Time.Units.RelTime instance GHC.Classes.Eq Streamly.Internal.Data.Time.Units.RelTime instance Streamly.Internal.Data.Time.Units.TimeUnit64 Streamly.Internal.Data.Time.Units.NanoSecond64 instance Streamly.Internal.Data.Time.Units.TimeUnit64 Streamly.Internal.Data.Time.Units.MicroSecond64 instance Streamly.Internal.Data.Time.Units.TimeUnit64 Streamly.Internal.Data.Time.Units.MilliSecond64 instance Streamly.Internal.Data.Time.Units.TimeUnit Streamly.Internal.Data.Time.TimeSpec.TimeSpec instance Streamly.Internal.Data.Time.Units.TimeUnit Streamly.Internal.Data.Time.Units.NanoSecond64 instance Streamly.Internal.Data.Time.Units.TimeUnit Streamly.Internal.Data.Time.Units.MicroSecond64 instance Streamly.Internal.Data.Time.Units.TimeUnit Streamly.Internal.Data.Time.Units.MilliSecond64 module Streamly.Internal.Data.SVar.Type data ThreadAbort ThreadAbort :: ThreadAbort -- | Events that a child thread may send to a parent thread. data ChildEvent a ChildYield :: a -> ChildEvent a ChildStop :: ThreadId -> Maybe SomeException -> ChildEvent a newtype RunInIO m RunInIO :: (forall b. m b -> IO (StM m b)) -> RunInIO m [runInIO] :: RunInIO m -> forall b. m b -> IO (StM m b) -- | Sorting out-of-turn outputs in a heap for Ahead style streams data AheadHeapEntry (t :: (Type -> Type) -> Type -> Type) m a AheadEntryNull :: AheadHeapEntry (t :: (Type -> Type) -> Type -> Type) m a AheadEntryPure :: a -> AheadHeapEntry (t :: (Type -> Type) -> Type -> Type) m a AheadEntryStream :: (RunInIO m, t m a) -> AheadHeapEntry (t :: (Type -> Type) -> Type -> Type) m a newtype Count Count :: Int64 -> Count data Limit Unlimited :: Limit Limited :: Word -> Limit -- | Identify the type of the SVar. Two computations using the same style -- can be scheduled on the same SVar. 
data SVarStyle AsyncVar :: SVarStyle WAsyncVar :: SVarStyle ParallelVar :: SVarStyle AheadVar :: SVarStyle data SVarStopStyle StopNone :: SVarStopStyle StopAny :: SVarStopStyle StopBy :: SVarStopStyle data SVarStats SVarStats :: IORef Int -> IORef Int -> IORef Int -> IORef Int -> IORef Int -> IORef (Count, NanoSecond64) -> IORef NanoSecond64 -> IORef NanoSecond64 -> IORef (Maybe AbsTime) -> SVarStats [totalDispatches] :: SVarStats -> IORef Int [maxWorkers] :: SVarStats -> IORef Int [maxOutQSize] :: SVarStats -> IORef Int [maxHeapSize] :: SVarStats -> IORef Int [maxWorkQSize] :: SVarStats -> IORef Int [avgWorkerLatency] :: SVarStats -> IORef (Count, NanoSecond64) [minWorkerLatency] :: SVarStats -> IORef NanoSecond64 [maxWorkerLatency] :: SVarStats -> IORef NanoSecond64 [svarStopTime] :: SVarStats -> IORef (Maybe AbsTime) -- | An SVar or a Stream Var is a conduit to the output from multiple -- streams running concurrently and asynchronously. An SVar can be -- thought of as an asynchronous IO handle. We can write any number of -- streams to an SVar in a non-blocking manner and then read them back at -- any time at any pace. The SVar would run the streams asynchronously -- and accumulate results. An SVar may not really execute the stream -- completely and accumulate all the results. However, it ensures that -- the reader can read the results at whatever pace it wants to read. -- The SVar monitors and adapts to the consumer's pace. -- -- An SVar is a mini scheduler; it has an associated workLoop that holds -- the stream tasks to be picked and run by a pool of worker threads. It -- has an associated output queue where the output stream elements are -- placed by the worker threads. An outputDoorBell is used by the worker -- threads to inform the consumer thread about the availability of new -- results in the output queue. More workers are added to the SVar by -- fromStreamVar on demand if the output produced is not keeping -- pace with the consumer. On bounded SVars, workers block on the output -- queue to provide throttling of the producer when the consumer is not -- pulling fast enough. The number of workers may even get reduced -- depending on the consuming pace. -- -- New work is enqueued either at the time of creation of the SVar or as -- a result of executing the parallel combinators i.e. <| and -- <|> when the already enqueued computations get -- evaluated. See joinStreamVarAsync. data WorkerInfo WorkerInfo :: Count -> IORef Count -> IORef (Count, AbsTime) -> WorkerInfo [workerYieldMax] :: WorkerInfo -> Count [workerYieldCount] :: WorkerInfo -> IORef Count [workerLatencyStart] :: WorkerInfo -> IORef (Count, AbsTime) -- | Buffering policy for persistent push workers (in ParallelT). In a pull -- style SVar (in AsyncT, AheadT etc.), the consumer side dispatches -- workers on demand; workers terminate if the buffer is full or if the -- consumer is not consuming fast enough. In a push style SVar, a worker -- is dispatched only once; workers are persistent and keep pushing work -- to the consumer via a bounded buffer. If the buffer becomes full the -- worker either blocks, or it can drop an item from the buffer to make -- space. -- -- Pull style SVars are useful in lazy stream evaluation whereas push -- style SVars are useful in strict left Folds. -- -- XXX Maybe we can separate the implementation into two different types -- instead of using a common SVar type. 
data PushBufferPolicy PushBufferDropNew :: PushBufferPolicy PushBufferDropOld :: PushBufferPolicy PushBufferBlock :: PushBufferPolicy data LatencyRange LatencyRange :: NanoSecond64 -> NanoSecond64 -> LatencyRange [minLatency] :: LatencyRange -> NanoSecond64 [maxLatency] :: LatencyRange -> NanoSecond64 data YieldRateInfo YieldRateInfo :: NanoSecond64 -> LatencyRange -> Int -> IORef Count -> IORef (Count, AbsTime) -> Maybe NanoSecond64 -> IORef Count -> IORef (Count, Count, NanoSecond64) -> IORef (Count, Count, NanoSecond64) -> IORef NanoSecond64 -> YieldRateInfo [svarLatencyTarget] :: YieldRateInfo -> NanoSecond64 [svarLatencyRange] :: YieldRateInfo -> LatencyRange [svarRateBuffer] :: YieldRateInfo -> Int [svarGainedLostYields] :: YieldRateInfo -> IORef Count [svarAllTimeLatency] :: YieldRateInfo -> IORef (Count, AbsTime) [workerBootstrapLatency] :: YieldRateInfo -> Maybe NanoSecond64 [workerPollingInterval] :: YieldRateInfo -> IORef Count [workerPendingLatency] :: YieldRateInfo -> IORef (Count, Count, NanoSecond64) [workerCollectedLatency] :: YieldRateInfo -> IORef (Count, Count, NanoSecond64) [workerMeasuredLatency] :: YieldRateInfo -> IORef NanoSecond64 data SVar t m a SVar :: SVarStyle -> RunInIO m -> SVarStopStyle -> IORef ThreadId -> IORef ([ChildEvent a], Int) -> MVar () -> m [ChildEvent a] -> m Bool -> IORef ([ChildEvent a], Int) -> MVar () -> Limit -> Limit -> IORef Count -> PushBufferPolicy -> MVar () -> Maybe (IORef Count) -> Maybe YieldRateInfo -> ((RunInIO m, t m a) -> IO ()) -> IO Bool -> IO Bool -> IORef Bool -> (Maybe WorkerInfo -> m ()) -> IORef (Set ThreadId) -> IORef Int -> (ThreadId -> m ()) -> MVar () -> SVarStats -> Maybe (IORef ()) -> Bool -> ThreadId -> IORef (Heap (Entry Int (AheadHeapEntry t m a)), Maybe Int) -> IORef ([t m a], Int) -> SVar t m a [svarStyle] :: SVar t m a -> SVarStyle [svarMrun] :: SVar t m a -> RunInIO m [svarStopStyle] :: SVar t m a -> SVarStopStyle [svarStopBy] :: SVar t m a -> IORef ThreadId [outputQueue] :: SVar t m a -> IORef ([ChildEvent a], Int) [outputDoorBell] :: SVar t m a -> MVar () [readOutputQ] :: SVar t m a -> m [ChildEvent a] [postProcess] :: SVar t m a -> m Bool [outputQueueFromConsumer] :: SVar t m a -> IORef ([ChildEvent a], Int) [outputDoorBellFromConsumer] :: SVar t m a -> MVar () [maxWorkerLimit] :: SVar t m a -> Limit [maxBufferLimit] :: SVar t m a -> Limit [pushBufferSpace] :: SVar t m a -> IORef Count [pushBufferPolicy] :: SVar t m a -> PushBufferPolicy [pushBufferMVar] :: SVar t m a -> MVar () [remainingWork] :: SVar t m a -> Maybe (IORef Count) [yieldRateInfo] :: SVar t m a -> Maybe YieldRateInfo [enqueue] :: SVar t m a -> (RunInIO m, t m a) -> IO () [isWorkDone] :: SVar t m a -> IO Bool [isQueueDone] :: SVar t m a -> IO Bool [needDoorBell] :: SVar t m a -> IORef Bool [workLoop] :: SVar t m a -> Maybe WorkerInfo -> m () [workerThreads] :: SVar t m a -> IORef (Set ThreadId) [workerCount] :: SVar t m a -> IORef Int [accountThread] :: SVar t m a -> ThreadId -> m () [workerStopMVar] :: SVar t m a -> MVar () [svarStats] :: SVar t m a -> SVarStats [svarRef] :: SVar t m a -> Maybe (IORef ()) [svarInspectMode] :: SVar t m a -> Bool [svarCreator] :: SVar t m a -> ThreadId [outputHeap] :: SVar t m a -> IORef (Heap (Entry Int (AheadHeapEntry t m a)), Maybe Int) [aheadWorkQueue] :: SVar t m a -> IORef ([t m a], Int) -- | Specifies the stream yield rate in yields per second (Hertz). -- We keep accumulating yield credits at rateGoal. 
At any point of -- time we allow only as many yields as we have accumulated as per -- rateGoal since the start of time. If the consumer or the -- producer is slower or faster, the actual rate may fall behind or -- exceed rateGoal. We try to recover the gap between the two by -- increasing or decreasing the pull rate from the producer. However, if -- the gap becomes more than rateBuffer we try to recover only as -- much as rateBuffer. -- -- rateLow puts a bound on how low the instantaneous rate can go -- when recovering the rate gap. In other words, it determines the -- maximum yield latency. Similarly, rateHigh puts a bound on how -- high the instantaneous rate can go when recovering the rate gap. In -- other words, it determines the minimum yield latency. We reduce the -- latency by increasing concurrency, therefore we can say that it puts -- an upper bound on concurrency. -- -- If the rateGoal is 0 or negative the stream never yields a -- value. If the rateBuffer is 0 or negative we do not attempt to -- recover. -- -- Since: 0.5.0 (Streamly) data Rate Rate :: Double -> Double -> Double -> Int -> Rate -- | The lower rate limit [rateLow] :: Rate -> Double -- | The target rate we want to achieve [rateGoal] :: Rate -> Double -- | The upper rate limit [rateHigh] :: Rate -> Double -- | Maximum slack from the goal [rateBuffer] :: Rate -> Int data State t m a magicMaxBuffer :: Word defState :: State t m a -- | Adapt the stream state from one type to another. adaptState :: State t m a -> State t n b getMaxThreads :: State t m a -> Limit setMaxThreads :: Int -> State t m a -> State t m a getMaxBuffer :: State t m a -> Limit setMaxBuffer :: Int -> State t m a -> State t m a getStreamRate :: State t m a -> Maybe Rate setStreamRate :: Maybe Rate -> State t m a -> State t m a getStreamLatency :: State t m a -> Maybe NanoSecond64 setStreamLatency :: Int -> State t m a -> State t m a getYieldLimit :: State t m a -> Maybe Count setYieldLimit :: Maybe Int64 -> State t m a -> State t m a getInspectMode :: State t m a -> Bool setInspectMode :: State t m a -> State t m a instance GHC.Classes.Ord Streamly.Internal.Data.SVar.Type.Count instance GHC.Real.Integral Streamly.Internal.Data.SVar.Type.Count instance GHC.Real.Real Streamly.Internal.Data.SVar.Type.Count instance GHC.Num.Num Streamly.Internal.Data.SVar.Type.Count instance GHC.Enum.Bounded Streamly.Internal.Data.SVar.Type.Count instance GHC.Enum.Enum Streamly.Internal.Data.SVar.Type.Count instance GHC.Show.Show Streamly.Internal.Data.SVar.Type.Count instance GHC.Read.Read Streamly.Internal.Data.SVar.Type.Count instance GHC.Classes.Eq Streamly.Internal.Data.SVar.Type.Count instance GHC.Show.Show Streamly.Internal.Data.SVar.Type.ThreadAbort instance GHC.Show.Show Streamly.Internal.Data.SVar.Type.SVarStyle instance GHC.Classes.Eq Streamly.Internal.Data.SVar.Type.SVarStyle instance GHC.Show.Show Streamly.Internal.Data.SVar.Type.LatencyRange instance GHC.Show.Show Streamly.Internal.Data.SVar.Type.Limit instance GHC.Show.Show Streamly.Internal.Data.SVar.Type.SVarStopStyle instance GHC.Classes.Eq Streamly.Internal.Data.SVar.Type.SVarStopStyle instance GHC.Classes.Eq Streamly.Internal.Data.SVar.Type.Limit instance GHC.Classes.Ord Streamly.Internal.Data.SVar.Type.Limit instance GHC.Exception.Type.Exception Streamly.Internal.Data.SVar.Type.ThreadAbort module Streamly.Internal.Data.Unfold -- | A stream is a succession of Steps. A Yield produces a -- single value and the next state of the stream. Stop indicates -- there are no more values in the stream. 
data Step s a Yield :: a -> s -> Step s a Skip :: s -> Step s a Stop :: Step s a -- | An Unfold m a b is a generator of a stream of values of type -- b from a seed of type a in Monad m. data Unfold m a b -- |
--   Unfold step inject
--   
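-- For illustration, a small sketch of writing an unfold directly in terms of these constructors using mkUnfoldM (documented below), counting down from the seed; Skip would move to a new state without yielding a value. It assumes the qualified Unfold and Fold imports used in the other examples on this page: -- --
--   >>> step n = pure $ if n <= 0 then Unfold.Stop else Unfold.Yield n (n - 1)
--   
--   >>> countdown = Unfold.mkUnfoldM step pure
--   
--   >>> Unfold.fold Fold.toList countdown 3
--   [3,2,1]
--   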
Unfold :: (s -> m (Step s b)) -> (a -> m s) -> Unfold m a b -- | Make an unfold from step and inject functions. -- -- Pre-release mkUnfoldM :: (s -> m (Step s b)) -> (a -> m s) -> Unfold m a b -- | Make an unfold from a step function. -- -- See also: unfoldrM -- -- Pre-release mkUnfoldrM :: Applicative m => (a -> m (Step a b)) -> Unfold m a b -- | Build a stream by unfolding a monadic step function starting -- from a seed. The step function returns the next element in the stream -- and the next seed value. When it is done it returns Nothing and -- the stream ends. unfoldrM :: Applicative m => (a -> m (Maybe (b, a))) -> Unfold m a b -- | Like unfoldrM but uses a pure step function. -- --
--   >>> :{
--    f [] = Nothing
--    f (x:xs) = Just (x, xs)
--   :}
--   
-- --
--   >>> Unfold.fold Fold.toList (Unfold.unfoldr f) [1,2,3]
--   [1,2,3]
--   
unfoldr :: Applicative m => (a -> Maybe (b, a)) -> Unfold m a b -- | Lift a monadic function into an unfold. The unfold generates a -- singleton stream. functionM :: Applicative m => (a -> m b) -> Unfold m a b -- | Lift a pure function into an unfold. The unfold generates a singleton -- stream. -- --
--   function f = functionM $ return . f
--   
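-- For example, applied to the seed 5 (a sketch assuming the qualified Unfold and Fold imports used in the other examples on this page): -- --
--   >>> Unfold.fold Fold.toList (Unfold.function (+ 1)) 5
--   [6]
--   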
function :: Applicative m => (a -> b) -> Unfold m a b -- | Identity unfold. The unfold generates a singleton stream having the -- input as the only element. -- --
--   identity = function Prelude.id
--   
-- -- Pre-release identity :: Applicative m => Unfold m a a -- | The unfold discards its input and generates a singleton stream using -- the supplied monadic action. -- -- Pre-release fromEffect :: Applicative m => m b -> Unfold m a b -- | Discards the unfold input and always returns the argument of -- fromPure. -- --
--   fromPure = fromEffect . pure
--   
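-- For example, the seed is ignored and a singleton stream is generated (a sketch assuming the qualified imports used elsewhere on this page): -- --
--   >>> Unfold.fold Fold.toList (Unfold.fromPure 1) "ignored seed"
--   [1]
--   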
-- -- Pre-release fromPure :: Applicative m => b -> Unfold m a b -- | Convert a list of pure values to a Stream fromList :: Applicative m => Unfold m [a] a -- | Map a function on the input argument of the Unfold. -- --
--   >>> u = Unfold.lmap (fmap (+1)) Unfold.fromList
--   
--   >>> Unfold.fold Fold.toList u [1..5]
--   [2,3,4,5,6]
--   
-- --
--   lmap f u = Unfold.many u (Unfold.function f)
--   
lmap :: (a -> c) -> Unfold m c b -> Unfold m a b -- | Map an action on the input argument of the Unfold. -- --
--   lmapM f u = Unfold.many u (Unfold.functionM f)
--   
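-- For example, an effectful version of the lmap example above (a sketch assuming the same qualified imports): -- --
--   >>> u = Unfold.lmapM (pure . fmap (+ 1)) Unfold.fromList
--   
--   >>> Unfold.fold Fold.toList u [1..5]
--   [2,3,4,5,6]
--   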
lmapM :: Monad m => (a -> m c) -> Unfold m c b -> Unfold m a b -- | Map a function on the output of the unfold (the type b). -- --
--   >>> map f = Unfold.map2 (const f)
--   
-- -- Pre-release map :: Functor m => (b -> c) -> Unfold m a b -> Unfold m a c -- |
--   >>> map2 f = Unfold.mapM2 (\a b -> pure (f a b))
--   
-- -- Note that the seed may mutate (e.g. if the seed is a Handle or IORef) -- as the stream is generated from it, so we need to be careful when reusing -- the seed while the stream is being generated from it. map2 :: Functor m => (a -> b -> c) -> Unfold m a b -> Unfold m a c -- | Apply a monadic function to each element of the stream and replace it -- with the output of the resulting action. -- --
--   >>> mapM f = Unfold.mapM2 (const f)
--   
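-- For example (a sketch assuming the qualified Unfold and Fold imports used elsewhere on this page): -- --
--   >>> u = Unfold.mapM (\x -> pure (x * 2)) Unfold.fromList
--   
--   >>> Unfold.fold Fold.toList u [1,2,3]
--   [2,4,6]
--   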
mapM :: Monad m => (b -> m c) -> Unfold m a b -> Unfold m a c mapM2 :: Monad m => (a -> b -> m c) -> Unfold m a b -> Unfold m a c -- | Supply the seed to an unfold closing the input end of the unfold. -- --
--   both a = Unfold.lmap (Prelude.const a)
--   
-- -- Pre-release both :: a -> Unfold m a b -> Unfold m Void b -- | Supply the first component of the tuple to an unfold that accepts a -- tuple as a seed, resulting in an unfold that accepts the second component -- of the tuple as a seed. -- --
--   first a = Unfold.lmap (a, )
--   
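-- For example, fixing the lower limit of enumerateFromTo so that only the upper limit remains as the seed (a sketch assuming the qualified imports used elsewhere on this page): -- --
--   >>> Unfold.fold Fold.toList (Unfold.first (10 :: Int) Unfold.enumerateFromTo) 14
--   [10,11,12,13,14]
--   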
-- -- Pre-release first :: a -> Unfold m (a, b) c -> Unfold m b c -- | Supply the second component of the tuple to an unfold that accepts a -- tuple as a seed, resulting in an unfold that accepts the first component -- of the tuple as a seed. -- --
--   second b = Unfold.lmap (, b)
--   
-- -- Pre-release second :: b -> Unfold m (a, b) c -> Unfold m a c takeWhileMWithInput :: Monad m => (a -> b -> m Bool) -> Unfold m a b -> Unfold m a b -- | Same as takeWhile but with a monadic predicate. takeWhileM :: Monad m => (b -> m Bool) -> Unfold m a b -> Unfold m a b -- | End the stream generated by the Unfold as soon as the predicate -- fails on an element. takeWhile :: Monad m => (b -> Bool) -> Unfold m a b -> Unfold m a b data ConcatState s1 s2 ConcatOuter :: s1 -> ConcatState s1 s2 ConcatInner :: s1 -> s2 -> ConcatState s1 s2 -- | Apply the first unfold to each output element of the second unfold and -- flatten the output in a single stream. -- --
--   >>> many u = Unfold.many2 (Unfold.lmap snd u)
--   
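-- For example, flattening nested lists by unfolding each inner list with the first fromList (a sketch assuming the qualified imports used elsewhere on this page): -- --
--   >>> u = Unfold.many Unfold.fromList Unfold.fromList
--   
--   >>> Unfold.fold Fold.toList u [[1,2],[3],[4,5]]
--   [1,2,3,4,5]
--   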
many :: Monad m => Unfold m b c -> Unfold m a b -> Unfold m a c many2 :: Monad m => Unfold m (a, b) c -> Unfold m a b -> Unfold m a c -- | See unfoldManyInterleave for documentation and notes. -- -- This is almost identical to unfoldManyInterleave in the StreamD module. -- -- The many combinator is in fact manyAppend, to be more -- explicit in naming. -- -- Internal manyInterleave :: Monad m => Unfold m a b -> Unfold m c a -> Unfold m c b -- | Outer product discarding the first element. -- -- Unimplemented crossApplySnd :: Unfold m a b -> Unfold m a c -> Unfold m a c -- | Outer product discarding the second element. -- -- Unimplemented crossApplyFst :: Unfold m a b -> Unfold m a c -> Unfold m a b -- | Create a cross product (vector product or Cartesian product) of the -- output streams of two unfolds using a monadic combining function. -- --
--   >>> f1 f u = Unfold.mapM2 (\(_, c) b -> f b c) (Unfold.lmap fst u)
--   
--   >>> crossWithM f u = Unfold.many2 (f1 f u)
--   
-- -- Pre-release crossWithM :: Monad m => (b -> c -> m d) -> Unfold m a b -> Unfold m a c -> Unfold m a d -- | Like crossWithM but uses a pure combining function. -- --
--   crossWith f = crossWithM (\b c -> return $ f b c)
--   
-- --
--   >>> u1 = Unfold.lmap fst Unfold.fromList
--   
--   >>> u2 = Unfold.lmap snd Unfold.fromList
--   
--   >>> u = Unfold.crossWith (,) u1 u2
--   
--   >>> Unfold.fold Fold.toList u ([1,2,3], [4,5,6])
--   [(1,4),(1,5),(1,6),(2,4),(2,5),(2,6),(3,4),(3,5),(3,6)]
--   
crossWith :: Monad m => (b -> c -> d) -> Unfold m a b -> Unfold m a c -> Unfold m a d -- | See crossWith. -- -- Definition: -- --
--   >>> cross = Unfold.crossWith (,)
--   
-- -- To create a cross product of the streams generated from a tuple we can -- write: -- --
--   >>> :{
--   cross :: Monad m => Unfold m a b -> Unfold m c d -> Unfold m (a, c) (b, d)
--   cross u1 u2 = Unfold.cross (Unfold.lmap fst u1) (Unfold.lmap snd u2)
--   :}
--   
-- -- Pre-release cross :: Monad m => Unfold m a b -> Unfold m a c -> Unfold m a (b, c) crossApply :: Monad m => Unfold m a (b -> c) -> Unfold m a b -> Unfold m a c -- | Map an unfold generating action to each element of an unfold and -- flatten the results into a single stream. concatMapM :: Monad m => (b -> m (Unfold m a c)) -> Unfold m a b -> Unfold m a c concatMap :: Monad m => (b -> Unfold m a c) -> Unfold m a b -> Unfold m a c bind :: Monad m => Unfold m a b -> (b -> Unfold m a c) -> Unfold m a c infixl 1 `bind` -- | Distribute the input to two unfolds and then zip the outputs to a -- single stream using a monadic zip function. -- -- Stops as soon as any of the unfolds stops. -- -- Pre-release zipWithM :: Monad m => (b -> c -> m d) -> Unfold m a b -> Unfold m a c -> Unfold m a d -- | Like zipWithM but with a pure zip function. -- --
--   >>> square = fmap (\x -> x * x) Unfold.fromList
--   
--   >>> cube = fmap (\x -> x * x * x) Unfold.fromList
--   
--   >>> u = Unfold.zipWith (,) square cube
--   
--   >>> Unfold.fold Fold.toList u [1..5]
--   [(1,1),(4,8),(9,27),(16,64),(25,125)]
--   
-- --
--   zipWith f = zipWithM (\a b -> return $ f a b)
--   
zipWith :: Monad m => (b -> c -> d) -> Unfold m a b -> Unfold m a c -> Unfold m a d -- | Lift a monadic function into an unfold generating a nil stream with a -- side effect. nilM :: Applicative m => (a -> m c) -> Unfold m a b -- | An empty stream. nil :: Applicative m => Unfold m a b -- | Prepend a monadic single element generator function to an -- Unfold. The same seed is used in the action as well as the -- unfold. -- -- Pre-release consM :: Applicative m => (a -> m b) -> Unfold m a b -> Unfold m a b -- | Generates an infinite stream repeating the seed. repeatM :: Applicative m => Unfold m (m a) a -- | Given a seed (n, action), generates a stream replicating the -- action n times. replicateM :: Applicative m => Unfold m (Int, m a) a -- | fromIndicesM gen generates an infinite stream of values using -- gen starting from the seed. -- --
--   fromIndicesM f = Unfold.mapM f $ Unfold.enumerateFrom
--   
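-- For example, generating the first few indices starting from the seed, limited with take which is documented further below (a sketch assuming the qualified imports used elsewhere on this page): -- --
--   >>> u = Unfold.take 3 (Unfold.fromIndicesM pure)
--   
--   >>> Unfold.fold Fold.toList u 0
--   [0,1,2]
--   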
-- -- Pre-release fromIndicesM :: Applicative m => (Int -> m a) -> Unfold m Int a -- | Generates an infinite stream starting with the given seed and applying -- the given function repeatedly. iterateM :: Applicative m => (a -> m a) -> Unfold m (m a) a -- | Types that can be enumerated as a stream. The operations in this type -- class are equivalent to those in the Enum type class, except -- that these generate a stream instead of a list. Use the functions in -- Streamly.Internal.Data.Unfold.Enumeration module to define new -- instances. -- -- Pre-release class Enum a => Enumerable a -- | Unfolds from generating a stream starting with the element -- from, enumerating up to maxBound when the type is -- Bounded or generating an infinite stream when the type is not -- Bounded. -- --
--   >>> Stream.toList $ Stream.take 4 $ Stream.unfold Unfold.enumerateFrom (0 :: Int)
--   [0,1,2,3]
--   
-- -- For Fractional types, enumeration is numerically stable. -- However, no overflow or underflow checks are performed. -- --
--   >>> Stream.toList $ Stream.take 4 $ Stream.unfold Unfold.enumerateFrom 1.1
--   [1.1,2.1,3.1,4.1]
--   
-- -- Pre-release enumerateFrom :: (Enumerable a, Monad m) => Unfold m a a -- | Unfolds (from, to) generating a finite stream starting with -- the element from, enumerating the type up to the value -- to. If to is smaller than from then an -- empty stream is returned. -- --
--   >>> Stream.toList $ Stream.unfold Unfold.enumerateFromTo (0, 4)
--   [0,1,2,3,4]
--   
-- -- For Fractional types, the last element is equal to the -- specified to value after rounding to the nearest integral -- value. -- --
--   >>> Stream.toList $ Stream.unfold Unfold.enumerateFromTo (1.1, 4)
--   [1.1,2.1,3.1,4.1]
--   
-- --
--   >>> Stream.toList $ Stream.unfold Unfold.enumerateFromTo (1.1, 4.6)
--   [1.1,2.1,3.1,4.1,5.1]
--   
-- -- Pre-release enumerateFromTo :: (Enumerable a, Monad m) => Unfold m (a, a) a -- | Unfolds (from, then) generating a stream whose first element -- is from and the successive elements are in increments of -- then. Enumeration can occur downwards or upwards depending on -- whether then comes before or after from. For -- Bounded types the stream ends when maxBound is reached, -- for unbounded types it keeps enumerating infinitely. -- --
--   >>> Stream.toList $ Stream.take 4 $ Stream.unfold Unfold.enumerateFromThen (0, 2)
--   [0,2,4,6]
--   
-- --
--   >>> Stream.toList $ Stream.take 4 $ Stream.unfold Unfold.enumerateFromThen (0,(-2))
--   [0,-2,-4,-6]
--   
-- -- Pre-release enumerateFromThen :: (Enumerable a, Monad m) => Unfold m (a, a) a -- | Unfolds (from, then, to) generating a finite stream whose -- first element is from and the successive elements are in -- increments of then up to to. Enumeration can occur -- downwards or upwards depending on whether then comes before -- or after from. -- --
--   >>> Stream.toList $ Stream.unfold Unfold.enumerateFromThenTo (0, 2, 6)
--   [0,2,4,6]
--   
-- --
--   >>> Stream.toList $ Stream.unfold Unfold.enumerateFromThenTo (0, (-2), (-6))
--   [0,-2,-4,-6]
--   
-- -- Pre-release enumerateFromThenTo :: (Enumerable a, Monad m) => Unfold m (a, a, a) a -- | Unfolds (from, stride) generating an infinite stream starting -- from from and incrementing every time by stride. For -- Bounded types, after the value overflows it keeps enumerating -- in a cycle: -- --
--   >>> Stream.toList $ Stream.take 10 $ Stream.unfold Unfold.enumerateFromStepNum (255::Word8,1)
--   [255,0,1,2,3,4,5,6,7,8]
--   
-- -- The implementation is numerically stable for floating point values. -- -- Note enumerateFromStepIntegral is faster for integrals. -- -- Internal enumerateFromStepNum :: (Monad m, Num a) => Unfold m (a, a) a -- | Same as enumerateFromStepNum using a stride of 1: -- --
--   >>> enumerateFromNum = lmap (\from -> (from, 1)) Unfold.enumerateFromStepNum
--   >>> Stream.toList $ Stream.take 6 $ Stream.unfold enumerateFromNum (0.9)
--   [0.9,1.9,2.9,3.9,4.9,5.9]
--   
-- -- Also, same as enumerateFromThenNum using a stride of 1 but see -- the note in enumerateFromThenNum about the loss of precision: -- --
--   >>> enumerateFromNum = lmap (\from -> (from, from + 1)) Unfold.enumerateFromThenNum
--   >>> Stream.toList $ Stream.take 6 $ Stream.unfold enumerateFromNum (0.9)
--   [0.9,1.9,2.9,3.8999999999999995,4.8999999999999995,5.8999999999999995]
--   
-- -- Internal enumerateFromNum :: (Monad m, Num a) => Unfold m a a -- | Same as 'enumerateFromStepNum (from, next)' using a stride of next -- - from: -- --
--   >>> enumerateFromThenNum = lmap (\(from, next) -> (from, next - from)) Unfold.enumerateFromStepNum
--   
-- -- Example: -- --
--   >>> Stream.toList $ Stream.take 10 $ Stream.unfold enumerateFromThenNum (255::Word8,0)
--   [255,0,1,2,3,4,5,6,7,8]
--   
-- -- The implementation is numerically stable for floating point values. -- -- Note that enumerateFromThenIntegral is faster for integrals. -- -- Note that in the strange world of floating point numbers, using
-- -- enumerateFromThenNum (from, from + 1) is almost exactly the same -- as enumerateFromStepNum (from, 1) but not precisely the same. -- Because (from + 1) - from is not exactly 1, it may lose some -- precision, the loss may also be aggregated in each step, if you want -- that precision then use enumerateFromStepNum instead. -- -- Internal enumerateFromThenNum :: (Monad m, Num a) => Unfold m (a, a) a -- | Can be used to enumerate unbounded integrals. This does not check for -- overflow or underflow for bounded integrals. -- -- Internal enumerateFromStepIntegral :: (Monad m, Integral a) => Unfold m (a, a) a enumerateFromIntegral :: (Monad m, Integral a) => Unfold m a a enumerateFromThenIntegral :: (Monad m, Integral a) => Unfold m (a, a) a enumerateFromToIntegral :: (Monad m, Integral a) => Unfold m (a, a) a enumerateFromThenToIntegral :: (Monad m, Integral a) => Unfold m (a, a, a) a enumerateFromIntegralBounded :: (Monad m, Integral a, Bounded a) => Unfold m a a enumerateFromThenIntegralBounded :: (Monad m, Integral a, Bounded a) => Unfold m (a, a) a enumerateFromToIntegralBounded :: (Monad m, Integral a, Bounded a) => Unfold m (a, a) a enumerateFromThenToIntegralBounded :: (Monad m, Integral a, Bounded a) => Unfold m (a, a, a) a -- | Enumerate from given starting Enum value from with stride of -- 1 till maxBound -- -- Internal enumerateFromSmallBounded :: (Monad m, Enum a, Bounded a) => Unfold m a a -- | Enumerate from given starting Enum value from and next Enum -- value next with stride of (fromEnum next - fromEnum from) -- till maxBound. -- -- Internal enumerateFromThenSmallBounded :: forall m a. (Monad m, Enum a, Bounded a) => Unfold m (a, a) a -- | Enumerate from given starting Enum value from and to Enum -- value to with stride of 1 till to value. -- -- Internal enumerateFromToSmall :: (Monad m, Enum a) => Unfold m (a, a) a -- | Enumerate from given starting Enum value from and then Enum -- value next and to Enum value to with stride of -- (fromEnum next - fromEnum from) till to value. -- -- Internal enumerateFromThenToSmall :: (Monad m, Enum a) => Unfold m (a, a, a) a enumerateFromFractional :: (Monad m, Fractional a) => Unfold m a a enumerateFromThenFractional :: (Monad m, Fractional a) => Unfold m (a, a) a -- | Same as enumerateFromStepNum with a step of 1 and enumerating -- up to the specified upper limit rounded to the nearest integral value: -- --
--   >>> Stream.toList $ Stream.unfold Unfold.enumerateFromToFractional (0.1, 6.3)
--   [0.1,1.1,2.1,3.1,4.1,5.1,6.1]
--   
-- -- Internal enumerateFromToFractional :: (Monad m, Fractional a, Ord a) => Unfold m (a, a) a enumerateFromThenToFractional :: (Monad m, Fractional a, Ord a) => Unfold m (a, a, a) a -- | Convert a list of monadic values to a Stream fromListM :: Applicative m => Unfold m [m a] a fromPtr :: forall m a. (MonadIO m, Storable a) => Unfold m (Ptr a) a fromStreamK :: Applicative m => Unfold m (StreamK m a) a fromStreamD :: Applicative m => Unfold m (Stream m a) a fromStream :: Applicative m => Unfold m (Stream m a) a -- | Convert an Unfold into an unfold accepting a tuple as an -- argument, using the argument of the original fold as the second -- element of tuple and discarding the first element of the tuple. -- --
--   discardFirst = Unfold.lmap snd
--   
-- -- Pre-release discardFirst :: Unfold m a b -> Unfold m (c, a) b -- | Convert an Unfold into an unfold accepting a tuple as an -- argument, using the argument of the original fold as the first element -- of tuple and discarding the second element of the tuple. -- --
--   discardSecond = Unfold.lmap fst
--   
-- -- Pre-release discardSecond :: Unfold m a b -> Unfold m (a, c) b -- | Convert an Unfold that accepts a tuple as an argument into an -- unfold that accepts a tuple with elements swapped. -- --
--   swap = Unfold.lmap Tuple.swap
--   
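-- For example, flipping the (from, to) seed of enumerateFromTo (a sketch assuming the qualified imports used elsewhere on this page): -- --
--   >>> u = Unfold.swap Unfold.enumerateFromTo
--   
--   >>> Unfold.fold Fold.toList u (5 :: Int, 1 :: Int)
--   [1,2,3,4,5]
--   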
-- -- Pre-release swap :: Unfold m (a, c) b -> Unfold m (c, a) b -- | Compose an Unfold and a Fold. Given an Unfold m a -- b and a Fold m b c, returns a monadic action a -> -- m c representing the application of the fold on the unfolded -- stream. -- --
--   >>> Unfold.fold Fold.sum Unfold.fromList [1..100]
--   5050
--   
-- --
--   >>> fold f u = Stream.fold f . Stream.unfold u
--   
-- -- Pre-release fold :: Monad m => Fold m b c -> Unfold m a b -> a -> m c -- | Scan the output of an Unfold to change it in a stateful manner. -- -- Pre-release postscanlM' :: Monad m => (b -> a -> m b) -> m b -> Unfold m c a -> Unfold m c b -- | Scan the output of an Unfold to change it in a stateful manner. -- -- Pre-release postscan :: Monad m => Fold m b c -> Unfold m a b -> Unfold m a c -- | Scan the output of an Unfold to change it in a stateful manner. -- Once fold is done it will stop. -- --
--   >>> u = Unfold.scan (Fold.take 2 Fold.sum) Unfold.fromList
--   
--   >>> Unfold.fold Fold.toList u [1,2,3,4,5]
--   [0,1,3]
--   
-- -- Pre-release scan :: Monad m => Fold m b c -> Unfold m a b -> Unfold m a c -- | Scan the output of an Unfold to change it in a stateful manner. -- Once fold is done it will restart from its initial state. -- --
--   >>> u = Unfold.scanMany (Fold.take 2 Fold.sum) Unfold.fromList
--   
--   >>> Unfold.fold Fold.toList u [1,2,3,4,5]
--   [0,1,3,0,3,7,0,5]
--   
-- -- Pre-release scanMany :: Monad m => Fold m b c -> Unfold m a b -> Unfold m a c -- | Apply a fold multiple times on the output of an unfold. -- -- Pre-release foldMany :: Monad m => Fold m b c -> Unfold m a b -> Unfold m a c -- | Choose left or right unfold based on an either input. -- -- Pre-release either :: Applicative m => Unfold m a c -> Unfold m b c -> Unfold m (Either a b) c -- |
--   >>> u = Unfold.take 2 Unfold.fromList
--   
--   >>> Unfold.fold Fold.toList u [1..100]
--   [1,2]
--   
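-- Composing with filter, which is documented just below (a sketch assuming the qualified imports used elsewhere on this page): -- --
--   >>> u = Unfold.take 3 (Unfold.filter even Unfold.fromList)
--   
--   >>> Unfold.fold Fold.toList u [1..10]
--   [2,4,6]
--   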
take :: Applicative m => Int -> Unfold m a b -> Unfold m a b -- | Include only those elements that pass a predicate. filter :: Monad m => (b -> Bool) -> Unfold m a b -> Unfold m a b -- | Same as filter but with a monadic predicate. filterM :: Monad m => (b -> m Bool) -> Unfold m a b -> Unfold m a b -- | drop n unf drops n elements from the stream -- generated by unf. drop :: Applicative m => Int -> Unfold m a b -> Unfold m a b -- | Similar to dropWhileM but with a pure condition function. dropWhile :: Monad m => (b -> Bool) -> Unfold m a b -> Unfold m a b -- | dropWhileM f unf drops elements from the stream generated by -- unf while the condition holds true. The condition function -- f is monadic in nature. dropWhileM :: Monad m => (b -> m Bool) -> Unfold m a b -> Unfold m a b joinInnerGeneric :: Monad m => (b -> c -> Bool) -> Unfold m a b -> Unfold m a c -> Unfold m a (b, c) -- | Like gbracketIO but with following differences: -- -- -- -- Inhibits stream fusion -- -- Pre-release gbracket_ :: Monad m => (a -> m c) -> (forall s. m s -> m (Either e s)) -> (c -> m d) -> Unfold m (c, e) b -> Unfold m c b -> Unfold m a b -- | Run the alloc action a -> m c with async exceptions -- disabled but keeping blocking operations interruptible (see -- mask). Use the output c as input to Unfold m c -- b to generate an output stream. When unfolding use the supplied -- try operation forall s. m s -> m (Either e s) to -- catch synchronous exceptions. If an exception occurs run the exception -- handling unfold Unfold m (c, e) b. -- -- The cleanup action c -> m d, runs whenever the stream ends -- normally, due to a sync or async exception or if it gets garbage -- collected after a partial lazy evaluation. See bracket for -- the semantics of the cleanup action. -- -- gbracket can express all other exception handling -- combinators. -- -- Inhibits stream fusion -- -- Pre-release gbracketIO :: MonadIO m => (a -> IO c) -> (c -> IO d) -> (c -> IO ()) -> Unfold m e b -> (forall s. m s -> IO (Either e s)) -> Unfold m c b -> Unfold m a b -- | Run a side effect a -> m c on the input a before -- unfolding it using Unfold m a b. -- --
--   before f = lmapM (\a -> f a >> return a)
--   
-- -- Pre-release before :: (a -> m c) -> Unfold m a b -> Unfold m a b -- | Unfold the input a using Unfold m a b, run an action -- on a whenever the unfold stops normally, or if it is garbage -- collected after a partial lazy evaluation. -- -- The semantics of the action a -> m c are similar to the -- cleanup action semantics in bracket. -- -- See also after_ -- -- Pre-release afterIO :: MonadIO m => (a -> IO c) -> Unfold m a b -> Unfold m a b -- | Like after with following differences: -- -- -- -- Pre-release after_ :: Monad m => (a -> m c) -> Unfold m a b -> Unfold m a b -- | Unfold the input a using Unfold m a b, run an action -- on a whenever the unfold stops normally, aborts due to an -- exception or if it is garbage collected after a partial lazy -- evaluation. -- -- The semantics of the action a -> m c are similar to the -- cleanup action semantics in bracket. -- --
--   finally release = bracket return release
--   
-- -- See also finally_ -- -- Inhibits stream fusion -- -- Pre-release finallyIO :: (MonadIO m, MonadCatch m) => (a -> IO c) -> Unfold m a b -> Unfold m a b -- | Like finallyIO with following differences: -- -- -- -- Inhibits stream fusion -- -- Pre-release finally_ :: MonadCatch m => (a -> m c) -> Unfold m a b -> Unfold m a b -- | Run the alloc action a -> m c with async exceptions -- disabled but keeping blocking operations interruptible (see -- mask). Use the output c as input to Unfold m c -- b to generate an output stream. -- -- c is usually a resource under the state of monad m, -- e.g. a file handle, that requires a cleanup after use. The cleanup -- action c -> m d, runs whenever the stream ends normally, -- due to a sync or async exception or if it gets garbage collected after -- a partial lazy evaluation. -- -- bracket only guarantees that the cleanup action runs, and it -- runs with async exceptions enabled. The action must ensure that it can -- successfully cleanup the resource in the face of sync or async -- exceptions. -- -- When the stream ends normally or on a sync exception, cleanup action -- runs immediately in the current thread context, whereas in other cases -- it runs in the GC context, therefore, cleanup may be delayed until the -- GC gets to run. -- -- See also: bracket_, gbracket -- -- Inhibits stream fusion -- -- Pre-release bracketIO :: (MonadIO m, MonadCatch m) => (a -> IO c) -> (c -> IO d) -> Unfold m c b -> Unfold m a b -- | Like bracketIO but with following differences: -- -- -- -- Inhibits stream fusion -- -- Pre-release bracket_ :: MonadCatch m => (a -> m c) -> (c -> m d) -> Unfold m c b -> Unfold m a b -- | Unfold the input a using Unfold m a b, run the -- action a -> m c on a if the unfold aborts due to -- an exception. -- -- Inhibits stream fusion -- -- Pre-release onException :: MonadCatch m => (a -> m c) -> Unfold m a b -> Unfold m a b -- | When unfolding Unfold m a b if an exception e -- occurs, unfold e using Unfold m e b. -- -- Inhibits stream fusion -- -- Pre-release handle :: (MonadCatch m, Exception e) => Unfold m e b -> Unfold m a b -> Unfold m a b -- | Fast, composable stream producers with ability to terminate, -- supporting nested stream fusion. Nested stream operations like -- concatMap in the Streamly.Data.Stream module do not -- fuse, however, the unfoldMany operation, using the -- Unfold type, is a fully fusible alternative to -- concatMap. -- -- Please refer to Streamly.Internal.Data.Unfold for more -- functions that have not yet been released. -- -- Exception combinators are not exposed, we would like to encourage the -- use of Stream type instead whenever exception handling is -- required. We can consider exposing the unfold exception functions if -- there is a compelling use case to use unfolds instead of stream. module Streamly.Data.Unfold -- | An Unfold m a b is a generator of a stream of values of type -- b from a seed of type a in Monad m. data Unfold m a b -- | Build a stream by unfolding a monadic step function starting -- from a seed. The step function returns the next element in the stream -- and the next seed value. When it is done it returns Nothing and -- the stream ends. unfoldrM :: Applicative m => (a -> m (Maybe (b, a))) -> Unfold m a b -- | Like unfoldrM but uses a pure step function. -- --
--   >>> :{
--    f [] = Nothing
--    f (x:xs) = Just (x, xs)
--   :}
--   
-- --
--   >>> Unfold.fold Fold.toList (Unfold.unfoldr f) [1,2,3]
--   [1,2,3]
--   
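-- For comparison, a minimal sketch of the monadic variant (assuming the same
-- qualified Unfold and Fold imports as the surrounding examples; the output
-- shown is the expected result):
--
--   >>> fM xs = pure (if null xs then Nothing else Just (head xs, tail xs))
--   
--   >>> Unfold.fold Fold.toList (Unfold.unfoldrM fM) [1,2,3]
--   [1,2,3]
--   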
unfoldr :: Applicative m => (a -> Maybe (b, a)) -> Unfold m a b -- | Lift a pure function into an unfold. The unfold generates a singleton -- stream. -- --
--   function f = functionM $ return . f
--   
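-- An illustrative sketch of the singleton behaviour (same import assumptions
-- as the other examples here):
--
--   >>> Unfold.fold Fold.toList (Unfold.function (+ 1)) 10
--   [11]
--   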
function :: Applicative m => (a -> b) -> Unfold m a b -- | Lift a monadic function into an unfold. The unfold generates a -- singleton stream. functionM :: Applicative m => (a -> m b) -> Unfold m a b -- | Generates an infinite stream repeating the seed. repeatM :: Applicative m => Unfold m (m a) a -- | Given a seed (n, action), generates a stream replicating the -- action n times. replicateM :: Applicative m => Unfold m (Int, m a) a -- | Generates an infinite stream starting with the given seed and applying -- the given function repeatedly. iterateM :: Applicative m => (a -> m a) -> Unfold m (m a) a -- | Types that can be enumerated as a stream. The operations in this type -- class are equivalent to those in the Enum type class, except -- that these generate a stream instead of a list. Use the functions in -- Streamly.Internal.Data.Unfold.Enumeration module to define new -- instances. -- -- Pre-release class Enum a => Enumerable a -- | Unfolds from generating a stream starting with the element -- from, enumerating up to maxBound when the type is -- Bounded or generating an infinite stream when the type is not -- Bounded. -- --
--   >>> Stream.toList $ Stream.take 4 $ Stream.unfold Unfold.enumerateFrom (0 :: Int)
--   [0,1,2,3]
--   
-- -- For Fractional types, enumeration is numerically stable. -- However, no overflow or underflow checks are performed. -- --
--   >>> Stream.toList $ Stream.take 4 $ Stream.unfold Unfold.enumerateFrom 1.1
--   [1.1,2.1,3.1,4.1]
--   
-- -- Pre-release enumerateFrom :: (Enumerable a, Monad m) => Unfold m a a -- | Unfolds (from, to) generating a finite stream starting with -- the element from, enumerating the type up to the value -- to. If to is smaller than from then an -- empty stream is returned. -- --
--   >>> Stream.toList $ Stream.unfold Unfold.enumerateFromTo (0, 4)
--   [0,1,2,3,4]
--   
-- -- For Fractional types, the last element is equal to the -- specified to value after rounding to the nearest integral -- value. -- --
--   >>> Stream.toList $ Stream.unfold Unfold.enumerateFromTo (1.1, 4)
--   [1.1,2.1,3.1,4.1]
--   
-- --
--   >>> Stream.toList $ Stream.unfold Unfold.enumerateFromTo (1.1, 4.6)
--   [1.1,2.1,3.1,4.1,5.1]
--   
-- -- Pre-release enumerateFromTo :: (Enumerable a, Monad m) => Unfold m (a, a) a -- | Unfolds (from, then) generating a stream whose first element -- is from and the successive elements are in increments of -- then. Enumeration can occur downwards or upwards depending on -- whether then comes before or after from. For -- Bounded types the stream ends when maxBound is reached, -- for unbounded types it keeps enumerating infinitely. -- --
--   >>> Stream.toList $ Stream.take 4 $ Stream.unfold Unfold.enumerateFromThen (0, 2)
--   [0,2,4,6]
--   
-- --
--   >>> Stream.toList $ Stream.take 4 $ Stream.unfold Unfold.enumerateFromThen (0,(-2))
--   [0,-2,-4,-6]
--   
-- -- Pre-release enumerateFromThen :: (Enumerable a, Monad m) => Unfold m (a, a) a -- | Unfolds (from, then, to) generating a finite stream whose -- first element is from and the successive elements are in -- increments of then up to to. Enumeration can occur -- downwards or upwards depending on whether then comes before -- or after from. -- --
--   >>> Stream.toList $ Stream.unfold Unfold.enumerateFromThenTo (0, 2, 6)
--   [0,2,4,6]
--   
-- --
--   >>> Stream.toList $ Stream.unfold Unfold.enumerateFromThenTo (0, (-2), (-6))
--   [0,-2,-4,-6]
--   
-- -- Pre-release enumerateFromThenTo :: (Enumerable a, Monad m) => Unfold m (a, a, a) a -- | Convert a list of pure values to a Stream fromList :: Applicative m => Unfold m [a] a -- | Convert a list of monadic values to a Stream fromListM :: Applicative m => Unfold m [m a] a fromStream :: Applicative m => Unfold m (Stream m a) a -- | Map a function on the input argument of the Unfold. -- --
--   >>> u = Unfold.lmap (fmap (+1)) Unfold.fromList
--   
--   >>> Unfold.fold Fold.toList u [1..5]
--   [2,3,4,5,6]
--   
-- --
--   lmap f = Unfold.many (Unfold.function f)
--   
lmap :: (a -> c) -> Unfold m c b -> Unfold m a b -- | Map an action on the input argument of the Unfold. -- --
--   lmapM f = Unfold.many (Unfold.functionM f)
--   
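-- For example, a minimal sketch that pre-processes the seed monadically
-- (assuming the usual qualified imports; output as expected):
--
--   >>> u = Unfold.lmapM (pure . reverse) Unfold.fromList
--   
--   >>> Unfold.fold Fold.toList u [1,2,3]
--   [3,2,1]
--   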
lmapM :: Monad m => (a -> m c) -> Unfold m c b -> Unfold m a b -- | Supply the first component of the tuple to an unfold that accepts a -- tuple as a seed resulting in a fold that accepts the second component -- of the tuple as a seed. -- --
--   first a = Unfold.lmap (a, )
--   
-- -- Pre-release first :: a -> Unfold m (a, b) c -> Unfold m b c -- | Supply the second component of the tuple to an unfold that accepts a -- tuple as a seed resulting in a fold that accepts the first component -- of the tuple as a seed. -- --
--   second b = Unfold.lmap (, b)
--   
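-- A minimal sketch, fixing the "to" end of enumerateFromTo (assuming the
-- qualified imports used elsewhere in this module):
--
--   >>> u = Unfold.second 5 Unfold.enumerateFromTo
--   
--   >>> Unfold.fold Fold.toList u 1
--   [1,2,3,4,5]
--   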
-- -- Pre-release second :: b -> Unfold m (a, b) c -> Unfold m a c -- | Apply a monadic function to each element of the stream and replace it -- with the output of the resulting action. -- --
--   >>> mapM f = Unfold.mapM2 (const f)
--   
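-- For example (a sketch, same import assumptions as above):
--
--   >>> u = Unfold.mapM (\x -> pure (x * 2)) Unfold.fromList
--   
--   >>> Unfold.fold Fold.toList u [1,2,3]
--   [2,4,6]
--   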
mapM :: Monad m => (b -> m c) -> Unfold m a b -> Unfold m a c -- | Same as takeWhile but with a monadic predicate. takeWhileM :: Monad m => (b -> m Bool) -> Unfold m a b -> Unfold m a b -- | End the stream generated by the Unfold as soon as the predicate -- fails on an element. takeWhile :: Monad m => (b -> Bool) -> Unfold m a b -> Unfold m a b -- |
--   >>> u = Unfold.take 2 Unfold.fromList
--   
--   >>> Unfold.fold Fold.toList u [1..100]
--   [1,2]
--   
take :: Applicative m => Int -> Unfold m a b -> Unfold m a b -- | Include only those elements that pass a predicate. filter :: Monad m => (b -> Bool) -> Unfold m a b -> Unfold m a b -- | Same as filter but with a monadic predicate. filterM :: Monad m => (b -> m Bool) -> Unfold m a b -> Unfold m a b -- | drop n unf drops n elements from the stream -- generated by unf. drop :: Applicative m => Int -> Unfold m a b -> Unfold m a b -- | Similar to dropWhileM but with a pure condition function. dropWhile :: Monad m => (b -> Bool) -> Unfold m a b -> Unfold m a b -- | dropWhileM f unf drops elements from the stream generated by -- unf while the condition holds true. The condition function -- f is monadic in nature. dropWhileM :: Monad m => (b -> m Bool) -> Unfold m a b -> Unfold m a b -- | Like zipWithM but with a pure zip function. -- --
--   >>> square = fmap (\x -> x * x) Unfold.fromList
--   
--   >>> cube = fmap (\x -> x * x * x) Unfold.fromList
--   
--   >>> u = Unfold.zipWith (,) square cube
--   
--   >>> Unfold.fold Fold.toList u [1..5]
--   [(1,1),(4,8),(9,27),(16,64),(25,125)]
--   
-- --
--   zipWith f = zipWithM (\a b -> return $ f a b)
--   
zipWith :: Monad m => (b -> c -> d) -> Unfold m a b -> Unfold m a c -> Unfold m a d -- | Like crossWithM but uses a pure combining function. -- --
--   crossWith f = crossWithM (\b c -> return $ f b c)
--   
-- --
--   >>> u1 = Unfold.lmap fst Unfold.fromList
--   
--   >>> u2 = Unfold.lmap snd Unfold.fromList
--   
--   >>> u = Unfold.crossWith (,) u1 u2
--   
--   >>> Unfold.fold Fold.toList u ([1,2,3], [4,5,6])
--   [(1,4),(1,5),(1,6),(2,4),(2,5),(2,6),(3,4),(3,5),(3,6)]
--   
crossWith :: Monad m => (b -> c -> d) -> Unfold m a b -> Unfold m a c -> Unfold m a d -- | Apply the first unfold to each output element of the second unfold and -- flatten the output in a single stream. -- --
--   >>> many u = Unfold.many2 (Unfold.lmap snd u)
--   
many :: Monad m => Unfold m b c -> Unfold m a b -> Unfold m a c module Streamly.Internal.Data.Time.Clock -- | Clock types. A clock may be system-wide (that is, visible to all -- processes) or per-process (measuring time that is meaningful only -- within a process). All implementations shall support CLOCK_REALTIME. -- (The only suspend-aware monotonic is CLOCK_BOOTTIME on Linux.) data Clock -- | The identifier for the system-wide monotonic clock, which is defined -- as a clock measuring real time, whose value cannot be set via -- clock_settime and which cannot have negative clock jumps. The -- maximum possible clock jump shall be implementation defined. For this -- clock, the value returned by getTime represents the amount of -- time (in seconds and nanoseconds) since an unspecified point in the -- past (for example, system start-up time, or the Epoch). This point -- does not change after system start-up time. Note that the absolute -- value of the monotonic clock is meaningless (because its origin is -- arbitrary), and thus there is no need to set it. Furthermore, realtime -- applications can rely on the fact that the value of this clock is -- never set. Monotonic :: Clock -- | The identifier of the system-wide clock measuring real time. For this -- clock, the value returned by getTime represents the amount of -- time (in seconds and nanoseconds) since the Epoch. Realtime :: Clock -- | The identifier of the CPU-time clock associated with the calling -- process. For this clock, the value returned by getTime -- represents the amount of execution time of the current process. ProcessCPUTime :: Clock -- | The identifier of the CPU-time clock associated with the calling OS -- thread. For this clock, the value returned by getTime -- represents the amount of execution time of the current OS thread. ThreadCPUTime :: Clock -- | (since Linux 2.6.28; Linux and Mac OSX) Similar to CLOCK_MONOTONIC, -- but provides access to a raw hardware-based time that is not subject -- to NTP adjustments or the incremental adjustments performed by -- adjtime(3). MonotonicRaw :: Clock -- | (since Linux 2.6.32; Linux and Mac OSX) A faster but less precise -- version of CLOCK_MONOTONIC. Use when you need very fast, but not -- fine-grained timestamps. MonotonicCoarse :: Clock -- | (since Linux 2.6.39; Linux and Mac OSX) Identical to CLOCK_MONOTONIC, -- except it also includes any time that the system is suspended. This -- allows applications to get a suspend-aware monotonic clock without -- having to deal with the complications of CLOCK_REALTIME, which may -- have discontinuities if the time is changed using settimeofday(2). Uptime :: Clock -- | (since Linux 2.6.32; Linux-specific) A faster but less precise version -- of CLOCK_REALTIME. Use when you need very fast, but not fine-grained -- timestamps. RealtimeCoarse :: Clock getTime :: Clock -> IO AbsTime -- | asyncClock g starts a clock thread that updates an IORef with -- current time as a 64-bit value in microseconds, every g -- seconds. The IORef can be read asynchronously. The thread exits -- automatically when the reference to the returned ThreadId is -- lost. -- -- Minimum granularity of clock update is 1 ms. Higher is better for -- performance. -- -- CAUTION! This is safe only on a 64-bit machine. On a 32-bit machine a -- 64-bit Var cannot be read consistently without a lock while -- another thread is writing to it. 
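-- A rough usage sketch (illustrative only; assumes this module is imported
-- qualified as Clock and that threadDelay comes from Control.Concurrent):
--
--   (tid, ref) <- Clock.asyncClock Clock.Monotonic 0.01
--   t1 <- Clock.readClock (tid, ref)
--   threadDelay 100000
--   t2 <- Clock.readClock (tid, ref)
--   print (t1, t2)
--   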
asyncClock :: Clock -> Double -> IO (ThreadId, IORef MicroSecond64) readClock :: (ThreadId, IORef MicroSecond64) -> IO MicroSecond64 -- | Adjustable periodic timer. data Timer -- | timer clockType granularity period creates a timer. The timer -- produces timer ticks at specified time intervals that can be waited -- upon using waitTimer. If the previous tick is not yet -- processed, the new tick is lost. timer :: Clock -> Double -> Double -> IO Timer -- | Resets the current period. resetTimer :: Timer -> IO () -- | Elongates the current period by specified amount. -- -- Unimplemented extendTimer :: Timer -> Double -> IO () -- | Shortens the current period by specified amount. -- -- Unimplemented shortenTimer :: Timer -> Double -> IO () -- | Show the remaining time in the current time period. -- -- Unimplemented readTimer :: Timer -> IO Double -- | Blocking wait for a timer tick. waitTimer :: Timer -> IO () module Streamly.Internal.System.IO -- | Default maximum buffer size in bytes, for reading from and writing to -- IO devices, the value is 32KB minus GHC allocation overhead, which is -- a few bytes, so that the actual allocation is 32KB. defaultChunkSize :: Int -- | When we allocate a byte array of size k the allocator -- actually allocates memory of size k + byteArrayOverhead. -- arrayPayloadSize n returns the size of the array in bytes -- that would result in an allocation of n bytes. arrayPayloadSize :: Int -> Int unsafeInlineIO :: IO a -> a -- | Returns the heap allocation overhead for allocating a byte array. Each -- heap object contains a one word header. Byte arrays contain the size -- of the array after the header. -- -- See -- https://gitlab.haskell.org/ghc/ghc/-/wikis/commentary/rts/storage/heap-objects#arrays byteArrayOverhead :: Int module Streamly.Internal.Data.Parser -- | The type of a Parser's initial action. -- -- Internal data Initial s b -- | Wait for step function to be called with state s. IPartial :: !s -> Initial s b -- | Return a result right away without an input. IDone :: !b -> Initial s b -- | Return an error right away without an input. IError :: !String -> Initial s b -- | The return type of a Parser step. -- -- The parse operation feeds the input stream to the parser one element -- at a time, representing a parse Step. The parser may or may not -- consume the item and returns a result. If the result is Partial -- we can either extract the result or feed more input to the parser. If -- the result is Continue, we must feed more input in order to get -- a result. If the parser returns Done then the parser can no -- longer take any more input. -- -- If the result is Continue, the parse operation retains the -- input in a backtracking buffer, in case the parser may ask to -- backtrack in future. Whenever a 'Partial n' result is returned we -- first backtrack by n elements in the input and then release -- any remaining backtracking buffer. Similarly, 'Continue n' backtracks -- to n elements before the current position and starts feeding -- the input from that point for future invocations of the parser. -- -- If parser is not yet done, we can use the extract operation -- on the state of the parser to extract a result. If the parser -- has not yet yielded a result, the operation fails with a -- ParseError exception. If the parser yielded a Partial -- result in the past the last partial result is returned. Therefore, if -- a parser yields a partial result once it cannot fail later on. 
-- -- The parser can never backtrack beyond the position where the last -- partial result left it at. The parser must ensure that the backtrack -- position is always after that. -- -- Pre-release data Step s b -- | Partial count state. The following hold on Partial result: -- --
--
--   1. extract on state would succeed and give a result.
--
--   2. Input stream position is reset to current position - count.
--
--   3. All input before the new position is dropped. The parser can never backtrack beyond this position.
--
Partial :: !Int -> !s -> Step s b -- | Continue count state. The following hold on a Continue -- result: -- --
--
--   1. If there was a Partial result in the past, extract on state would give that result as Done, otherwise it may return Error or Continue.
--
--   2. Input stream position is reset to current position - count.
--
--   3. The input is retained in a backtrack buffer.
--
Continue :: !Int -> !s -> Step s b -- | Done with leftover input count and result. -- -- Done count result means the parser has finished, it will -- accept no more input, last count elements from the input are -- unused and the result of the parser is in result. Done :: !Int -> !b -> Step s b -- | Parser failed without generating any output. -- -- The parsing operation may backtrack to the beginning and try another -- alternative. Error :: !String -> Step s b -- | Map an extract function over the state of Step extractStep :: Monad m => (s -> m (Step s1 b)) -> Step s b -> m (Step s1 b) -- | Bimap discarding the count, and using the supplied count instead. bimapOverrideCount :: Int -> (s -> s1) -> (b -> b1) -> Step s b -> Step s1 b1 -- | A parser is a fold that can fail and is represented as Parser step -- initial extract. Before we drive a parser we call the -- initial action to retrieve the initial state of the fold. The -- parser driver invokes step with the state returned by the -- previous step and the next input element. It results into a new state -- and a command to the driver represented by Step type. The -- driver keeps invoking the step function until it stops or fails. At -- any point of time the driver can call extract to inspect the -- result of the fold. If the parser hits the end of input -- extract is called. It may result in an error or an output -- value. -- -- Pre-release data Parser a m b Parser :: (s -> a -> m (Step s b)) -> m (Initial s b) -> (s -> m (Step s b)) -> Parser a m b -- | This exception is used when a parser ultimately fails, the user of the -- parser is intimated via this exception. -- -- Pre-release newtype ParseError ParseError :: String -> ParseError -- | rmapM f parser maps the monadic function f on the -- output of the parser. -- --
--   >>> rmap = fmap
--   
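-- For example, a minimal sketch (assuming the qualified Stream, Fold and
-- Parser imports used in the surrounding examples; output as expected):
--
--   >>> Stream.parse (Parser.rmapM (pure . (+ 1)) (Parser.fromFold Fold.sum)) (Stream.fromList [1..5])
--   Right 16
--   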
rmapM :: Monad m => (b -> m c) -> Parser a m b -> Parser a m c -- | A parser that always yields a pure value without consuming any input. fromPure :: Monad m => b -> Parser a m b -- | A parser that always yields the result of an effectful action without -- consuming any input. fromEffect :: Monad m => m b -> Parser a m b -- | Sequential parser application. Apply two parsers sequentially to an -- input stream. The first parser runs and processes the input, the -- remaining input is then passed to the second parser. If both parsers -- succeed, their outputs are combined using the supplied function. If -- either parser fails, the operation fails. -- -- This combinator delivers high performance by stream fusion but it -- comes with some limitations. For those cases use the -- Applicative instance of ParserK. -- -- CAVEAT 1: NO RECURSION. This function is strict in both arguments. As -- a result, if a parser is defined recursively using this, it may cause -- an infinite loop. The following example checks the strictness: -- --
--   >>> p = Parser.splitWith const (Parser.satisfy (> 0)) undefined
--   
--   >>> Stream.parse p $ Stream.fromList [1]
--   *** Exception: Prelude.undefined
--   ...
--   
-- -- CAVEAT 2: QUADRATIC TIME COMPLEXITY. Static composition is fast due to -- stream fusion, but it works well only for limited (e.g. up to 8) -- compositions, use Streamly.Data.ParserK for larger -- compositions. -- -- Below are some common idioms that can be expressed using -- splitWith: -- --
--   >>> span p f1 f2 = Parser.splitWith (,) (Parser.takeWhile p f1) (Parser.fromFold f2)
--   
--   >>> spanBy eq f1 f2 = Parser.splitWith (,) (Parser.groupBy eq f1) (Parser.fromFold f2)
--   
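-- A concrete sketch along the lines of the idioms above (same import
-- assumptions; output as expected):
--
--   >>> p = Parser.splitWith (,) (Parser.satisfy odd) (Parser.satisfy even)
--   
--   >>> Stream.parse p $ Stream.fromList [1,2,3]
--   Right (1,2)
--   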
-- -- Pre-release splitWith :: Monad m => (a -> b -> c) -> Parser x m a -> Parser x m b -> Parser x m c -- | Sequential parser application ignoring the output of the first parser. -- Apply two parsers sequentially to an input stream. The input is -- provided to the first parser, when it is done the remaining input is -- provided to the second parser. The output of the parser is the output -- of the second parser. The operation fails if any of the parsers fail. -- -- ALL THE CAVEATS IN splitWith APPLY HERE AS WELL. -- -- This implementation is strict in the second argument, therefore, the -- following will fail: -- --
--   >>> Stream.parse (Parser.split_ (Parser.satisfy (> 0)) undefined) $ Stream.fromList [1]
--   *** Exception: Prelude.undefined
--   ...
--   
-- -- Pre-release split_ :: Monad m => Parser x m a -> Parser x m b -> Parser x m b -- | A parser that always fails with an error message without consuming any -- input. die :: Monad m => String -> Parser a m b -- | A parser that always fails with an effectful error message and without -- consuming any input. -- -- Pre-release dieM :: Monad m => m String -> Parser a m b -- | See documentation of some. -- -- Pre-release splitSome :: Monad m => Parser a m b -> Fold m b c -> Parser a m c -- | See documentation of many. -- -- Pre-release splitMany :: Monad m => Parser a m b -> Fold m b c -> Parser a m c -- | Like splitMany, but inner fold emits an output at the end even if no -- input is received. -- -- Internal splitManyPost :: Monad m => Parser a m b -> Fold m b c -> Parser a m c -- | Sequential alternative. The input is first passed to the first parser, -- if it succeeds, the result is returned. However, if the first parser -- fails, the parser driver backtracks and tries the same input on the -- second (alternative) parser, returning the result if it succeeds. -- -- This combinator delivers high performance by stream fusion but it -- comes with some limitations. For those cases use the -- Alternative instance of ParserK. -- -- CAVEAT 1: NO RECURSION. This function is strict in both arguments. As -- a result, if a parser is defined recursively using this, it may cause -- an infintie loop. The following example checks the strictness: -- --
--   >>> p = Parser.satisfy (> 0) `Parser.alt` undefined
--   
--   >>> Stream.parse p $ Stream.fromList [1..10]
--   *** Exception: Prelude.undefined
--   
-- -- CAVEAT 2: QUADRATIC TIME COMPLEXITY. Static composition is fast due to -- stream fusion, but it works well only for limited (e.g. up to 8) -- compositions, use Streamly.Data.ParserK for larger -- compositions. -- -- Time Complexity: O(n^2) where n is the number of compositions. -- -- Pre-release alt :: Monad m => Parser x m a -> Parser x m a -> Parser x m a -- | Map a Parser returning function on the result of a -- Parser. -- -- ALL THE CAVEATS IN splitWith APPLY HERE AS WELL. -- -- Pre-release concatMap :: Monad m => (b -> Parser a m c) -> Parser a m b -> Parser a m c -- | lmap f parser maps the function f on the input of -- the parser. -- --
--   >>> Stream.parse (Parser.lmap (\x -> x * x) (Parser.fromFold Fold.sum)) (Stream.enumerateFromTo 1 100)
--   Right 338350
--   
-- --
--   lmap = Parser.lmapM return
--   
lmap :: (a -> b) -> Parser b m r -> Parser a m r -- | lmapM f parser maps the monadic function f on the -- input of the parser. lmapM :: Monad m => (a -> m b) -> Parser b m r -> Parser a m r -- | Include only those elements that pass a predicate. -- --
--   >>> Stream.parse (Parser.filter (> 5) (Parser.fromFold Fold.sum)) $ Stream.fromList [1..10]
--   Right 40
--   
filter :: Monad m => (a -> Bool) -> Parser a m b -> Parser a m b -- | Better performance splitWith for non-failing parsers. -- -- Does not work correctly for parsers that can fail. -- -- ALL THE CAVEATS IN splitWith APPLY HERE AS WELL. noErrorUnsafeSplitWith :: Monad m => (a -> b -> c) -> Parser x m a -> Parser x m b -> Parser x m c -- | Better performance split_ for non-failing parsers. -- -- Does not work correctly for parsers that can fail. -- -- ALL THE CAVEATS IN splitWith APPLY HERE AS WELL. noErrorUnsafeSplit_ :: Monad m => Parser x m a -> Parser x m b -> Parser x m b -- | Better performance concatMap for non-failing parsers. -- -- Does not work correctly for parsers that can fail. -- -- ALL THE CAVEATS IN splitWith APPLY HERE AS WELL. noErrorUnsafeConcatMap :: Monad m => (b -> Parser a m c) -> Parser a m b -> Parser a m c -- | A parser is a fold that can fail and is represented as Parser step -- initial extract. Before we drive a parser we call the -- initial action to retrieve the initial state of the fold. The -- parser driver invokes step with the state returned by the -- previous step and the next input element. It results into a new state -- and a command to the driver represented by Step type. The -- driver keeps invoking the step function until it stops or fails. At -- any point of time the driver can call extract to inspect the -- result of the fold. If the parser hits the end of input -- extract is called. It may result in an error or an output -- value. -- -- Pre-release data Parser a m b Parser :: (s -> a -> m (Step s b)) -> m (Initial s b) -> (s -> m (Step s b)) -> Parser a m b -- | This exception is used when a parser ultimately fails, the user of the -- parser is intimated via this exception. -- -- Pre-release newtype ParseError ParseError :: String -> ParseError -- | The return type of a Parser step. -- -- The parse operation feeds the input stream to the parser one element -- at a time, representing a parse Step. The parser may or may not -- consume the item and returns a result. If the result is Partial -- we can either extract the result or feed more input to the parser. If -- the result is Continue, we must feed more input in order to get -- a result. If the parser returns Done then the parser can no -- longer take any more input. -- -- If the result is Continue, the parse operation retains the -- input in a backtracking buffer, in case the parser may ask to -- backtrack in future. Whenever a 'Partial n' result is returned we -- first backtrack by n elements in the input and then release -- any remaining backtracking buffer. Similarly, 'Continue n' backtracks -- to n elements before the current position and starts feeding -- the input from that point for future invocations of the parser. -- -- If parser is not yet done, we can use the extract operation -- on the state of the parser to extract a result. If the parser -- has not yet yielded a result, the operation fails with a -- ParseError exception. If the parser yielded a Partial -- result in the past the last partial result is returned. Therefore, if -- a parser yields a partial result once it cannot fail later on. -- -- The parser can never backtrack beyond the position where the last -- partial result left it at. The parser must ensure that the backtrack -- position is always after that. -- -- Pre-release data Step s b -- | Partial count state. The following hold on Partial result: -- --
--
--   1. extract on state would succeed and give a result.
--
--   2. Input stream position is reset to current position - count.
--
--   3. All input before the new position is dropped. The parser can never backtrack beyond this position.
--
Partial :: !Int -> !s -> Step s b -- | Continue count state. The following hold on a Continue -- result: -- --
--
--   1. If there was a Partial result in the past, extract on state would give that result as Done, otherwise it may return Error or Continue.
--
--   2. Input stream position is reset to current position - count.
--
--   3. The input is retained in a backtrack buffer.
--
Continue :: !Int -> !s -> Step s b -- | Done with leftover input count and result. -- -- Done count result means the parser has finished, it will -- accept no more input, last count elements from the input are -- unused and the result of the parser is in result. Done :: !Int -> !b -> Step s b -- | Parser failed without generating any output. -- -- The parsing operation may backtrack to the beginning and try another -- alternative. Error :: !String -> Step s b -- | The type of a Parser's initial action. -- -- Internal data Initial s b -- | Wait for step function to be called with state s. IPartial :: !s -> Initial s b -- | Return a result right away without an input. IDone :: !b -> Initial s b -- | Return an error right away without an input. IError :: !String -> Initial s b -- | Make a Fold from a Parser. The fold just throws an -- exception if the parser fails or tries to backtrack. -- -- This can be useful in combinators that accept a Fold and we know that -- a Parser cannot fail or failure exception is acceptable as there is no -- way to recover. -- -- Pre-release toFold :: Monad m => Parser a m b -> Fold m a b -- | Make a Parser from a Fold. This parser sends all of its -- input to the fold. fromFold :: Monad m => Fold m a b -> Parser a m b -- | Convert a Maybe returning fold to an error returning parser. The first -- argument is the error message that the parser would return when the -- fold returns Nothing. -- -- Pre-release fromFoldMaybe :: Monad m => String -> Fold m a (Maybe b) -> Parser a m b -- | Stateful scan on the input of a parser using a Fold. -- -- Unimplemented postscan :: Fold m a b -> Parser b m c -> Parser a m c -- | Peek the head element of a stream, without consuming it. Fails if it -- encounters end of input. -- --
--   >>> Stream.parse ((,) <$> Parser.peek <*> Parser.satisfy (> 0)) $ Stream.fromList [1]
--   Right (1,1)
--   
-- --
--   peek = lookAhead (satisfy (const True))
--   
peek :: Monad m => Parser a m a -- | Consume one element from the head of the stream. Fails if it -- encounters end of input. -- --
--   >>> one = Parser.satisfy $ const True
--   
one :: Monad m => Parser a m a -- | Match a specific element. -- --
--   >>> oneEq x = Parser.satisfy (== x)
--   
oneEq :: (Monad m, Eq a) => a -> Parser a m a -- | Match anything other than the supplied element. -- --
--   >>> oneNotEq x = Parser.satisfy (/= x)
--   
oneNotEq :: (Monad m, Eq a) => a -> Parser a m a -- | Match any one of the elements in the supplied list. -- --
--   >>> oneOf xs = Parser.satisfy (`Foldable.elem` xs)
--   
-- -- When performance matters a pattern matching predicate could be more -- efficient than a Foldable datatype: -- --
--   let p x =
--         case x of
--            'a' -> True
--            'e' -> True
--            _   -> False
--   in satisfy p
--   
-- -- GHC may use a binary search instead of linear search in the list. -- Alternatively, you can also use an array instead of list for storage -- and search. oneOf :: (Monad m, Eq a, Foldable f) => f a -> Parser a m a -- | See performance notes in oneOf. -- --
--   >>> noneOf xs = Parser.satisfy (`Foldable.notElem` xs)
--   
noneOf :: (Monad m, Eq a, Foldable f) => f a -> Parser a m a -- | Succeeds if we are at the end of input, fails otherwise. -- --
--   >>> Stream.parse ((,) <$> Parser.satisfy (> 0) <*> Parser.eof) $ Stream.fromList [1]
--   Right (1,())
--   
eof :: Monad m => Parser a m () -- | Returns the next element if it passes the predicate, fails otherwise. -- --
--   >>> Stream.parse (Parser.satisfy (== 1)) $ Stream.fromList [1,0,1]
--   Right 1
--   
-- --
--   >>> toMaybe f x = if f x then Just x else Nothing
--   
--   >>> satisfy f = Parser.maybe (toMaybe f)
--   
satisfy :: Monad m => (a -> Bool) -> Parser a m a -- | Map a Maybe returning function on the next element in the -- stream. The parser fails if the function returns Nothing -- otherwise returns the Just value. -- --
--   >>> toEither = Maybe.maybe (Left "maybe: predicate failed") Right
--   
--   >>> maybe f = Parser.either (toEither . f)
--   
-- --
--   >>> maybe f = Parser.fromFoldMaybe "maybe: predicate failed" (Fold.maybe f)
--   
-- -- Pre-release maybe :: Monad m => (a -> Maybe b) -> Parser a m b -- | Map an Either returning function on the next element in the -- stream. If the function returns 'Left err', the parser fails with the -- error message err otherwise returns the Right value. -- -- Pre-release either :: Monad m => (a -> Either String b) -> Parser a m b -- | Run a parser without consuming the input. lookAhead :: Monad m => Parser a m b -> Parser a m b -- | takeBetween m n takes a minimum of m and a maximum -- of n input elements and folds them using the supplied fold. -- -- Stops after n elements. Fails if the stream ends before -- m elements could be taken. -- -- Examples: - -- --
--   >>> :{
--     takeBetween' low high ls = Stream.parse prsr (Stream.fromList ls)
--       where prsr = Parser.takeBetween low high Fold.toList
--   :}
--   
-- --
--   >>> takeBetween' 2 4 [1, 2, 3, 4, 5]
--   Right [1,2,3,4]
--   
-- --
--   >>> takeBetween' 2 4 [1, 2]
--   Right [1,2]
--   
-- --
--   >>> takeBetween' 2 4 [1]
--   Left (ParseError "takeBetween: Expecting alteast 2 elements, got 1")
--   
-- --
--   >>> takeBetween' 0 0 [1, 2]
--   Right []
--   
-- --
--   >>> takeBetween' 0 1 []
--   Right []
--   
-- -- takeBetween is the most general take operation, other take -- operations can be defined in terms of takeBetween. For example: -- --
--   >>> take n = Parser.takeBetween 0 n
--   
--   >>> takeEQ n = Parser.takeBetween n n
--   
--   >>> takeGE n = Parser.takeBetween n maxBound
--   
-- -- Pre-release takeBetween :: Monad m => Int -> Int -> Fold m a b -> Parser a m b -- | Stops after taking exactly n input elements. -- -- -- --
--   >>> Stream.parse (Parser.takeEQ 2 Fold.toList) $ Stream.fromList [1,0,1]
--   Right [1,0]
--   
-- --
--   >>> Stream.parse (Parser.takeEQ 4 Fold.toList) $ Stream.fromList [1,0,1]
--   Left (ParseError "takeEQ: Expecting exactly 4 elements, input terminated on 3")
--   
takeEQ :: Monad m => Int -> Fold m a b -> Parser a m b -- | Take at least n input elements, but can collect more. -- -- -- --
--   >>> Stream.parse (Parser.takeGE 4 Fold.toList) $ Stream.fromList [1,0,1]
--   Left (ParseError "takeGE: Expecting at least 4 elements, input terminated on 3")
--   
-- --
--   >>> Stream.parse (Parser.takeGE 4 Fold.toList) $ Stream.fromList [1,0,1,0,1]
--   Right [1,0,1,0,1]
--   
-- -- Pre-release takeGE :: Monad m => Int -> Fold m a b -> Parser a m b -- | Takes at-most n input elements. -- -- -- --
--   >>> Stream.parse (Parser.takeP 4 (Parser.takeEQ 2 Fold.toList)) $ Stream.fromList [1, 2, 3, 4, 5]
--   Right [1,2]
--   
-- --
--   >>> Stream.parse (Parser.takeP 4 (Parser.takeEQ 5 Fold.toList)) $ Stream.fromList [1, 2, 3, 4, 5]
--   Left (ParseError "takeEQ: Expecting exactly 5 elements, input terminated on 4")
--   
-- -- Internal takeP :: Monad m => Int -> Parser a m b -> Parser a m b -- | Match the input sequence with the supplied list and return it if -- successful. -- --
--   >>> listEq = Parser.listEqBy (==)
--   
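-- For example (a sketch, same import assumptions as the other examples):
--
--   >>> Stream.parse (Parser.listEq "hello") $ Stream.fromList "hello world"
--   Right "hello"
--   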
listEq :: (Monad m, Eq a) => [a] -> Parser a m [a] -- | Match the given sequence of elements using the given comparison -- function. Returns the original sequence if successful. -- -- Definition: -- --
--   >>> listEqBy cmp xs = Parser.streamEqBy cmp (Stream.fromList xs) *> Parser.fromPure xs
--   
-- -- Examples: -- --
--   >>> Stream.parse (Parser.listEqBy (==) "string") $ Stream.fromList "string"
--   Right "string"
--   
-- --
--   >>> Stream.parse (Parser.listEqBy (==) "mismatch") $ Stream.fromList "match"
--   Left (ParseError "streamEqBy: mismtach occurred")
--   
listEqBy :: Monad m => (a -> a -> Bool) -> [a] -> Parser a m [a] -- | Like listEqBy but uses a stream instead of a list and does not -- return the stream. streamEqBy :: Monad m => (a -> a -> Bool) -> Stream m a -> Parser a m () -- | Match if the input stream is a subsequence of the argument stream i.e. -- all the elements of the input stream occur, in order, in the argument -- stream. The elements do not have to occur consecutively. A sequence is -- considered a subsequence of itself. subsequenceBy :: (a -> a -> Bool) -> Stream m a -> Parser a m () -- | Collect stream elements until an element fails the predicate. The -- element on which the predicate fails is returned back to the input -- stream. -- -- -- --
--   >>> Stream.parse (Parser.takeWhile (== 0) Fold.toList) $ Stream.fromList [0,0,1,0,1]
--   Right [0,0]
--   
-- --
--   >>> takeWhile cond f = Parser.takeWhileP cond (Parser.fromFold f)
--   
-- -- We can implement a breakOn using takeWhile: -- --
--   breakOn p = takeWhile (not . p)
--   
takeWhile :: Monad m => (a -> Bool) -> Fold m a b -> Parser a m b -- | Like takeWhile but uses a Parser instead of a -- Fold to collect the input. The combinator stops when the -- condition fails or if the collecting parser stops. -- -- Other interesting parsers can be implemented in terms of this parser: -- --
--   >>> takeWhile1 cond p = Parser.takeWhileP cond (Parser.takeBetween 1 maxBound p)
--   
--   >>> takeWhileBetween cond m n p = Parser.takeWhileP cond (Parser.takeBetween m n p)
--   
-- -- Stops: when the condition fails or the collecting parser stops. Fails: -- when the collecting parser fails. -- -- Pre-release takeWhileP :: Monad m => (a -> Bool) -> Parser a m b -> Parser a m b -- | Like takeWhile but takes at least one element otherwise fails. -- --
--   >>> takeWhile1 cond p = Parser.takeWhileP cond (Parser.takeBetween 1 maxBound p)
--   
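-- For example (a sketch; output as expected under the usual imports):
--
--   >>> Stream.parse (Parser.takeWhile1 (== 0) Fold.toList) $ Stream.fromList [0,0,1,0]
--   Right [0,0]
--   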
takeWhile1 :: Monad m => (a -> Bool) -> Fold m a b -> Parser a m b -- | Drain the input as long as the predicate succeeds, running the effects -- and discarding the results. -- -- This is also called skipWhile in some parsing libraries. -- --
--   >>> dropWhile p = Parser.takeWhile p Fold.drain
--   
dropWhile :: Monad m => (a -> Bool) -> Parser a m () -- | takeEndBy cond parser parses a token that ends by a separator -- chosen by the supplied predicate. The separator is also taken with the -- token. -- -- This can be combined with other parsers to implement other interesting -- parsers as follows: -- --
--   >>> takeEndByLE cond n p = Parser.takeEndBy cond (Parser.fromFold $ Fold.take n p)
--   
--   >>> takeEndByBetween cond m n p = Parser.takeEndBy cond (Parser.takeBetween m n p)
--   
-- --
--   >>> takeEndBy = Parser.takeEndByEsc (const False)
--   
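-- For instance, a minimal sketch keeping the separator with the token (same
-- import assumptions; output as expected):
--
--   >>> p = Parser.takeEndBy (== ';') (Parser.fromFold Fold.toList)
--   
--   >>> Stream.parse p $ Stream.fromList "hello; world"
--   Right "hello;"
--   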
-- -- See also "Streamly.Data.Fold.takeEndBy". Unlike the fold, the -- collecting parser in the takeEndBy parser can decide whether to fail -- or not if the stream does not end with separator. -- -- Pre-release takeEndBy :: Monad m => (a -> Bool) -> Parser a m b -> Parser a m b -- | Like takeEndBy but the separator is dropped. -- -- See also "Streamly.Data.Fold.takeEndBy_". -- -- Pre-release takeEndBy_ :: (a -> Bool) -> Parser a m b -> Parser a m b -- | Like takeEndBy but the separator elements can be escaped using -- an escape char determined by the first predicate. The escape -- characters are removed. -- -- pre-release takeEndByEsc :: Monad m => (a -> Bool) -> (a -> Bool) -> Parser a m b -> Parser a m b -- | Parse a token that starts with an element chosen by the predicate. The -- parser fails if the input does not start with the selected element. -- -- -- --
--   >>> splitWithPrefix p f = Stream.parseMany (Parser.takeStartBy p f)
--   
-- -- Examples: - -- --
--   >>> p = Parser.takeStartBy (== ',') Fold.toList
--   
--   >>> leadingComma = Stream.parse p . Stream.fromList
--   
--   >>> leadingComma "a,b"
--   Left (ParseError "takeStartBy: missing frame start")
--   ...
--   
--   >>> leadingComma ",,"
--   Right ","
--   
--   >>> leadingComma ",a,b"
--   Right ",a"
--   
--   >>> leadingComma ""
--   Right ""
--   
-- -- Pre-release takeStartBy :: Monad m => (a -> Bool) -> Fold m a b -> Parser a m b -- | Like takeStartBy but drops the separator. -- --
--   >>> takeStartBy_ isBegin = Parser.takeFramedByGeneric Nothing (Just isBegin) Nothing
--   
takeStartBy_ :: Monad m => (a -> Bool) -> Fold m a b -> Parser a m b -- | Take either the separator or the token. Separator is a Left value and -- token is Right value. -- -- Unimplemented takeEitherSepBy :: (a -> Bool) -> Fold m (Either a b) c -> Parser a m c -- | Like splitOn but strips leading, trailing, and repeated -- separators. Therefore, ".a..b." having . as the -- separator would be parsed as ["a","b"]. In other words, its -- like parsing words from whitespace separated text. -- -- -- --
--   >>> wordBy = Parser.wordFramedBy (const False) (const False) (const False)
--   
-- --
--   S.wordsBy pred f = S.parseMany (PR.wordBy pred f)
--   
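-- For example, a single-word parse (a sketch; output as expected under the
-- usual imports):
--
--   >>> Stream.parse (Parser.wordBy (== '.') Fold.toList) $ Stream.fromList ".a..b."
--   Right "a"
--   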
wordBy :: Monad m => (a -> Bool) -> Fold m a b -> Parser a m b -- | Given an input stream [a,b,c,...] and a comparison function -- cmp, the parser assigns the element a to the first -- group, then if a `cmp` b is True b is also -- assigned to the same group. If a `cmp` c is True then -- c is also assigned to the same group and so on. When the -- comparison fails the parser is terminated. Each group is folded using -- the Fold f and the result of the fold is the result of -- the parser. -- -- -- --
--   >>> :{
--    runGroupsBy eq =
--        Stream.fold Fold.toList
--            . Stream.parseMany (Parser.groupBy eq Fold.toList)
--            . Stream.fromList
--   :}
--   
-- --
--   >>> runGroupsBy (<) []
--   []
--   
-- --
--   >>> runGroupsBy (<) [1]
--   [Right [1]]
--   
-- --
--   >>> runGroupsBy (<) [3, 5, 4, 1, 2, 0]
--   [Right [3,5,4],Right [1,2],Right [0]]
--   
groupBy :: Monad m => (a -> a -> Bool) -> Fold m a b -> Parser a m b -- | Unlike groupBy this combinator performs a rolling comparison of -- two successive elements in the input stream. Assuming the input stream -- is [a,b,c,...] and the comparison function is cmp, -- the parser first assigns the element a to the first group, -- then if a `cmp` b is True b is also assigned -- to the same group. If b `cmp` c is True then -- c is also assigned to the same group and so on. When the -- comparison fails the parser is terminated. Each group is folded using -- the Fold f and the result of the fold is the result of -- the parser. -- -- -- --
--   >>> :{
--    runGroupsByRolling eq =
--        Stream.fold Fold.toList
--            . Stream.parseMany (Parser.groupByRolling eq Fold.toList)
--            . Stream.fromList
--   :}
--   
-- --
--   >>> runGroupsByRolling (<) []
--   []
--   
-- --
--   >>> runGroupsByRolling (<) [1]
--   [Right [1]]
--   
-- --
--   >>> runGroupsByRolling (<) [3, 5, 4, 1, 2, 0]
--   [Right [3,5],Right [4],Right [1,2],Right [0]]
--   
-- -- Pre-release groupByRolling :: Monad m => (a -> a -> Bool) -> Fold m a b -> Parser a m b -- | Like groupByRolling, but if the predicate is True then -- collects using the first fold as long as the predicate holds -- True, if the predicate is False collects using the -- second fold as long as it remains False. Returns Left -- for the first case and Right for the second case. -- -- For example, if we want to detect sorted sequences in a stream, both -- ascending and descending cases we can use 'groupByRollingEither -- (<=) Fold.toList Fold.toList'. -- -- Pre-release groupByRollingEither :: Monad m => (a -> a -> Bool) -> Fold m a b -> Fold m a c -> Parser a m (Either b c) -- | Like wordBy but treats anything inside a pair of quotes as a -- single word, the quotes can be escaped by an escape character. -- Recursive quotes are possible if quote begin and end characters are -- different, quotes must be balanced. Outermost quotes are stripped. -- --
--   >>> braces = Parser.wordFramedBy (== '\\') (== '{') (== '}') isSpace Fold.toList
--   
--   >>> Stream.parse braces $ Stream.fromList "{ab} cd"
--   Right "ab"
--   
--   >>> Stream.parse braces $ Stream.fromList "{ab}{cd}"
--   Right "abcd"
--   
--   >>> Stream.parse braces $ Stream.fromList "a{b} cd"
--   Right "ab"
--   
--   >>> Stream.parse braces $ Stream.fromList "a{{b}} cd"
--   Right "a{b}"
--   
-- --
--   >>> quotes = Parser.wordFramedBy (== '\\') (== '"') (== '"') isSpace Fold.toList
--   
--   >>> Stream.parse quotes $ Stream.fromList "\"a\"\"b\""
--   Right "ab"
--   
wordFramedBy :: Monad m => (a -> Bool) -> (a -> Bool) -> (a -> Bool) -> (a -> Bool) -> Fold m a b -> Parser a m b -- | Quote and bracket aware word splitting with escaping. Like -- wordBy but word separators within specified quotes or brackets -- are ignored. Quotes and escape characters can be processed. If the end -- quote is different from the start quote it is called a bracket. The -- following quoting rules apply: -- -- -- -- We should note that unquoted and quoted escape processing are -- different. In unquoted part escape character is always removed. In -- quoted part it is removed only if followed by a special meaning -- character. This is consistent with how shell performs escape -- processing. wordWithQuotes :: (Monad m, Eq a) => Bool -> (a -> a -> Maybe a) -> a -> (a -> Maybe a) -> (a -> Bool) -> Fold m a b -> Parser a m b -- | wordWithQuotes without processing the quotes and escape -- function supplied to escape the quote char within a quote. Can be used -- to parse words keeping the quotes and escapes intact. -- --
--   >>> wordKeepQuotes = Parser.wordWithQuotes True (\_ _ -> Nothing)
--   
wordKeepQuotes :: (Monad m, Eq a) => a -> (a -> Maybe a) -> (a -> Bool) -> Fold m a b -> Parser a m b -- | wordWithQuotes with quote processing applied and escape -- function supplied to escape the quote char within a quote. Can be used -- to parse words and process the quoting and escaping at the same -- time. -- --
--   >>> wordProcessQuotes = Parser.wordWithQuotes False (\_ _ -> Nothing)
--   
wordProcessQuotes :: (Monad m, Eq a) => a -> (a -> Maybe a) -> (a -> Bool) -> Fold m a b -> Parser a m b -- | takeFramedBy_ isBegin isEnd fold parses a token framed by a -- begin and an end predicate. -- --
--   >>> takeFramedBy_ = Parser.takeFramedByEsc_ (const False)
--   
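-- For example (a sketch, same import assumptions as the other examples):
--
--   >>> p = Parser.takeFramedBy_ (== '{') (== '}') Fold.toList
--   
--   >>> Stream.parse p $ Stream.fromList "{hello}"
--   Right "hello"
--   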
takeFramedBy_ :: Monad m => (a -> Bool) -> (a -> Bool) -> Fold m a b -> Parser a m b -- | takeFramedByEsc_ isEsc isBegin isEnd fold parses a token -- framed using a begin and end predicate, and an escape character. The -- frame begin and end characters lose their special meaning if preceded -- by the escape character. -- -- Nested frames are allowed if begin and end markers are different, -- nested frames must be balanced unless escaped, nested frame markers -- are emitted as it is. -- -- For example, -- --
--   >>> p = Parser.takeFramedByEsc_ (== '\\') (== '{') (== '}') Fold.toList
--   
--   >>> Stream.parse p $ Stream.fromList "{hello}"
--   Right "hello"
--   
--   >>> Stream.parse p $ Stream.fromList "{hello {world}}"
--   Right "hello {world}"
--   
--   >>> Stream.parse p $ Stream.fromList "{hello \\{world}"
--   Right "hello {world"
--   
--   >>> Stream.parse p $ Stream.fromList "{hello {world}"
--   Left (ParseError "takeFramedByEsc_: missing frame end")
--   
-- -- Pre-release takeFramedByEsc_ :: Monad m => (a -> Bool) -> (a -> Bool) -> (a -> Bool) -> Fold m a b -> Parser a m b takeFramedByGeneric :: Monad m => Maybe (a -> Bool) -> Maybe (a -> Bool) -> Maybe (a -> Bool) -> Fold m a b -> Parser a m b -- | Parse a block enclosed within open, close brackets. Block contents may -- be quoted, brackets inside quotes are ignored. Quoting characters can -- be used within quotes if escaped. A block can have a nested block -- inside it. -- -- Quote begin and end chars are the same. Block brackets and quote chars -- must not overlap. Block start and end brackets must be different for -- nesting blocks within blocks. -- --
--   >>> p = Parser.blockWithQuotes (== '\\') (== '"') '{' '}' Fold.toList
--   
--   >>> Stream.parse p $ Stream.fromList "{msg: \"hello world\"}"
--   Right "msg: \"hello world\""
--   
blockWithQuotes :: (Monad m, Eq a) => (a -> Bool) -> (a -> Bool) -> a -> a -> Fold m a b -> Parser a m b -- | span p f1 f2 composes folds f1 and f2 such -- that f1 consumes the input as long as the predicate -- p is True. f2 consumes the rest of the input. -- --
--   > let span_ p xs = Stream.parse (Parser.span p Fold.toList Fold.toList) $ Stream.fromList xs
--   
--   > span_ (< 1) [1,2,3]
--   
--   > span_ (< 2) [1,2,3]
--   
--   > span_ (< 4) [1,2,3]
--   
-- -- Pre-release span :: Monad m => (a -> Bool) -> Fold m a b -> Fold m a c -> Parser a m (b, c) -- | Break the input stream into two groups, the first group takes the -- input as long as the predicate applied to the first element of the -- stream and next input element holds True, the second group -- takes the rest of the input. -- -- Pre-release spanBy :: Monad m => (a -> a -> Bool) -> Fold m a b -> Fold m a c -> Parser a m (b, c) -- | Like spanBy but applies the predicate in a rolling fashion i.e. -- predicate is applied to the previous and the next input elements. -- -- Pre-release spanByRolling :: Monad m => (a -> a -> Bool) -> Fold m a b -> Fold m a c -> Parser a m (b, c) -- | sequence f p collects sequential parses of parsers in a -- serial stream p using the fold f. Fails if the input -- ends or any of the parsers fail. -- -- Pre-release sequence :: Monad m => Stream m (Parser a m b) -> Fold m b c -> Parser a m c -- | count n f p collects exactly n sequential parses of -- parser p using the fold f. Fails if the input ends -- or the parser fails before n results are collected. -- --
--   >>> count n = Parser.countBetween n n
--   
--   >>> count n p f = Parser.manyP p (Parser.takeEQ n f)
--   
-- -- Unimplemented count :: Int -> Parser a m b -> Fold m b c -> Parser a m c -- | countBetween m n f p collects between m and -- n sequential parses of parser p using the fold -- f. Stop after collecting n results. Fails if the -- input ends or the parser fails before m results are -- collected. -- --
--   >>> countBetween m n p f = Parser.manyP p (Parser.takeBetween m n f)
--   
-- -- Unimplemented countBetween :: Int -> Int -> Parser a m b -> Fold m b c -> Parser a m c -- | Like many but uses a Parser instead of a Fold to -- collect the results. Parsing stops or fails if the collecting parser -- stops or fails. -- -- Unimplemented manyP :: Parser a m b -> Parser b m c -> Parser a m c -- | Collect zero or more parses. Apply the supplied parser repeatedly on -- the input stream and push the parse results to a downstream fold. -- -- Stops: when the downstream fold stops or the parser fails. Fails: -- never, produces zero or more results. -- --
--   >>> many = Parser.countBetween 0 maxBound
--   
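-- For example (a sketch; output as expected under the usual imports):
--
--   >>> p = Parser.many (Parser.satisfy (> 0)) Fold.toList
--   
--   >>> Stream.parse p $ Stream.fromList [1,2,3,0,4]
--   Right [1,2,3]
--   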
-- -- Compare with many. many :: Monad m => Parser a m b -> Fold m b c -> Parser a m c -- | Collect one or more parses. Apply the supplied parser repeatedly on -- the input stream and push the parse results to a downstream fold. -- -- Stops: when the downstream fold stops or the parser fails. Fails: if -- it stops without producing a single result. -- --
--   >>> some p f = Parser.manyP p (Parser.takeGE 1 f)
--   
--   >>> some = Parser.countBetween 1 maxBound
--   
-- -- Compare with some. some :: Monad m => Parser a m b -> Fold m b c -> Parser a m c -- | Apply two parsers alternately to an input stream. The input stream is -- considered an interleaving of two patterns. The two parsers represent -- the two patterns. Parsing starts at the first parser and stops at the -- first parser. It can be used to parse an infix style pattern e.g. p1 p2 -- p1. Empty input or single parse of the first parser is accepted. -- --
--   >>> p1 = Parser.takeWhile1 (not . (== '+')) Fold.toList
--   
--   >>> p2 = Parser.satisfy (== '+')
--   
--   >>> p = Parser.deintercalate p1 p2 Fold.toList
--   
--   >>> Stream.parse p $ Stream.fromList ""
--   Right []
--   
--   >>> Stream.parse p $ Stream.fromList "1"
--   Right [Left "1"]
--   
--   >>> Stream.parse p $ Stream.fromList "1+"
--   Right [Left "1"]
--   
--   >>> Stream.parse p $ Stream.fromList "1+2+3"
--   Right [Left "1",Right '+',Left "2",Right '+',Left "3"]
--   
deintercalate :: Monad m => Parser a m x -> Parser a m y -> Fold m (Either x y) z -> Parser a m z -- | Apply two parsers alternately to an input stream. The input stream is -- considered an interleaving of two patterns. The two parsers represent -- the two patterns. Parsing starts at the first parser and stops at the -- first parser. It can be used to parse an infix style pattern e.g. p1 p2 -- p1. Empty input or single parse of the first parser is accepted. -- --
--   >>> p1 = Parser.takeWhile1 (not . (== '+')) Fold.toList
--   
--   >>> p2 = Parser.satisfy (== '+')
--   
--   >>> p = Parser.deintercalate1 p1 p2 Fold.toList
--   
--   >>> Stream.parse p $ Stream.fromList ""
--   Left (ParseError "takeWhile1: end of input")
--   
--   >>> Stream.parse p $ Stream.fromList "1"
--   Right [Left "1"]
--   
--   >>> Stream.parse p $ Stream.fromList "1+"
--   Right [Left "1"]
--   
--   >>> Stream.parse p $ Stream.fromList "1+2+3"
--   Right [Left "1",Right '+',Left "2",Right '+',Left "3"]
--   
deintercalate1 :: Monad m => Parser a m x -> Parser a m y -> Fold m (Either x y) z -> Parser a m z -- | Like deintercalate but the entire input must satisfy the -- pattern otherwise the parser fails. This is many times faster than -- deintercalate. -- --
--   >>> p1 = Parser.takeWhile1 (not . (== '+')) Fold.toList
--   
--   >>> p2 = Parser.satisfy (== '+')
--   
--   >>> p = Parser.deintercalateAll p1 p2 Fold.toList
--   
--   >>> Stream.parse p $ Stream.fromList ""
--   Right []
--   
--   >>> Stream.parse p $ Stream.fromList "1"
--   Right [Left "1"]
--   
--   >>> Stream.parse p $ Stream.fromList "1+"
--   Left (ParseError "takeWhile1: end of input")
--   
--   >>> Stream.parse p $ Stream.fromList "1+2+3"
--   Right [Left "1",Right '+',Left "2",Right '+',Left "3"]
--   
deintercalateAll :: Monad m => Parser a m x -> Parser a m y -> Fold m (Either x y) z -> Parser a m z -- | Like sepBy but requires at least one successful parse. -- -- Definition: -- --
--   >>> sepBy1 p1 p2 f = Parser.deintercalate1 p1 p2 (Fold.catLefts f)
--   
-- -- Examples: -- --
--   >>> p1 = Parser.takeWhile1 (not . (== '+')) Fold.toList
--   
--   >>> p2 = Parser.satisfy (== '+')
--   
--   >>> p = Parser.sepBy1 p1 p2 Fold.toList
--   
--   >>> Stream.parse p $ Stream.fromList ""
--   Left (ParseError "takeWhile1: end of input")
--   
--   >>> Stream.parse p $ Stream.fromList "1"
--   Right ["1"]
--   
--   >>> Stream.parse p $ Stream.fromList "1+"
--   Right ["1"]
--   
--   >>> Stream.parse p $ Stream.fromList "1+2+3"
--   Right ["1","2","3"]
--   
sepBy1 :: Monad m => Parser a m b -> Parser a m x -> Fold m b c -> Parser a m c -- | Apply two parsers alternately to an input stream. Parsing starts at -- the first parser and stops at the first parser. The output of the -- first parser is emitted and the output of the second parser is -- discarded. It can be used to parse an infix style pattern e.g. p1 p2 p1. -- Empty input or single parse of the first parser is accepted. -- -- Definitions: -- --
--   >>> sepBy p1 p2 f = Parser.deintercalate p1 p2 (Fold.catLefts f)
--   
--   >>> sepBy p1 p2 f = Parser.sepBy1 p1 p2 f <|> Parser.fromEffect (Fold.extractM f)
--   
-- -- Examples: -- --
--   >>> p1 = Parser.takeWhile1 (not . (== '+')) Fold.toList
--   
--   >>> p2 = Parser.satisfy (== '+')
--   
--   >>> p = Parser.sepBy p1 p2 Fold.toList
--   
--   >>> Stream.parse p $ Stream.fromList ""
--   Right []
--   
--   >>> Stream.parse p $ Stream.fromList "1"
--   Right ["1"]
--   
--   >>> Stream.parse p $ Stream.fromList "1+"
--   Right ["1"]
--   
--   >>> Stream.parse p $ Stream.fromList "1+2+3"
--   Right ["1","2","3"]
--   
sepBy :: Monad m => Parser a m b -> Parser a m x -> Fold m b c -> Parser a m c -- | Non-backtracking version of sepBy. Several times faster. sepByAll :: Monad m => Parser a m b -> Parser a m x -> Fold m b c -> Parser a m c -- | Like manyTill but uses a Parser to collect the results -- instead of a Fold. Parsing stops or fails if the collecting -- parser stops or fails. -- -- We can implement parsers like the following using manyTillP: -- --
--   countBetweenTill m n f p = manyTillP (takeBetween m n f) p
--   
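-- For the implemented manyTill below, a small usage sketch, assuming the
-- qualified Parser/Fold/Stream imports used in this module's doctests; upto
-- is a hypothetical name and the result shown is indicative:
--
--   upto p = Parser.manyTill Parser.one (Parser.satisfy p) Fold.toList
--   -- Stream.parse (upto (== ';')) (Stream.fromList "ab;cd")
--   --   is expected to yield Right "ab"
--   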
-- -- Unimplemented manyTillP :: Parser a m b -> Parser a m x -> Parser b m c -> Parser a m c -- | manyTill chunking test f tries the parser test on -- the input, if test fails it backtracks and tries -- chunking, after chunking succeeds test is -- tried again and so on. The parser stops when test succeeds. -- The output of test is discarded and the output of -- chunking is accumulated by the supplied fold. The parser -- fails if chunking fails. -- -- Stops when the fold f stops. manyTill :: Monad m => Parser a m b -> Parser a m x -> Fold m b c -> Parser a m c -- | manyThen f collect recover repeats the parser -- collect on the input and collects the output in the supplied -- fold. If the parser collect fails, the parser -- recover is run until it stops and then we start repeating the -- parser collect again. The parser fails if the recovery parser -- fails. -- -- For example, this can be used to find a key frame in a video stream -- after an error. -- -- Unimplemented manyThen :: Parser a m b -> Parser a m x -> Fold m b c -> Parser a m c -- | Apply a collection of parsers to an input stream in a round robin -- fashion. Each parser is applied until it stops and then we repeat -- starting with the first parser again. -- -- Unimplemented roundRobin :: t (Parser a m b) -> Fold m b c -> Parser a m c -- | Keep trying a parser up to a maximum of n failures. When the -- parser fails the input consumed till now is dropped and the new -- instance is tried on the fresh input. -- -- Unimplemented retryMaxTotal :: Int -> Parser a m b -> Fold m b c -> Parser a m c -- | Like retryMaxTotal but aborts after n successive -- failures. -- -- Unimplemented retryMaxSuccessive :: Int -> Parser a m b -> Fold m b c -> Parser a m c -- | Keep trying a parser until it succeeds. When the parser fails the -- input consumed till now is dropped and the new instance is tried on -- the fresh input. -- -- Unimplemented retry :: Parser a m b -> Parser a m b zipWithM :: Monad m => (a -> b -> m c) -> Stream m a -> Fold m c x -> Parser b m x -- | Zip the input of a fold with a stream. -- -- Pre-release zip :: Monad m => Stream m a -> Fold m (a, b) x -> Parser b m x -- | Pair each element of a fold input with its index, starting from index -- 0. -- -- Pre-release indexed :: forall m a b. Monad m => Fold m (Int, a) b -> Parser a m b -- | makeIndexFilter indexer filter predicate generates a fold -- filtering function using a fold indexing function that attaches an -- index to each input element and a filtering function that filters -- using ((index, element) -> Bool) as the predicate. -- -- For example: -- --
--   filterWithIndex = makeIndexFilter indexed filter
--   filterWithAbsTime = makeIndexFilter timestamped filter
--   filterWithRelTime = makeIndexFilter timeIndexed filter
--   
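-- For sampleFromthen below, a hedged sketch assuming the usual qualified
-- Parser/Fold/Stream imports; the result shown is the expected index-based
-- sampling, indicative only:
--
--   Stream.parse (Parser.sampleFromthen 0 2 Fold.toList) (Stream.fromList [1..10])
--   -- expected: Right [1,3,5,7,9], i.e. the elements at indices 0,2,4,...
--   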
-- -- Pre-release makeIndexFilter :: (Fold m (s, a) b -> Parser a m b) -> (((s, a) -> Bool) -> Fold m (s, a) b -> Fold m (s, a) b) -> ((s, a) -> Bool) -> Fold m a b -> Parser a m b -- | sampleFromthen offset stride samples the element at -- offset index and then every element at strides of -- stride. -- -- Pre-release sampleFromthen :: Monad m => Int -> Int -> Fold m a b -> Parser a m b -- | Return the next element of the input. Returns Nothing on end of -- input. Also known as head. -- -- Pre-release -- | Deprecated: Please use "fromFold Fold.one" instead next :: Monad m => Parser a m (Maybe a) instance (GHC.Show.Show a, GHC.Show.Show b) => GHC.Show.Show (Streamly.Internal.Data.Parser.Tuple'Fused a b) -- | A Producer is an Unfold with an extract -- function added to extract the state. It is more powerful but less -- general than an Unfold. -- -- A Producer represents steps of a loop generating a sequence of -- elements. While unfolds are closed representations of imperative loops -- with some opaque internal state, producers are open loops with the -- state being accessible to the user. -- -- Unlike an unfold, which runs a loop till completion, a producer can be -- stopped in the middle, its state can be extracted, examined, changed, -- and then it can be resumed later from the stopped state. -- -- A producer can be used in places where a CPS stream would otherwise be -- needed, because the state of the loop can be passed around. However, -- it can be much more efficient than CPS because it allows stream fusion -- and unnecessary function calls can be avoided. module Streamly.Internal.Data.Producer -- | A seed with a buffer. It allows us to unread or return some -- data after reading it. Useful in backtracked parsing. data Source a b -- | Make a source from a seed value. The buffer would start as empty. -- -- Pre-release source :: Maybe a -> Source a b -- | Return some unused data back to the source. The data is prepended (or -- consed) to the source. -- -- Pre-release unread :: [b] -> Source a b -> Source a b -- | Determine if the source is empty. isEmpty :: Source a b -> Bool -- | Convert a producer to a producer from a buffered source. Any buffered -- data is read first and then the seed is unfolded. -- -- Pre-release producer :: Monad m => Producer m a b -> Producer m (Source a b) b parse :: Monad m => Parser a m b -> Producer m (Source s a) a -> Source s a -> m (Either ParseError b, Source s a) -- | Apply a parser repeatedly on a buffered source producer to generate a -- producer of parsed values. -- -- Pre-release parseMany :: Monad m => Parser a m b -> Producer m (Source x a) a -> Producer m (Source x a) (Either ParseError b) parseManyD :: Monad m => Parser a m b -> Producer m (Source x a) a -> Producer m (Source x a) (Either ParseError b) -- | A Producer m a b is a generator of a stream of values of type -- b from a seed of type a in Monad m. -- -- Pre-release data Producer m a b -- |
--   Producer step inject extract
--   
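-- A minimal sketch of resumable parsing with a buffered Source, using the
-- source, producer, fromList and parse functions documented above; it assumes
-- this module imported qualified as Producer along with the usual Parser and
-- Fold imports, and the results are indicative:
--
--   do let src = Producer.source (Just "hello world")
--          prod = Producer.producer Producer.fromList
--      (res, leftover) <- Producer.parse (Parser.takeWhile (/= ' ') Fold.toList) prod src
--      -- res is expected to be Right "hello"; leftover buffers the rest of
--      -- the input and can be fed to Producer.parse again to resume.
--   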
Producer :: (s -> m (Step s b)) -> (a -> m s) -> (s -> m a) -> Producer m a b nil :: Monad m => Producer m a b nilM :: Monad m => (a -> m c) -> Producer m a b unfoldrM :: Monad m => (a -> m (Maybe (b, a))) -> Producer m a b -- | Convert a list of pure values to a Stream -- -- Pre-release fromList :: Monad m => Producer m [a] a -- | Interconvert the producer between two interconvertible input types. -- -- Pre-release translate :: Functor m => (a -> c) -> (c -> a) -> Producer m c b -> Producer m a b -- | Map the producer input to another value of the same type. -- -- Pre-release lmap :: (a -> a) -> Producer m a b -> Producer m a b -- | State representing a nested loop. data NestedLoop s1 s2 OuterLoop :: s1 -> NestedLoop s1 s2 InnerLoop :: s1 -> s2 -> NestedLoop s1 s2 -- | Apply the second unfold to each output element of the first unfold and -- flatten the output in a single stream. -- -- Pre-release concat :: Monad m => Producer m a b -> Producer m b c -> Producer m (NestedLoop a b) c -- | Simplify a producer to an unfold. -- -- Pre-release simplify :: Producer m a b -> Unfold m a b -- | Convert a StreamD stream into a producer. -- -- Pre-release fromStreamD :: Monad m => Producer m (Stream m a) a -- | Parsers are stream consumers like folds with the following -- differences: -- -- -- -- This module implements parsers with stream fusion which compile to -- efficient loops comparable to the speed of C. -- --

Using Parsers

-- -- This module provides elementary parsers and parser combinators that -- can be used to parse a stream of data. Additionally, all the folds -- from the Streamly.Data.Fold module can be converted to parsers -- using fromFold. All the parsing functionality provided by -- popular parsing libraries, and more is available. Also see -- Streamly.Unicode.Parser module for Char stream parsers. -- -- A data stream can be transformed to a stream of parsed data elements. -- Parser combinators can be used to create a pipeline of folds or -- parsers such that the next fold or parser consumes the result of the -- previous parser. See parse and parseMany to run these -- parsers on a stream. -- --

Parser vs ParserK

-- -- There are two functionally equivalent parsing modules, -- Streamly.Data.Parser (this module) and -- Streamly.Data.ParserK. The latter is a CPS based wrapper over -- the former, and can be used for parsing in general. -- Streamly.Data.Parser enables stream fusion and should be -- preferred over Streamly.Data.ParserK for high performance -- stream parsing use cases. However, there are a few cases where this -- module is not suitable and ParserK should be used instead. -- -- For static fusion, parser combinators have to use strict pattern -- matching on arguments of type Parser. This leads to an infinite loop when -- a parser is defined recursively, due to strict evaluation of the -- recursive call. For example, the following implementation loops -- infinitely because of the recursive use of parser p in the -- *> combinator: -- --
--   >>> import Streamly.Data.Parser (Parser)
--   
--   >>> import qualified Streamly.Data.Fold as Fold
--   
--   >>> import qualified Streamly.Data.Parser as Parser
--   
--   >>> import qualified Streamly.Data.Stream as Stream
--   
--   >>> import Control.Applicative ((<|>))
--   
-- --
--   >>> :{
--   
--   >>> p :: Monad m => Parser Char m String
--   
--   >>> p = Parser.satisfy (== '(') *> p <|> Parser.fromFold Fold.toList
--   
--   >>> :}
--   
-- -- Use ParserK when recursive use is required: -- --
--   >>> import Streamly.Data.ParserK (ParserK)
--   
--   >>> import qualified Streamly.Data.StreamK as StreamK
--   
--   >>> import qualified Streamly.Internal.Data.StreamK as StreamK (parse)
--   
--   >>> import qualified Streamly.Internal.Data.ParserK as ParserK (adapt)
--   
-- --
--   >>> :{
--   
--   >>> p :: Monad m => ParserK Char m String
--   
--   >>> p = ParserK.adapt (Parser.satisfy (== '(')) *> p <|> ParserK.adapt (Parser.fromFold Fold.toList)
--   
--   >>> :}
--   
-- --
--   >>> StreamK.parse p $ StreamK.fromStream $ Stream.fromList "hello"
--   Right "hello"
--   
-- -- For this reason Applicative, Alternative or Monad compositions with -- recursion cannot be used with the Parser type. Alternative type -- class based operations like asum and Alternative based -- generic parser combinators use recursion. Similarly, Applicative type -- class based operations like sequence use recursion. Custom -- implementations of many such operations are provided in this module -- (e.g. some, many), and those should be used instead. -- -- Another limitation of the Parser type is its quadratic complexity, -- which causes a slowdown when too many nested compositions are used. -- Especially Applicative, Monad, Alternative instances, and sequenced -- parsing operations (e.g. nested one, and splitWith) -- degrade the performance quadratically (O(n^2)) when combined -- n times; roughly 8 or fewer sequenced parsers are fine. READ -- THE DOCS OF APPLICATIVE, MONAD AND ALTERNATIVE INSTANCES. -- --

Streaming Parsers

-- -- With ParserK you can use the generic Alternative type class -- based parsers from the parser-combinators library or similar. -- However, we recommend that you use the equivalent functionality from -- this module for better performance and for streaming behavior. -- -- Firstly, the combinators in this module are faster due to stream -- fusion. Secondly, these are streaming in nature as the results can be -- passed directly to other stream consumers (folds or parsers). The -- Alternative type class based parsers would end up buffering all the -- results in lists before they can be consumed. -- -- When recursion or heavy nesting is needed use ParserK. -- --

Error Reporting

-- -- These parsers do not report the error context (e.g. line number or -- column). This may be supported in future. -- --

Monad Transformer Stack

-- -- MonadTrans instance is not provided. If the Parser -- type is the top most layer (which should be the case almost always) -- you can just use fromEffect to execute the lower layer monad -- effects. -- --

Parser vs ParserK Implementation

-- -- The Parser type represents a stream consumer by composing state -- as data which enables stream fusion. Stream fusion generates a tight -- loop without any constructor allocations between the stages, providing -- C like performance for the loop. Stream fusion works when multiple -- functions are combined in a pipeline statically. Therefore, the -- operations in this module must be inlined and must not be used -- recursively to allow for stream fusion. -- -- The ParserK type represents a stream consumer by composing -- function calls, therefore, a function call overhead is incurred at -- each composition. It is quite fast in general but may be a few times -- slower than a fused parser. However, it allows for scalable dynamic -- composition; in particular, parsers can be used in recursive calls. Using -- the ParserK type, operations like splitWith provide -- linear (O(n)) performance with respect to the number of compositions. -- --

Experimental APIs

-- -- Please refer to Streamly.Internal.Data.Parser for functions -- that have not yet been released. module Streamly.Data.Parser -- | A parser is a fold that can fail and is represented as Parser step -- initial extract. Before we drive a parser we call the -- initial action to retrieve the initial state of the fold. The -- parser driver invokes step with the state returned by the -- previous step and the next input element. It results in a new state -- and a command to the driver represented by the Step type. The -- driver keeps invoking the step function until it stops or fails. At -- any point of time the driver can call extract to inspect the -- result of the fold. If the parser hits the end of input -- extract is called. It may result in an error or an output -- value. -- -- Pre-release data Parser a m b -- | Make a Parser from a Fold. This parser sends all of its -- input to the fold. fromFold :: Monad m => Fold m a b -> Parser a m b -- | A parser that always yields a pure value without consuming any input. fromPure :: Monad m => b -> Parser a m b -- | A parser that always yields the result of an effectful action without -- consuming any input. fromEffect :: Monad m => m b -> Parser a m b -- | A parser that always fails with an error message without consuming any -- input. die :: Monad m => String -> Parser a m b -- | Peek the head element of a stream, without consuming it. Fails if it -- encounters end of input. -- --
--   >>> Stream.parse ((,) <$> Parser.peek <*> Parser.satisfy (> 0)) $ Stream.fromList [1]
--   Right (1,1)
--   
-- --
--   peek = lookAhead (satisfy (const True))
--   
peek :: Monad m => Parser a m a -- | Succeeds if we are at the end of input, fails otherwise. -- --
--   >>> Stream.parse ((,) <$> Parser.satisfy (> 0) <*> Parser.eof) $ Stream.fromList [1]
--   Right (1,())
--   
eof :: Monad m => Parser a m () -- | Consume one element from the head of the stream. Fails if it -- encounters end of input. -- --
--   >>> one = Parser.satisfy $ const True
--   
one :: Monad m => Parser a m a -- | Match any one of the elements in the supplied list. -- --
--   >>> oneOf xs = Parser.satisfy (`Foldable.elem` xs)
--   
-- -- When performance matters a pattern matching predicate could be more -- efficient than a Foldable datatype: -- --
--   let p x =
--      case x of
--         'a' -> True
--         'e' -> True
--         _   -> False
--   in satisfy p
--   
-- -- GHC may use a binary search instead of linear search in the list. -- Alternatively, you can also use an array instead of list for storage -- and search. oneOf :: (Monad m, Eq a, Foldable f) => f a -> Parser a m a -- | See performance notes in oneOf. -- --
--   >>> noneOf xs = Parser.satisfy (`Foldable.notElem` xs)
--   
noneOf :: (Monad m, Eq a, Foldable f) => f a -> Parser a m a -- | Returns the next element if it passes the predicate, fails otherwise. -- --
--   >>> Stream.parse (Parser.satisfy (== 1)) $ Stream.fromList [1,0,1]
--   Right 1
--   
-- --
--   >>> toMaybe f x = if f x then Just x else Nothing
--   
--   >>> satisfy f = Parser.maybe (toMaybe f)
--   
satisfy :: Monad m => (a -> Bool) -> Parser a m a -- | Like listEqBy but uses a stream instead of a list and does not -- return the stream. streamEqBy :: Monad m => (a -> a -> Bool) -> Stream m a -> Parser a m () -- | Match the given sequence of elements using the given comparison -- function. Returns the original sequence if successful. -- -- Definition: -- --
--   >>> listEqBy cmp xs = Parser.streamEqBy cmp (Stream.fromList xs) *> Parser.fromPure xs
--   
-- -- Examples: -- --
--   >>> Stream.parse (Parser.listEqBy (==) "string") $ Stream.fromList "string"
--   Right "string"
--   
-- --
--   >>> Stream.parse (Parser.listEqBy (==) "mismatch") $ Stream.fromList "match"
--   Left (ParseError "streamEqBy: mismtach occurred")
--   
listEqBy :: Monad m => (a -> a -> Bool) -> [a] -> Parser a m [a] -- | Match the input sequence with the supplied list and return it if -- successful. -- --
--   >>> listEq = Parser.listEqBy (==)
--   
listEq :: (Monad m, Eq a) => [a] -> Parser a m [a] -- | lmap f parser maps the function f on the input of -- the parser. -- --
--   >>> Stream.parse (Parser.lmap (\x -> x * x) (Parser.fromFold Fold.sum)) (Stream.enumerateFromTo 1 100)
--   Right 338350
--   
-- --
--   lmap = Parser.lmapM return
--   
lmap :: (a -> b) -> Parser b m r -> Parser a m r -- | lmapM f parser maps the monadic function f on the -- input of the parser. lmapM :: Monad m => (a -> m b) -> Parser b m r -> Parser a m r -- | rmapM f parser maps the monadic function f on the -- output of the parser. -- --
--   >>> rmap = fmap
--   
rmapM :: Monad m => (b -> m c) -> Parser a m b -> Parser a m c -- | Include only those elements that pass a predicate. -- --
--   >>> Stream.parse (Parser.filter (> 5) (Parser.fromFold Fold.sum)) $ Stream.fromList [1..10]
--   Right 40
--   
filter :: Monad m => (a -> Bool) -> Parser a m b -> Parser a m b -- | Run a parser without consuming the input. lookAhead :: Monad m => Parser a m b -> Parser a m b -- | Stops after taking exactly n input elements. -- -- -- --
--   >>> Stream.parse (Parser.takeEQ 2 Fold.toList) $ Stream.fromList [1,0,1]
--   Right [1,0]
--   
-- --
--   >>> Stream.parse (Parser.takeEQ 4 Fold.toList) $ Stream.fromList [1,0,1]
--   Left (ParseError "takeEQ: Expecting exactly 4 elements, input terminated on 3")
--   
takeEQ :: Monad m => Int -> Fold m a b -> Parser a m b -- | Collect stream elements until an element fails the predicate. The -- element on which the predicate fails is returned back to the input -- stream. -- -- -- --
--   >>> Stream.parse (Parser.takeWhile (== 0) Fold.toList) $ Stream.fromList [0,0,1,0,1]
--   Right [0,0]
--   
-- --
--   >>> takeWhile cond f = Parser.takeWhileP cond (Parser.fromFold f)
--   
-- -- We can implement a breakOn using takeWhile: -- --
--   breakOn p = takeWhile (not . p)
--   
takeWhile :: Monad m => (a -> Bool) -> Fold m a b -> Parser a m b -- | Like takeWhile but takes at least one element otherwise fails. -- --
--   >>> takeWhile1 cond p = Parser.takeWhileP cond (Parser.takeBetween 1 maxBound p)
--   
takeWhile1 :: Monad m => (a -> Bool) -> Fold m a b -> Parser a m b -- | Drain the input as long as the predicate succeeds, running the effects -- and discarding the results. -- -- This is also called skipWhile in some parsing libraries. -- --
--   >>> dropWhile p = Parser.takeWhile p Fold.drain
--   
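-- An illustrative use, skipping leading spaces before collecting the rest of
-- the input; skipSpaces is a hypothetical name and the result is indicative:
--
--   skipSpaces = Parser.dropWhile (== ' ')
--   -- Stream.parse (skipSpaces *> Parser.fromFold Fold.toList)
--   --     (Stream.fromList "   hello") is expected to yield Right "hello"
--   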
dropWhile :: Monad m => (a -> Bool) -> Parser a m () -- | Like splitOn but strips leading, trailing, and repeated -- separators. Therefore, ".a..b." having . as the -- separator would be parsed as ["a","b"]. In other words, its -- like parsing words from whitespace separated text. -- -- -- --
--   >>> wordBy = Parser.wordFramedBy (const False) (const False) (const False)
--   
-- --
--   S.wordsBy pred f = S.parseMany (PR.wordBy pred f)
--   
wordBy :: Monad m => (a -> Bool) -> Fold m a b -> Parser a m b -- | Given an input stream [a,b,c,...] and a comparison function -- cmp, the parser assigns the element a to the first -- group, then if a `cmp` b is True b is also -- assigned to the same group. If a `cmp` c is True then -- c is also assigned to the same group and so on. When the -- comparison fails the parser is terminated. Each group is folded using -- the Fold f and the result of the fold is the result of -- the parser. -- -- -- --
--   >>> :{
--    runGroupsBy eq =
--        Stream.fold Fold.toList
--            . Stream.parseMany (Parser.groupBy eq Fold.toList)
--            . Stream.fromList
--   :}
--   
-- --
--   >>> runGroupsBy (<) []
--   []
--   
-- --
--   >>> runGroupsBy (<) [1]
--   [Right [1]]
--   
-- --
--   >>> runGroupsBy (<) [3, 5, 4, 1, 2, 0]
--   [Right [3,5,4],Right [1,2],Right [0]]
--   
groupBy :: Monad m => (a -> a -> Bool) -> Fold m a b -> Parser a m b -- | Unlike groupBy this combinator performs a rolling comparison of -- two successive elements in the input stream. Assuming the input stream -- is [a,b,c,...] and the comparison function is cmp, -- the parser first assigns the element a to the first group, -- then if a `cmp` b is True b is also assigned -- to the same group. If b `cmp` c is True then -- c is also assigned to the same group and so on. When the -- comparison fails the parser is terminated. Each group is folded using -- the Fold f and the result of the fold is the result of -- the parser. -- -- -- --
--   >>> :{
--    runGroupsByRolling eq =
--        Stream.fold Fold.toList
--            . Stream.parseMany (Parser.groupByRolling eq Fold.toList)
--            . Stream.fromList
--   :}
--   
-- --
--   >>> runGroupsByRolling (<) []
--   []
--   
-- --
--   >>> runGroupsByRolling (<) [1]
--   [Right [1]]
--   
-- --
--   >>> runGroupsByRolling (<) [3, 5, 4, 1, 2, 0]
--   [Right [3,5],Right [4],Right [1,2],Right [0]]
--   
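-- The related groupByRollingEither below separates runs where the rolling
-- predicate holds from runs where it does not; an indicative sketch, with
-- runsUpDown as a hypothetical name:
--
--   runsUpDown = Parser.groupByRollingEither (<=) Fold.toList Fold.toList
--   -- With (<=), ascending runs are collected by the first fold (Left) and
--   -- descending runs by the second fold (Right).
--   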
-- -- Pre-release groupByRolling :: Monad m => (a -> a -> Bool) -> Fold m a b -> Parser a m b -- | Like groupByRolling, but if the predicate is True then -- collects using the first fold as long as the predicate holds -- True; if the predicate is False it collects using the -- second fold as long as it remains False. Returns Left -- for the first case and Right for the second case. -- -- For example, to detect sorted sequences in a stream, covering both -- ascending and descending cases, we can use 'groupByRollingEither -- (<=) Fold.toList Fold.toList'. -- -- Pre-release groupByRollingEither :: Monad m => (a -> a -> Bool) -> Fold m a b -> Fold m a c -> Parser a m (Either b c) -- | Quote and bracket aware word splitting with escaping. Like -- wordBy but word separators within specified quotes or brackets -- are ignored. Quotes and escape characters can be processed. If the end -- quote is different from the start quote it is called a bracket. The -- following quoting rules apply: -- -- -- -- We should note that unquoted and quoted escape processing are -- different. In the unquoted part the escape character is always removed. In the -- quoted part it is removed only if followed by a special-meaning -- character. This is consistent with how the shell performs escape -- processing. wordWithQuotes :: (Monad m, Eq a) => Bool -> (a -> a -> Maybe a) -> a -> (a -> Maybe a) -> (a -> Bool) -> Fold m a b -> Parser a m b -- | Collect zero or more parses. Apply the supplied parser repeatedly on -- the input stream and push the parse results to a downstream fold. -- -- Stops: when the downstream fold stops or the parser fails. Fails: -- never, produces zero or more results. -- --
--   >>> many = Parser.countBetween 0 maxBound
--   
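-- An indicative use of many, assuming the usual qualified imports; the result
-- shown is what the description implies:
--
--   Stream.parse (Parser.many (Parser.listEq "ab") Fold.toList)
--       (Stream.fromList "ababc")
--   -- expected: Right ["ab","ab"]; the trailing "c" is not consumed because
--   -- the failed attempt is backtracked and many never fails.
--   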
-- -- Compare with many. many :: Monad m => Parser a m b -> Fold m b c -> Parser a m c -- | Collect one or more parses. Apply the supplied parser repeatedly on -- the input stream and push the parse results to a downstream fold. -- -- Stops: when the downstream fold stops or the parser fails. Fails: if -- it stops without producing a single result. -- --
--   >>> some p f = Parser.manyP p (Parser.takeGE 1 f)
--   
--   >>> some = Parser.countBetween 1 maxBound
--   
-- -- Compare with some. some :: Monad m => Parser a m b -> Fold m b c -> Parser a m c -- | manyTill chunking test f tries the parser test on -- the input, if test fails it backtracks and tries -- chunking, after chunking succeeds test is -- tried again and so on. The parser stops when test succeeds. -- The output of test is discarded and the output of -- chunking is accumulated by the supplied fold. The parser -- fails if chunking fails. -- -- Stops when the fold f stops. manyTill :: Monad m => Parser a m b -> Parser a m x -> Fold m b c -> Parser a m c -- | Apply two parsers alternately to an input stream. The input stream is -- considered an interleaving of two patterns. The two parsers represent -- the two patterns. Parsing starts at the first parser and stops at the -- first parser. It can be used to parse an infix style pattern e.g. p1 p2 -- p1 . Empty input or a single parse of the first parser is accepted. -- --
--   >>> p1 = Parser.takeWhile1 (not . (== '+')) Fold.toList
--   
--   >>> p2 = Parser.satisfy (== '+')
--   
--   >>> p = Parser.deintercalate p1 p2 Fold.toList
--   
--   >>> Stream.parse p $ Stream.fromList ""
--   Right []
--   
--   >>> Stream.parse p $ Stream.fromList "1"
--   Right [Left "1"]
--   
--   >>> Stream.parse p $ Stream.fromList "1+"
--   Right [Left "1"]
--   
--   >>> Stream.parse p $ Stream.fromList "1+2+3"
--   Right [Left "1",Right '+',Left "2",Right '+',Left "3"]
--   
deintercalate :: Monad m => Parser a m x -> Parser a m y -> Fold m (Either x y) z -> Parser a m z module Streamly.Internal.Data.MutArray.Generic data MutArray a MutArray :: MutableArray# RealWorld a -> {-# UNPACK #-} !Int -> {-# UNPACK #-} !Int -> {-# UNPACK #-} !Int -> MutArray a -- | The internal contents of the array representing the entire array. [arrContents#] :: MutArray a -> MutableArray# RealWorld a -- | The starting index of this slice. [arrStart] :: MutArray a -> {-# UNPACK #-} !Int -- | The length of this slice. [arrLen] :: MutArray a -> {-# UNPACK #-} !Int -- | This is the true length of the array. Coincidentally, this also -- represents the first index beyond the maximum acceptable index of the -- array. This is specific to the array contents itself and not dependent -- on the slice. This value should not change and is shared across all -- the slices. [arrTrueLen] :: MutArray a -> {-# UNPACK #-} !Int -- | Definition: -- --
--   >>> nil = MutArray.new 0
--   
nil :: MonadIO m => m (MutArray a) -- | emptyOf count allocates a zero length array that can be -- extended to hold up to count items without reallocating. -- -- Pre-release emptyOf :: MonadIO m => Int -> m (MutArray a) -- | Like createOf but does not check the array bounds when writing. -- The fold driver must not call the step function more than n -- times otherwise it will corrupt the memory and crash. This function -- exists mainly because any conditional in the step function blocks -- fusion causing 10x performance slowdown. -- -- Pre-release unsafeCreateOf :: MonadIO m => Int -> Fold m a (MutArray a) -- | createOf n folds a maximum of n elements from the -- input stream to an Array. -- --
--   >>> createOf n = Fold.take n (MutArray.unsafeCreateOf n)
--   
-- -- Pre-release createOf :: MonadIO m => Int -> Fold m a (MutArray a) -- | createWith minCount folds the whole input to a single array. -- The array starts at a size big enough to hold minCount elements, the -- size is doubled every time the array needs to be grown. -- -- Caution! Do not use this on infinite streams. -- -- Pre-release createWith :: MonadIO m => Int -> Fold m a (MutArray a) -- | Fold the whole input to a single array. -- -- Same as createWith using an initial array size of -- arrayChunkSize bytes rounded up to the element size. -- -- Caution! Do not use this on infinite streams. create :: MonadIO m => Fold m a (MutArray a) -- | Create a MutArray from the first n elements of a -- stream. The array is allocated to size n, if the stream -- terminates before n elements then the array may hold less -- than n elements. fromStreamN :: MonadIO m => Int -> Stream m a -> m (MutArray a) fromStream :: MonadIO m => Stream m a -> m (MutArray a) fromPureStream :: MonadIO m => Stream Identity a -> m (MutArray a) fromListN :: MonadIO m => Int -> [a] -> m (MutArray a) fromList :: MonadIO m => [a] -> m (MutArray a) -- | O(1) Write the given element at the given index in the array. -- Performs in-place mutation of the array. -- --
--   >>> putIndex ix arr val = MutArray.modifyIndex ix arr (const (val, ()))
--   
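-- A small end-to-end sketch using the signatures documented in this module,
-- assuming it is imported qualified as MutArray; the result is indicative:
--
--   do arr <- MutArray.fromList [1,2,3 :: Int]
--      MutArray.putIndex 1 arr 10
--      MutArray.toList arr
--   -- expected: [1,10,3]
--   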
-- -- Pre-release putIndex :: MonadIO m => Int -> MutArray a -> a -> m () -- | Write the given element to the given index of the array. Does not -- check if the index is out of bounds of the array. -- -- Pre-release putIndexUnsafe :: forall m a. MonadIO m => Int -> MutArray a -> a -> m () -- | Write an input stream of (index, value) pairs to an array. Throws an -- error if any index is out of bounds. -- -- Pre-release putIndices :: MonadIO m => MutArray a -> Fold m (Int, a) () -- | Modify a given index of an array using a modifier function without -- checking the bounds. -- -- Unsafe because it does not check the bounds of the array. -- -- Pre-release modifyIndexUnsafe :: MonadIO m => Int -> MutArray a -> (a -> (a, b)) -> m b -- | Modify a given index of an array using a modifier function. -- -- Pre-release modifyIndex :: MonadIO m => Int -> MutArray a -> (a -> (a, b)) -> m b -- | Reallocates the array according to the new size. This is a safe -- function that always creates a new array and copies the old array into -- the new one. If the reallocated size is less than the original array -- it results in a truncated version of the original array. realloc :: MonadIO m => Int -> MutArray a -> m (MutArray a) -- | Make the uninitialized memory in the array available for use extending -- it by the supplied length beyond the current length of the array. The -- array may be reallocated. uninit :: MonadIO m => MutArray a -> Int -> m (MutArray a) -- | snocWith sizer arr elem mutates arr to append -- elem. The length of the array increases by 1. -- -- If there is no reserved space available in arr it is -- reallocated to a size in bytes determined by the sizer -- oldSize function, where oldSize is the original size of -- the array. -- -- Note that the returned array may be a mutated version of the original -- array. -- -- Pre-release snocWith :: MonadIO m => (Int -> Int) -> MutArray a -> a -> m (MutArray a) -- | The array is mutated to append an additional element to it. If there -- is no reserved space available in the array then it is reallocated to -- double the original size. -- -- This is useful to reduce allocations when appending unknown number of -- elements. -- -- Note that the returned array may be a mutated version of the original -- array. -- --
--   >>> snoc = MutArray.snocWith (* 2)
--   
-- -- Performs O(n * log n) copies to grow, but is liberal with memory -- allocation. -- -- Pre-release snoc :: MonadIO m => MutArray a -> a -> m (MutArray a) -- | Really, really unsafe: appends the element to the array and may -- cause silent data corruption or, if you are lucky, a segfault if the -- index is out of bounds. -- -- Internal snocUnsafe :: MonadIO m => MutArray a -> a -> m (MutArray a) -- | Unfold an array into a stream. reader :: MonadIO m => Unfold m (MutArray a) a -- | Resumable unfold of an array. producerWith :: Monad m => (forall b. IO b -> m b) -> Producer m (MutArray a) a -- | Resumable unfold of an array. producer :: MonadIO m => Producer m (MutArray a) a -- | Generates a stream from the elements of a MutArray. -- --
--   >>> read = Stream.unfold MutArray.reader
--   
read :: MonadIO m => MutArray a -> Stream m a readRev :: MonadIO m => MutArray a -> Stream m a toStreamK :: MonadIO m => MutArray a -> StreamK m a -- | Convert an Array into a list. -- -- Pre-release toList :: MonadIO m => MutArray a -> m [a] -- | O(1) Lookup the element at the given index. Index starts from -- 0. getIndex :: MonadIO m => Int -> MutArray a -> m (Maybe a) -- | Return the element at the specified index without checking the bounds. -- -- Unsafe because it does not check the bounds of the array. getIndexUnsafe :: MonadIO m => Int -> MutArray a -> m a -- | Return the element at the specified index without checking the bounds -- from a MutableArray# RealWorld. -- -- Unsafe because it does not check the bounds of the array. getIndexUnsafeWith :: MonadIO m => MutableArray# RealWorld a -> Int -> m a length :: MutArray a -> Int strip :: MonadIO m => (a -> Bool) -> MutArray a -> m (MutArray a) -- | Compare the length of the arrays. If the length is equal, compare the -- lexicographical ordering of two underlying byte arrays otherwise -- return the result of length comparison. -- -- Pre-release cmp :: (MonadIO m, Ord a) => MutArray a -> MutArray a -> m Ordering eq :: (MonadIO m, Eq a) => MutArray a -> MutArray a -> m Bool -- | chunksOf n stream groups the input stream into a stream of -- arrays of size n. -- --
--   chunksOf n = foldMany (MutArray.writeN n)
--   
-- -- Pre-release chunksOf :: forall m a. MonadIO m => Int -> Stream m a -> Stream m (MutArray a) -- | O(1) Slice an array in constant time. -- -- Unsafe: The bounds of the slice are not checked. -- -- Unsafe -- -- Pre-release getSliceUnsafe :: Int -> Int -> MutArray a -> MutArray a -- | O(1) Slice an array in constant time. Throws an error if the -- slice extends out of the array bounds. -- -- Pre-release getSlice :: Int -> Int -> MutArray a -> MutArray a -- | Put a sub range of a source array into a subrange of a destination -- array. This is not safe as it does not check the bounds. putSliceUnsafe :: MonadIO m => MutArray a -> Int -> MutArray a -> Int -> Int -> m () clone :: MonadIO m => MutArray a -> m (MutArray a) new :: MonadIO m => Int -> m (MutArray a) -- | Deprecated: Please use unsafeCreateOf instead. writeNUnsafe :: MonadIO m => Int -> Fold m a (MutArray a) writeN :: MonadIO m => Int -> Fold m a (MutArray a) -- | Deprecated: Please use createWith instead. writeWith :: MonadIO m => Int -> Fold m a (MutArray a) write :: MonadIO m => Fold m a (MutArray a) module Streamly.Internal.Data.Ring.Generic data Ring a Ring :: MutArray a -> !Int -> !Int -> Ring a [ringArr] :: Ring a -> MutArray a [ringHead] :: Ring a -> !Int [ringMax] :: Ring a -> !Int createRing :: MonadIO m => Int -> m (Ring a) -- | Note that it is not safe to return a reference to the mutable Ring -- using a scan as the Ring is continuously getting mutated. You could -- however copy out the Ring. writeLastN :: MonadIO m => Int -> Fold m a (Ring a) -- | Move the ring head clockwise (+ve adj) or counter clockwise (-ve adj) -- by the given amount. seek :: MonadIO m => Int -> Ring a -> m (Ring a) unsafeInsertRingWith :: Ring a -> a -> IO Int -- | toMutArray ringHeadAdjustment lengthToRead ring. Convert the -- ring into a boxed mutable array. Note that the returned MutArray -- shares the same underlying memory as the Ring; the user of this API -- needs to ensure that the ring is not mutated during and after the -- conversion. toMutArray :: MonadIO m => Int -> Int -> Ring a -> m (MutArray a) -- | Copy out the mutable ring to a mutable Array. copyToMutArray :: MonadIO m => Int -> Int -> Ring a -> m (MutArray a) -- | Seek by n and then read the entire ring. Use take on the stream -- to restrict the reads. toStreamWith :: Int -> Ring a -> Stream m a -- | Unconstrained version of Streamly.Data.MutArray module. -- -- See the Streamly.Data.MutArray module for documentation. module Streamly.Data.MutArray.Generic data MutArray a -- | emptyOf count allocates a zero length array that can be -- extended to hold up to count items without reallocating. -- -- Pre-release emptyOf :: MonadIO m => Int -> m (MutArray a) fromListN :: MonadIO m => Int -> [a] -> m (MutArray a) fromList :: MonadIO m => [a] -> m (MutArray a) -- | createOf n folds a maximum of n elements from the -- input stream to an Array. -- --
--   >>> createOf n = Fold.take n (MutArray.unsafeCreateOf n)
--   
-- -- Pre-release createOf :: MonadIO m => Int -> Fold m a (MutArray a) -- | Fold the whole input to a single array. -- -- Same as createWith using an initial array size of -- arrayChunkSize bytes rounded up to the element size. -- -- Caution! Do not use this on infinite streams. create :: MonadIO m => Fold m a (MutArray a) -- | The array is mutated to append an additional element to it. If there -- is no reserved space available in the array then it is reallocated to -- double the original size. -- -- This is useful to reduce allocations when appending unknown number of -- elements. -- -- Note that the returned array may be a mutated version of the original -- array. -- --
--   >>> snoc = MutArray.snocWith (* 2)
--   
-- -- Performs O(n * log n) copies to grow, but is liberal with memory -- allocation. -- -- Pre-release snoc :: MonadIO m => MutArray a -> a -> m (MutArray a) -- | O(1) Write the given element at the given index in the array. -- Performs in-place mutation of the array. -- --
--   >>> putIndex ix arr val = MutArray.modifyIndex ix arr (const (val, ()))
--   
-- -- Pre-release putIndex :: MonadIO m => Int -> MutArray a -> a -> m () -- | Write the given element to the given index of the array. Does not -- check if the index is out of bounds of the array. -- -- Pre-release putIndexUnsafe :: forall m a. MonadIO m => Int -> MutArray a -> a -> m () -- | Modify a given index of an array using a modifier function. -- -- Pre-release modifyIndex :: MonadIO m => Int -> MutArray a -> (a -> (a, b)) -> m b -- | Modify a given index of an array using a modifier function without -- checking the bounds. -- -- Unsafe because it does not check the bounds of the array. -- -- Pre-release modifyIndexUnsafe :: MonadIO m => Int -> MutArray a -> (a -> (a, b)) -> m b -- | O(1) Lookup the element at the given index. Index starts from -- 0. getIndex :: MonadIO m => Int -> MutArray a -> m (Maybe a) -- | Return the element at the specified index without checking the bounds. -- -- Unsafe because it does not check the bounds of the array. getIndexUnsafe :: MonadIO m => Int -> MutArray a -> m a -- | Convert an Array into a list. -- -- Pre-release toList :: MonadIO m => MutArray a -> m [a] -- | Generates a stream from the elements of a MutArray. -- --
--   >>> read = Stream.unfold MutArray.reader
--   
read :: MonadIO m => MutArray a -> Stream m a readRev :: MonadIO m => MutArray a -> Stream m a -- | Unfold an array into a stream. reader :: MonadIO m => Unfold m (MutArray a) a length :: MutArray a -> Int new :: MonadIO m => Int -> m (MutArray a) writeN :: MonadIO m => Int -> Fold m a (MutArray a) write :: MonadIO m => Fold m a (MutArray a) -- | A ring array is a circular mutable array. module Streamly.Internal.Data.Ring -- | A ring buffer is a mutable array of fixed size. Initially the array is -- empty, with ringStart pointing at the start of allocated memory. We -- call the next location to be written in the ring the ringHead. -- Initially ringHead == ringStart. When the first item is added, -- ringHead points to ringStart + sizeof item. When the buffer becomes -- full ringHead would wrap around to ringStart. When the buffer is full, -- ringHead always points at the oldest item in the ring and the newest -- item added always overwrites the oldest item. -- -- When using it we should keep in mind that a ringBuffer is a mutable -- data structure. We should not leak out references to it for immutable -- use. data Ring a Ring :: {-# UNPACK #-} !ForeignPtr a -> {-# UNPACK #-} !Ptr a -> Ring a [ringStart] :: Ring a -> {-# UNPACK #-} !ForeignPtr a [ringBound] :: Ring a -> {-# UNPACK #-} !Ptr a -- | Create a new ringbuffer and return the ring buffer and the ringHead. -- Returns the ring and the ringHead; the ringHead is the same as ringStart. new :: forall a. Storable a => Int -> IO (Ring a, Ptr a) -- | newRing count allocates an empty array that can hold -- count items. The memory of the array is uninitialized and the -- allocation is aligned as per the Storable instance of the type. -- -- Unimplemented newRing :: Int -> m (Ring a) -- | writeN n is a rolling fold that keeps the last n elements of -- the stream in a ring array. -- -- Unimplemented writeN :: Int -> Fold m a (Ring a) -- | Advance the ringHead by 1 item, wrap around if we hit the end of the -- array. advance :: forall a. Storable a => Ring a -> Ptr a -> Ptr a -- | Move the ringHead by n items. The direction depends on -- whether n is positive or negative. Wrap around if we hit the beginning -- or end of the array. moveBy :: forall a. Storable a => Int -> Ring a -> Ptr a -> Ptr a -- | Get the first address of the ring as a pointer. startOf :: Ring a -> Ptr a -- | Insert an item at the head of the ring, when the ring is full this -- replaces the oldest item in the ring with the new item. This is unsafe -- because the ringHead supplied is not verified to be within the Ring. Also, -- the ringStart foreignPtr must be guaranteed to be alive by the caller. unsafeInsert :: Storable a => Ring a -> Ptr a -> a -> IO (Ptr a) -- | Insert an item at the head of the ring, when the ring is full this -- replaces the oldest item in the ring with the new item. -- -- Unimplemented slide :: Ring a -> a -> m (Ring a) -- | O(1) Write the given element at the given index in the ring -- array. Performs in-place mutation of the array. -- --
--   >>> putIndex arr ix val = Ring.modifyIndex arr ix (const (val, ()))
--   
-- -- Unimplemented putIndex :: Ring a -> Int -> a -> m () -- | Modify a given index of a ring array using a modifier function. -- -- Unimplemented modifyIndex :: Ring a -> Int -> (a -> (a, b)) -> m b -- | Read n elements from the ring starting at the supplied ring head. If n -- is more than the ring size it keeps reading the ring in a circular -- fashion. -- -- If the ring is not full the user must ensure than n is less than or -- equal to the number of valid elements in the ring. -- -- Internal read :: forall m a. (MonadIO m, Storable a) => Unfold m (Ring a, Ptr a, Int) a -- | Unfold a ring array into a stream in reverse order. -- -- Unimplemented readRev :: Unfold m (MutArray a) a -- | O(1) Lookup the element at the given index. Index starts from -- 0. getIndex :: Ring a -> Int -> m a -- | Return the element at the specified index without checking the bounds. -- -- Unsafe because it does not check the bounds of the ring array. getIndexUnsafe :: Ring a -> Int -> m a -- | O(1) Lookup the element at the given index from the end of the -- array. Index starts from 0. -- -- Slightly faster than computing the forward index and using getIndex. getIndexRev :: Ring a -> Int -> m a -- | O(1) Get the length of the array i.e. the number of elements in -- the array. -- -- Note that byteLength is less expensive than this operation, as -- length involves a costly division operation. -- -- Unimplemented length :: Ring a -> Int -- | O(1) Get the byte length of the array. -- -- Unimplemented byteLength :: Ring a -> Int -- | Get the total capacity of an array. An array may have space reserved -- beyond the current used length of the array. -- -- Pre-release byteCapacity :: Ring a -> Int -- | The remaining capacity in the array for appending more elements -- without reallocation. -- -- Pre-release bytesFree :: Ring a -> Int -- | Cast an array having elements of type a into an array having -- elements of type b. The length of the array should be a -- multiple of the size of the target element otherwise Nothing is -- returned. -- -- Pre-release cast :: forall a b. Storable b => Ring a -> Maybe (Ring b) -- | Cast an array having elements of type a into an array having -- elements of type b. The array size must be a multiple of the -- size of type b. -- -- Unimplemented castUnsafe :: Ring a -> Ring b -- | Cast an Array a into an Array Word8. -- -- Unimplemented asBytes :: Ring a -> Ring Word8 -- | Cast a mutable array to a ring array. fromArray :: MutArray a -> Ring a -- | Fold the buffer starting from ringStart up to the given Ptr -- using a pure step function. This is useful to fold the items in the -- ring when the ring is not full. The supplied pointer is usually the -- end of the ring. -- -- Unsafe because the supplied Ptr is not checked to be in range. unsafeFoldRing :: forall a b. Storable a => Ptr a -> (b -> a -> b) -> b -> Ring a -> b -- | Like unsafeFoldRing but with a monadic step function. unsafeFoldRingM :: forall m a b. (MonadIO m, Storable a) => Ptr a -> (b -> a -> m b) -> b -> Ring a -> m b -- | Fold the entire length of a ring buffer starting at the supplied -- ringHead pointer. Assuming the supplied ringHead pointer points to the -- oldest item, this would fold the ring starting from the oldest item to -- the newest item in the ring. -- -- Note, this will crash on ring of 0 size. unsafeFoldRingFullM :: forall m a b. (MonadIO m, Storable a) => Ptr a -> (b -> a -> m b) -> b -> Ring a -> m b -- | Fold Int items in the ring starting at Ptr a. Won't -- fold more than the length of the ring. 
-- -- Note, this will crash on a ring of 0 size. unsafeFoldRingNM :: forall m a b. (MonadIO m, Storable a) => Int -> Ptr a -> (b -> a -> m b) -> b -> Ring a -> m b -- | ringsOf n stream groups the input stream into a stream of -- ring arrays of size n. Each ring is a sliding window of size n. -- -- Unimplemented ringsOf :: Int -> Stream m a -> Stream m (MutArray a) -- | Byte compare the entire length of ringBuffer with the given array, -- starting at the supplied ringHead pointer. Returns true if the Array -- and the ringBuffer have identical contents. -- -- This is unsafe because the ringHead Ptr is not checked to be in range. -- The supplied array must be equal to or bigger than the ringBuffer, -- ARRAY BOUNDS ARE NOT CHECKED. unsafeEqArray :: Ring a -> Ptr a -> Array a -> Bool -- | Like unsafeEqArray but compares only N bytes instead of entire -- length of the ring buffer. This is unsafe because the ringHead Ptr is -- not checked to be in range. unsafeEqArrayN :: Ring a -> Ptr a -> Array a -> Int -> Bool -- | slidingWindow collector is an incremental sliding window fold -- that does not require all the intermediate elements in a computation. -- This maintains n elements in the window; when a new element -- comes in, it slides out the oldest element, and the new element along with -- the old element are supplied to the collector fold. -- -- The Maybe type is for the case when initially the window is -- filling and there is no old element. slidingWindow :: forall m a b. (MonadIO m, Storable a, Unbox a) => Int -> Fold m (a, Maybe a) b -> Fold m a b -- | Like slidingWindow but also provides the entire ring contents as an -- Array. The array reflects the state of the ring after inserting the -- incoming element. -- -- IMPORTANT NOTE: The ring is mutable, therefore, the result of (m -- (Array a)) action depends on when it is executed. It does not -- capture the snapshot of the ring at a particular time. slidingWindowWith :: forall m a b. (MonadIO m, Storable a, Unbox a) => Int -> Fold m ((a, Maybe a), m (MutArray a)) b -> Fold m a b instance (GHC.Show.Show a, GHC.Show.Show b, GHC.Show.Show c, GHC.Show.Show d) => GHC.Show.Show (Streamly.Internal.Data.Ring.Tuple4' a b c d) -- | See Streamly.Data.Fold for an overview and -- Streamly.Internal.Data.Fold.Type for design notes. module Streamly.Internal.Data.Fold -- | Represents the result of the step of a Fold. -- Partial returns an intermediate state of the fold, the fold -- step can be called again with the state or the driver can use -- extract on the state to get the result out. Done -- returns the final result and the fold cannot be driven further. -- -- Pre-release data Step s b Partial :: !s -> Step s b Done :: !b -> Step s b -- | Map a monadic function over the result b in Step s -- b. -- -- Internal mapMStep :: Applicative m => (a -> m b) -> Step s a -> m (Step s b) -- | If Partial then map the state, if Done then call the -- next step. chainStepM :: Applicative m => (s1 -> m s2) -> (a -> m (Step s2 b)) -> Step s1 a -> m (Step s2 b) -- | The type Fold m a b represents a consumer of an input stream -- of values of type a and returning a final value of type -- b in Monad m. The constructor of a fold is -- Fold step initial extract final. -- -- The fold uses an internal state of type s. The initial value -- of the state s is created by initial. This function -- is called once and only once before the fold starts consuming input. -- Any resource allocation can be done in this function.
-- -- The step function is called on each input, it consumes an -- input and returns the next intermediate state (see Step) or the -- final result b if the fold terminates. -- -- If the fold is used as a scan, the extract function is used -- by the scan driver to map the current state s of the fold to -- the fold result. Thus extract can be called multiple times. -- In some folds, where scanning does not make sense, this function is -- left unimplemented; such folds cannot be used as scans. -- -- Before a fold terminates, final is called once and only once -- (unless the fold terminated in initial itself). Any resources -- allocated by initial can be released in final. In -- folds that do not require any cleanup extract and -- final are typically the same. -- -- When implementing fold combinators, care should be taken to cleanup -- any state of the argument folds held by the fold by calling the -- respective final at all exit points of the fold. Also, -- final should not be called more than once. Note that if a -- fold terminates by Done constructor, there is no state to -- cleanup. -- -- NOTE: The constructor is not yet released, smart constructors are -- provided to create folds. data Fold m a b -- | Fold step initial extract -- final Fold :: (s -> a -> m (Step s b)) -> m (Step s b) -> (s -> m b) -> (s -> m b) -> Fold m a b -- | Make a fold from a left fold style pure step function and initial -- value of the accumulator. -- -- If your Fold returns only Partial (i.e. never returns a -- Done) then you can use foldl'* constructors. -- -- A fold with an extract function can be expressed using fmap: -- --
--   mkfoldlx :: Monad m => (s -> a -> s) -> s -> (s -> b) -> Fold m a b
--   mkfoldlx step initial extract = fmap extract (foldl' step initial)
--   
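-- An illustrative use of foldl' as a plain left-fold reduction, assuming the
-- usual qualified Fold and Stream imports:
--
--   Stream.fold (Fold.foldl' (+) 0) (Stream.fromList [1..100 :: Int])
--   -- 5050
--   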
foldl' :: Monad m => (b -> a -> b) -> b -> Fold m a b -- | Make a fold from a left fold style monadic step function and initial -- value of the accumulator. -- -- A fold with an extract function can be expressed using rmapM: -- --
--   mkFoldlxM :: Functor m => (s -> a -> m s) -> m s -> (s -> m b) -> Fold m a b
--   mkFoldlxM step initial extract = rmapM extract (foldlM' step initial)
--   
foldlM' :: Monad m => (b -> a -> m b) -> m b -> Fold m a b -- | Make a strict left fold, for non-empty streams, using first element as -- the starting value. Returns Nothing if the stream is empty. -- -- Pre-release foldl1' :: Monad m => (a -> a -> a) -> Fold m a (Maybe a) -- | Like 'foldl1'' but with a monadic step function. -- -- Pre-release foldlM1' :: Monad m => (a -> a -> m a) -> Fold m a (Maybe a) -- | Make a terminating fold using a pure step function, a pure initial -- state and a pure state extraction function. -- -- Pre-release foldt' :: Monad m => (s -> a -> Step s b) -> Step s b -> (s -> b) -> Fold m a b -- | Make a terminating fold with an effectful step function and initial -- state, and a state extraction function. -- --
--   >>> foldtM' = Fold.Fold
--   
-- -- We can just use Fold but it is provided for completeness. -- -- Pre-release foldtM' :: (s -> a -> m (Step s b)) -> m (Step s b) -> (s -> m b) -> Fold m a b -- | Make a fold using a right fold style step function and a terminal -- value. It performs a strict right fold via a left fold using function -- composition. Note that a strict right fold can only be useful for -- constructing strict structures in memory. For reductions this will be -- very inefficient. -- -- Definitions: -- --
--   >>> foldr' f z = fmap (flip appEndo z) $ Fold.foldMap (Endo . f)
--   
--   >>> foldr' f z = fmap ($ z) $ Fold.foldl' (\g x -> g . f x) id
--   
-- -- Example: -- --
--   >>> Stream.fold (Fold.foldr' (:) []) $ Stream.enumerateFromTo 1 5
--   [1,2,3,4,5]
--   
foldr' :: Monad m => (a -> b -> b) -> b -> Fold m a b -- | Like foldr' but with a monadic step function. -- -- Example: -- --
--   >>> toList = Fold.foldrM' (\a xs -> return $ a : xs) (return [])
--   
-- -- See also: foldrM -- -- Pre-release foldrM' :: Monad m => (a -> b -> m b) -> m b -> Fold m a b -- | Make a fold that yields the supplied value without consuming any -- further input. -- -- Pre-release fromPure :: Applicative m => b -> Fold m a b -- | Make a fold that yields the result of the supplied effectful action -- without consuming any further input. -- -- Pre-release fromEffect :: Applicative m => m b -> Fold m a b -- | Make a fold from a consumer. -- -- Internal fromRefold :: Refold m c a b -> c -> Fold m a b -- | A fold that drains all its input, running the effects and discarding -- the results. -- --
--   >>> drain = Fold.drainMapM (const (return ()))
--   
--   >>> drain = Fold.foldl' (\_ _ -> ()) ()
--   
drain :: Monad m => Fold m a () -- | Folds the input stream to a list. -- -- Warning! working on large lists accumulated as buffers in -- memory could be very inefficient, consider using -- Streamly.Data.Array instead. -- --
--   >>> toList = Fold.foldr' (:) []
--   
toList :: Monad m => Fold m a [a] -- | A fold that buffers its input to a pure stream. -- --
--   >>> toStreamK = foldr StreamK.cons StreamK.nil
--   
--   >>> toStreamK = fmap StreamK.reverse Fold.toStreamKRev
--   
-- -- Internal toStreamK :: Monad m => Fold m a (StreamK n a) -- | Buffers the input stream to a pure stream in the reverse order of the -- input. -- --
--   >>> toStreamKRev = Foldable.foldl' (flip StreamK.cons) StreamK.nil
--   
-- -- This is more efficient than toStreamK. toStreamK has exactly -- the same performance as reversing the stream after toStreamKRev. -- -- Pre-release toStreamKRev :: Monad m => Fold m a (StreamK n a) -- | Like length, except with a more general Num return value -- -- Definition: -- --
--   >>> lengthGeneric = fmap getSum $ Fold.foldMap (Sum . const  1)
--   
--   >>> lengthGeneric = Fold.foldl' (\n _ -> n + 1) 0
--   
-- -- Pre-release lengthGeneric :: (Monad m, Num b) => Fold m a b -- | Determine the length of the input stream. -- -- Definition: -- --
--   >>> length = Fold.lengthGeneric
--   
--   >>> length = fmap getSum $ Fold.foldMap (Sum . const  1)
--   
length :: Monad m => Fold m a Int -- | Map a monadic function on the output of a fold. rmapM :: Monad m => (b -> m c) -> Fold m a b -> Fold m a c -- | lmap f fold maps the function f on the input of the -- fold. -- -- Definition: -- --
--   >>> lmap = Fold.lmapM return
--   
-- -- Example: -- --
--   >>> sumSquared = Fold.lmap (\x -> x * x) Fold.sum
--   
--   >>> Stream.fold sumSquared (Stream.enumerateFromTo 1 100)
--   338350
--   
lmap :: (a -> b) -> Fold m b r -> Fold m a r -- | lmapM f fold maps the monadic function f on the -- input of the fold. lmapM :: Monad m => (a -> m b) -> Fold m b r -> Fold m a r -- | Postscan the input of a Fold to change it in a stateful manner -- using another Fold. -- --
--   postscan scanner collector
--   
-- -- Pre-release postscan :: Monad m => Fold m a b -> Fold m b c -> Fold m a c -- | Modify a fold to receive a Maybe input, the Just values -- are unwrapped and sent to the original fold, Nothing values are -- discarded. -- --
--   >>> catMaybes = Fold.mapMaybe id
--   
--   >>> catMaybes = Fold.filter isJust . Fold.lmap fromJust
--   
catMaybes :: Monad m => Fold m a b -> Fold m (Maybe a) b -- | Use a Maybe returning fold as a filtering scan. -- --
--   >>> scanMaybe p f = Fold.postscan p (Fold.catMaybes f)
--   
-- -- Pre-release scanMaybe :: Monad m => Fold m a (Maybe b) -> Fold m b c -> Fold m a c -- | Include only those elements that pass a predicate. -- --
--   >>> Stream.fold (Fold.filter (> 5) Fold.sum) $ Stream.fromList [1..10]
--   40
--   
-- --
--   >>> filter p = Fold.scanMaybe (Fold.filtering p)
--   
--   >>> filter p = Fold.filterM (return . p)
--   
--   >>> filter p = Fold.mapMaybe (\x -> if p x then Just x else Nothing)
--   
filter :: Monad m => (a -> Bool) -> Fold m a r -> Fold m a r -- | A scanning fold for filtering elements based on a predicate. filtering :: Monad m => (a -> Bool) -> Fold m a (Maybe a) -- | Like filter but with a monadic predicate. -- --
--   >>> f p x = p x >>= \r -> return $ if r then Just x else Nothing
--   
--   >>> filterM p = Fold.mapMaybeM (f p)
--   
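-- For example, assuming the usual qualified Fold and Stream doctest
-- imports:
--
--   >>> Stream.fold (Fold.filterM (return . even) Fold.toList) (Stream.fromList [1..6 :: Int])
--   [2,4,6]
--   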
filterM :: Monad m => (a -> m Bool) -> Fold m a r -> Fold m a r -- | Discard Rights and unwrap Lefts in an Either -- stream. -- -- Pre-release catLefts :: Monad m => Fold m a c -> Fold m (Either a b) c -- | Discard Lefts and unwrap Rights in an Either -- stream. -- -- Pre-release catRights :: Monad m => Fold m b c -> Fold m (Either a b) c -- | Remove the either wrapper and flatten both lefts as well as rights -- in the output stream. -- -- Definition: -- --
--   >>> catEithers = Fold.lmap (either id id)
--   
-- -- Pre-release catEithers :: Fold m a b -> Fold m (Either a a) b -- | Take at most n input elements and fold them using the -- supplied fold. A negative count is treated as 0. -- --
--   >>> Stream.fold (Fold.take 2 Fold.toList) $ Stream.fromList [1..10]
--   [1,2]
--   
take :: Monad m => Int -> Fold m a b -> Fold m a b taking :: Monad m => Int -> Fold m a (Maybe a) -- | Like takeEndBy but drops the element on which the predicate -- succeeds. -- -- Example: -- --
--   >>> input = Stream.fromList "hello\nthere\n"
--   
--   >>> line = Fold.takeEndBy_ (== '\n') Fold.toList
--   
--   >>> Stream.fold line input
--   "hello"
--   
-- --
--   >>> Stream.fold Fold.toList $ Stream.foldMany line input
--   ["hello","there"]
--   
takeEndBy_ :: Monad m => (a -> Bool) -> Fold m a b -> Fold m a b -- | Take the input, stop when the predicate succeeds, taking the succeeding -- element as well. -- -- Example: -- --
--   >>> input = Stream.fromList "hello\nthere\n"
--   
--   >>> line = Fold.takeEndBy (== '\n') Fold.toList
--   
--   >>> Stream.fold line input
--   "hello\n"
--   
-- --
--   >>> Stream.fold Fold.toList $ Stream.foldMany line input
--   ["hello\n","there\n"]
--   
takeEndBy :: Monad m => (a -> Bool) -> Fold m a b -> Fold m a b dropping :: Monad m => Int -> Fold m a (Maybe a) -- | Sequential fold application. Apply two folds sequentially to an input -- stream. The input is provided to the first fold; when it is done, the -- remaining input is provided to the second fold. When the second fold -- is done or if the input stream is over, the outputs of the two folds -- are combined using the supplied function. -- -- Example: -- --
--   >>> header = Fold.take 8 Fold.toList
--   
--   >>> line = Fold.takeEndBy (== '\n') Fold.toList
--   
--   >>> f = Fold.splitWith (,) header line
--   
--   >>> Stream.fold f $ Stream.fromList "header: hello\n"
--   ("header: ","hello\n")
--   
-- -- Note: This is dual to appending streams using append. -- -- Note: this implementation allows for stream fusion but has quadratic -- time complexity, because each composition adds a new branch that each -- subsequent fold's input element has to traverse; therefore, it cannot -- scale to a large number of compositions. After around 100 compositions -- the performance starts dipping rapidly compared to a CPS style -- implementation. -- -- For a larger number of compositions you can convert the fold to a parser -- and use ParserK. -- -- Time: O(n^2) where n is the number of compositions. splitWith :: Monad m => (a -> b -> c) -> Fold m x a -> Fold m x b -> Fold m x c -- | Same as applicative *>. Run two folds serially one after the -- other discarding the result of the first. -- -- This was written in the hope that it might be faster than implementing -- it using splitWith, but the current benchmarks show that it has the -- same performance. So do not expose it unless some benchmark shows -- benefit. split_ :: Monad m => Fold m x a -> Fold m x b -> Fold m x b data ManyState s1 s2 -- | Collect zero or more applications of a fold. many first -- second applies the first fold repeatedly on the input -- stream and accumulates its results using the second fold. -- --
--   >>> two = Fold.take 2 Fold.toList
--   
--   >>> twos = Fold.many two Fold.toList
--   
--   >>> Stream.fold twos $ Stream.fromList [1..10]
--   [[1,2],[3,4],[5,6],[7,8],[9,10]]
--   
-- -- Stops when second fold stops. -- -- See also: concatMap, foldMany many :: Monad m => Fold m a b -> Fold m b c -> Fold m a c -- | Like many, but the "first" fold emits an output at the end even if no -- input is received. -- -- Internal -- -- See also: concatMap, foldMany manyPost :: Monad m => Fold m a b -> Fold m b c -> Fold m a c -- | groupsOf n split collect repeatedly applies the -- split fold to chunks of n items in the input stream -- and supplies the result to the collect fold. -- -- Definition: -- --
--   >>> groupsOf n split = Fold.many (Fold.take n split)
--   
-- -- Example: -- --
--   >>> twos = Fold.groupsOf 2 Fold.toList Fold.toList
--   
--   >>> Stream.fold twos $ Stream.fromList [1..10]
--   [[1,2],[3,4],[5,6],[7,8],[9,10]]
--   
-- -- Stops when collect stops. groupsOf :: Monad m => Int -> Fold m a b -> Fold m b c -> Fold m a c -- | Like many but uses a Refold for collecting. refoldMany :: Monad m => Fold m a b -> Refold m x b c -> Refold m x a c -- | Like many but uses a Refold for splitting. -- -- Internal refoldMany1 :: Monad m => Refold m x a b -> Fold m b c -> Refold m x a c -- | Map a Fold returning function on the result of a Fold -- and run the returned fold. This operation can be used to express data -- dependencies between fold operations. -- -- Let's say the first element in the stream is a count of the following -- elements that we have to add, then: -- --
--   >>> import Data.Maybe (fromJust)
--   
--   >>> count = fmap fromJust Fold.one
--   
--   >>> total n = Fold.take n Fold.sum
--   
--   >>> Stream.fold (Fold.concatMap total count) $ Stream.fromList [10,9..1]
--   45
--   
-- -- This does not fuse completely, see refold for a fusible -- alternative. -- -- Time: O(n^2) where n is the number of compositions. -- -- See also: foldIterateM, refold concatMap :: Monad m => (b -> Fold m a c) -> Fold m a b -> Fold m a c -- | duplicate provides the ability to run a fold in parts. The -- duplicated fold consumes the input and returns the same fold as output -- instead of returning the final result, the returned fold can be run -- later to consume more input. -- -- duplicate essentially appends a stream to the fold without -- finishing the fold. Compare with snoc which appends a singleton -- value to the fold. -- -- Pre-release duplicate :: Monad m => Fold m a b -> Fold m a (Fold m a b) -- | Extract the output of a fold and refold it using a Refold. -- -- A fusible alternative to concatMap. -- -- Internal refold :: Monad m => Refold m b a c -> Fold m a b -> Fold m a c -- | teeWith k f1 f2 distributes its input to both f1 and -- f2 until both of them terminate and combines their output -- using k. -- -- Definition: -- --
--   >>> teeWith k f1 f2 = fmap (uncurry k) (Fold.tee f1 f2)
--   
-- -- Example: -- --
--   >>> avg = Fold.teeWith (/) Fold.sum (fmap fromIntegral Fold.length)
--   
--   >>> Stream.fold avg $ Stream.fromList [1.0..100.0]
--   50.5
--   
-- -- For applicative composition using this combinator see -- Streamly.Data.Fold.Tee. -- -- See also: Streamly.Data.Fold.Tee -- -- Note that nested applications of teeWith do not fuse. teeWith :: Monad m => (a -> b -> c) -> Fold m x a -> Fold m x b -> Fold m x c -- | Like teeWith but terminates as soon as the first fold -- terminates. -- -- Pre-release teeWithFst :: Monad m => (b -> c -> d) -> Fold m a b -> Fold m a c -> Fold m a d -- | Like teeWith but terminates as soon as any one of the two folds -- terminates. -- -- Pre-release teeWithMin :: Monad m => (b -> c -> d) -> Fold m a b -> Fold m a c -> Fold m a d -- | Shortest alternative. Apply both folds in parallel but choose the -- result from the one which consumed least input i.e. take the shortest -- succeeding fold. -- -- If both the folds finish at the same time or if the result is -- extracted before any of the folds could finish then the left one is -- taken. -- -- Pre-release shortest :: Monad m => Fold m x a -> Fold m x b -> Fold m x (Either a b) -- | Longest alternative. Apply both folds in parallel but choose the -- result from the one which consumed more input i.e. take the longest -- succeeding fold. -- -- If both the folds finish at the same time or if the result is -- extracted before any of the folds could finish then the left one is -- taken. -- -- Pre-release longest :: Monad m => Fold m x a -> Fold m x b -> Fold m x (Either a b) -- | Extract the accumulated result of the fold. -- -- Definition: -- --
--   >>> extractM = Fold.drive Stream.nil
--   
-- -- Example: -- --
--   >>> Fold.extractM Fold.toList
--   []
--   
-- -- Pre-release extractM :: Monad m => Fold m a b -> m b -- | Evaluate the initialization effect of a fold. If we are building the -- fold by chaining lazy actions in fold init this would reduce the -- actions to a strict accumulator value. -- -- Pre-release reduce :: Monad m => Fold m a b -> m (Fold m a b) -- | Append a singleton value to the fold, in other words run a single step -- of the fold. -- -- Example: -- --
--   >>> import qualified Data.Foldable as Foldable
--   
--   >>> Foldable.foldlM Fold.snoc Fold.toList [1..3] >>= Fold.drive Stream.nil
--   [1,2,3]
--   
-- -- Pre-release snoc :: Monad m => Fold m a b -> a -> m (Fold m a b) -- | Append a singleton value to the fold. -- -- See examples under addStream. -- -- Pre-release addOne :: Monad m => a -> Fold m a b -> m (Fold m a b) -- | Append a singleton value to the fold, in other words run a single step -- of the fold. -- -- Definition: -- --
--   >>> snocM f = Fold.reduce . Fold.snoclM f
--   
-- -- Pre-release snocM :: Monad m => Fold m a b -> m a -> m (Fold m a b) -- | Append a singleton value to the fold lazily, in other words run a -- single step of the fold. -- -- Definition: -- --
--   >>> snocl f = Fold.snoclM f . return
--   
-- -- Example: -- --
--   >>> import qualified Data.Foldable as Foldable
--   
--   >>> Fold.extractM $ Foldable.foldl Fold.snocl Fold.toList [1..3]
--   [1,2,3]
--   
-- -- Pre-release snocl :: Monad m => Fold m a b -> a -> Fold m a b -- | Append an effect to the fold lazily, in other words run a single step -- of the fold. -- -- Pre-release snoclM :: Monad m => Fold m a b -> m a -> Fold m a b -- | Close a fold so that it does not accept any more input. close :: Monad m => Fold m a b -> Fold m a b -- | Check if the fold has terminated and can take no more input. -- -- Pre-release isClosed :: Monad m => Fold m a b -> m Bool -- | Change the underlying monad of a fold. Also known as hoist. -- -- Pre-release morphInner :: (forall x. m x -> n x) -> Fold m a b -> Fold n a b -- | Adapt a pure fold to any monad. -- --
--   >>> generalizeInner = Fold.morphInner (return . runIdentity)
--   
-- -- Pre-release generalizeInner :: Monad m => Fold Identity a b -> Fold m a b -- | Deprecated: Please use foldr' instead. foldr :: Monad m => (a -> b -> b) -> b -> Fold m a b -- | Deprecated: Please use "splitWith" instead serialWith :: Monad m => (a -> b -> c) -> Fold m x a -> Fold m x b -> Fold m x c -- | Tee is a newtype wrapper over the Fold type providing -- distributing Applicative, Semigroup, Monoid, -- Num, Floating and Fractional instances. -- -- The input received by the composed Tee is replicated and -- distributed to the constituent folds of the Tee. -- -- For example, to compute the average of numbers in a stream without -- going through the stream twice: -- --
--   >>> avg = (/) <$> (Tee Fold.sum) <*> (Tee $ fmap fromIntegral Fold.length)
--   
--   >>> Stream.fold (unTee avg) $ Stream.fromList [1.0..100.0]
--   50.5
--   
-- -- Similarly, the Semigroup and Monoid instances of -- Tee distribute the input to both the folds and combine the -- outputs using Monoid or Semigroup instances of the output types: -- --
--   >>> import Data.Monoid (Sum(..))
--   
--   >>> t = Tee Fold.one <> Tee Fold.latest
--   
--   >>> Stream.fold (unTee t) (fmap Sum $ Stream.enumerateFromTo 1.0 100.0)
--   Just (Sum {getSum = 101.0})
--   
-- -- The Num, Floating, and Fractional instances work -- in the same way. newtype Tee m a b Tee :: Fold m a b -> Tee m a b [unTee] :: Tee m a b -> Fold m a b -- | Deprecated: Please use unTee instead. toFold :: Tee m a b -> Fold m a b -- | Apply a monadic function on the input and return the input. -- --
--   >>> Stream.fold (Fold.lmapM (Fold.tracing print) Fold.drain) $ (Stream.enumerateFromTo (1 :: Int) 2)
--   1
--   2
--   
-- -- Pre-release tracing :: Monad m => (a -> m b) -> a -> m a -- | Apply a monadic function to each element flowing through and discard -- the results. -- --
--   >>> Stream.fold (Fold.trace print Fold.drain) $ (Stream.enumerateFromTo (1 :: Int) 2)
--   1
--   2
--   
-- --
--   >>> trace f = Fold.lmapM (Fold.tracing f)
--   
-- -- Pre-release trace :: Monad m => (a -> m b) -> Fold m a r -> Fold m a r -- | Semigroup concat. Append the elements of an input stream to a provided -- starting value. -- -- Definition: -- --
--   >>> sconcat = Fold.foldl' (<>)
--   
-- --
--   >>> semigroups = fmap Data.Monoid.Sum $ Stream.enumerateFromTo 1 10
--   
--   >>> Stream.fold (Fold.sconcat 10) semigroups
--   Sum {getSum = 65}
--   
sconcat :: (Monad m, Semigroup a) => a -> Fold m a a -- | Monoid concat. Fold an input stream consisting of monoidal elements -- using mappend and mempty. -- -- Definition: -- --
--   >>> mconcat = Fold.sconcat mempty
--   
-- --
--   >>> monoids = fmap Data.Monoid.Sum $ Stream.enumerateFromTo 1 10
--   
--   >>> Stream.fold Fold.mconcat monoids
--   Sum {getSum = 55}
--   
mconcat :: (Monad m, Monoid a) => Fold m a a -- | Definition: -- --
--   >>> foldMap f = Fold.lmap f Fold.mconcat
--   
-- -- Make a fold from a pure function that folds the output of the function -- using mappend and mempty. -- --
--   >>> sum = Fold.foldMap Data.Monoid.Sum
--   
--   >>> Stream.fold sum $ Stream.enumerateFromTo 1 10
--   Sum {getSum = 55}
--   
foldMap :: (Monad m, Monoid b) => (a -> b) -> Fold m a b -- | Definition: -- --
--   >>> foldMapM f = Fold.lmapM f Fold.mconcat
--   
-- -- Make a fold from a monadic function that folds the output of the -- function using mappend and mempty. -- --
--   >>> sum = Fold.foldMapM (return . Data.Monoid.Sum)
--   
--   >>> Stream.fold sum $ Stream.enumerateFromTo 1 10
--   Sum {getSum = 55}
--   
foldMapM :: (Monad m, Monoid b) => (a -> m b) -> Fold m a b -- | Definitions: -- --
--   >>> drainMapM f = Fold.lmapM f Fold.drain
--   
--   >>> drainMapM f = Fold.foldMapM (void . f)
--   
-- -- Drain all input after passing it through a monadic function. This is -- the dual of mapM_ on stream producers. drainMapM :: Monad m => (a -> m b) -> Fold m a () -- | Terminates with Nothing as soon as it finds an element -- different from the previous one; returns the element if the -- entire input consists of the same element. the :: (Monad m, Eq a) => Fold m a (Maybe a) -- | Compute a numerically stable arithmetic mean of all elements in the -- input stream. mean :: (Monad m, Fractional a) => Fold m a a -- | Compute an Int sized polynomial rolling hash of a stream. -- --
--   >>> rollingHash = Fold.rollingHashWithSalt Fold.defaultSalt
--   
rollingHash :: (Monad m, Enum a) => Fold m a Int64 -- | A default salt used in the implementation of rollingHash. defaultSalt :: Int64 -- | Compute an Int sized polynomial rolling hash -- --
--   H = salt * k ^ n + c1 * k ^ (n - 1) + c2 * k ^ (n - 2) + ... + cn * k ^ 0
--   
-- -- Where c1, c2, ..., cn are the elements in the -- input stream and k is a constant. -- -- This hash is often used in the Rabin-Karp string search algorithm. -- -- See https://en.wikipedia.org/wiki/Rolling_hash rollingHashWithSalt :: (Monad m, Enum a) => Int64 -> Fold m a Int64 -- | Compute an Int sized polynomial rolling hash of the first n -- elements of a stream. -- --
--   >>> rollingHashFirstN n = Fold.take n Fold.rollingHash
--   
-- -- Pre-release rollingHashFirstN :: (Monad m, Enum a) => Int -> Fold m a Int64 -- | Determine the sum of all elements of a stream of numbers. Returns -- additive identity (0) when the stream is empty. Note that -- this is not numerically stable for floating point numbers. -- --
--   >>> sum = Fold.cumulative Fold.windowSum
--   
-- -- Same as the following but numerically stable: -- --
--   >>> sum = Fold.foldl' (+) 0
--   
--   >>> sum = fmap Data.Monoid.getSum $ Fold.foldMap Data.Monoid.Sum
--   
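-- For example, assuming the usual qualified Fold and Stream doctest
-- imports:
--
--   >>> Stream.fold Fold.sum (Stream.enumerateFromTo 1 100)
--   5050
--   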
sum :: (Monad m, Num a) => Fold m a a -- | Determine the product of all elements of a stream of numbers. Returns -- multiplicative identity (1) when the stream is empty. The -- fold terminates when it encounters (0) in its input. -- -- Same as the following but terminates on multiplication by 0: -- --
--   >>> product = fmap Data.Monoid.getProduct $ Fold.foldMap Data.Monoid.Product
--   
product :: (Monad m, Num a, Eq a) => Fold m a a -- | Determine the maximum element in a stream using the supplied -- comparison function. maximumBy :: Monad m => (a -> a -> Ordering) -> Fold m a (Maybe a) -- | Determine the maximum element in a stream. -- -- Definitions: -- --
--   >>> maximum = Fold.maximumBy compare
--   
--   >>> maximum = Fold.foldl1' max
--   
-- -- Same as the following but without a default maximum. The Max -- Monoid uses the minBound as the default maximum: -- --
--   >>> maximum = fmap Data.Semigroup.getMax $ Fold.foldMap Data.Semigroup.Max
--   
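-- For example:
--
--   >>> Stream.fold Fold.maximum (Stream.fromList [1,3,2 :: Int])
--   Just 3
--   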
maximum :: (Monad m, Ord a) => Fold m a (Maybe a) -- | Computes the minimum element with respect to the given comparison -- function. minimumBy :: Monad m => (a -> a -> Ordering) -> Fold m a (Maybe a) -- | Determine the minimum element in a stream. -- -- Definitions: -- --
--   >>> minimum = Fold.minimumBy compare
--   
--   >>> minimum = Fold.foldl1' min
--   
-- -- Same as the following but without a default minimum. The Min -- Monoid uses the maxBound as the default minimum: -- --
--   >>> minimum = fmap Data.Semigroup.getMin $ Fold.foldMap Data.Semigroup.Min
--   
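-- For example:
--
--   >>> Stream.fold Fold.minimum (Stream.fromList [3,1,2 :: Int])
--   Just 1
--   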
minimum :: (Monad m, Ord a) => Fold m a (Maybe a) -- | Buffers the input stream to a list in the reverse order of the input. -- -- Definition: -- --
--   >>> toListRev = Fold.foldl' (flip (:)) []
--   
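-- For example, assuming the usual Fold and Stream doctest imports:
--
--   >>> Stream.fold Fold.toListRev (Stream.fromList [1,2,3 :: Int])
--   [3,2,1]
--   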
-- -- Warning! working on large lists accumulated as buffers in -- memory could be very inefficient, consider using Streamly.Array -- instead. toListRev :: Monad m => Fold m a [a] -- | A fold that buffers its input to a pure stream. -- -- Warning! working on large streams accumulated as buffers in -- memory could be very inefficient, consider using -- Streamly.Data.Array instead. -- --
--   >>> toStream = fmap Stream.fromList Fold.toList
--   
-- -- Pre-release toStream :: (Monad m, Monad n) => Fold m a (Stream n a) -- | Buffers the input stream to a pure stream in the reverse order of the -- input. -- --
--   >>> toStreamRev = fmap Stream.fromList Fold.toListRev
--   
-- -- Warning! working on large streams accumulated as buffers in -- memory could be very inefficient, consider using -- Streamly.Data.Array instead. -- -- Pre-release toStreamRev :: (Monad m, Monad n) => Fold m a (Stream n a) -- | Get the top n elements using the supplied comparison -- function. -- -- To get bottom n elements instead: -- --
--   >>> bottomBy cmp = Fold.topBy (flip cmp)
--   
-- -- Example: -- --
--   >>> stream = Stream.fromList [2::Int,7,9,3,1,5,6,11,17]
--   
--   >>> Stream.fold (Fold.topBy compare 3) stream >>= MutArray.toList
--   [17,11,9]
--   
-- -- Pre-release topBy :: (MonadIO m, Unbox a) => (a -> a -> Ordering) -> Int -> Fold m a (MutArray a) -- | Fold the input stream to top n elements. -- -- Definition: -- --
--   >>> top = Fold.topBy compare
--   
-- --
--   >>> stream = Stream.fromList [2::Int,7,9,3,1,5,6,11,17]
--   
--   >>> Stream.fold (Fold.top 3) stream >>= MutArray.toList
--   [17,11,9]
--   
-- -- Pre-release top :: (MonadIO m, Unbox a, Ord a) => Int -> Fold m a (MutArray a) -- | Get the bottom most n elements using the supplied comparison -- function. bottomBy :: (MonadIO m, Unbox a) => (a -> a -> Ordering) -> Int -> Fold m a (MutArray a) -- | Fold the input stream to bottom n elements. -- -- Definition: -- --
--   >>> bottom = Fold.bottomBy compare
--   
-- --
--   >>> stream = Stream.fromList [2::Int,7,9,3,1,5,6,11,17]
--   
--   >>> Stream.fold (Fold.bottom 3) stream >>= MutArray.toList
--   [1,2,3]
--   
-- -- Pre-release bottom :: (MonadIO m, Unbox a, Ord a) => Int -> Fold m a (MutArray a) -- | Returns the latest element of the input stream, if any. -- --
--   >>> latest = Fold.foldl1' (\_ x -> x)
--   
--   >>> latest = fmap getLast $ Fold.foldMap (Last . Just)
--   
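-- For example:
--
--   >>> Stream.fold Fold.latest (Stream.fromList [1,2,3 :: Int])
--   Just 3
--   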
latest :: Monad m => Fold m a (Maybe a) -- | Pair each element of a fold input with its index, starting from index -- 0. indexingWith :: Monad m => Int -> (Int -> Int) -> Fold m a (Maybe (Int, a)) -- |
--   >>> indexing = Fold.indexingWith 0 (+ 1)
--   
indexing :: Monad m => Fold m a (Maybe (Int, a)) -- |
--   >>> indexingRev n = Fold.indexingWith n (subtract 1)
--   
indexingRev :: Monad m => Int -> Fold m a (Maybe (Int, a)) -- | Apply a function on every two successive elements of a stream. The -- first argument of the map function is the previous element and the -- second argument is the current element. When processing the very first -- element in the stream, the previous element is Nothing. -- -- Pre-release rollingMapM :: Monad m => (Maybe a -> a -> m b) -> Fold m a b -- | Returns the latest element omitting the first occurrence that -- satisfies the given equality predicate. -- -- Example: -- --
--   >>> input = Stream.fromList [1,3,3,5]
--   
--   >>> Stream.fold Fold.toList $ Stream.scanMaybe (Fold.deleteBy (==) 3) input
--   [1,3,5]
--   
deleteBy :: Monad m => (a -> a -> Bool) -> a -> Fold m a (Maybe a) -- | Return the latest unique element using the supplied comparison -- function. Returns Nothing if the current element is the same as the -- last element, otherwise returns Just. -- -- Example, strip duplicate path separators: -- --
--   >>> input = Stream.fromList "//a//b"
--   
--   >>> f x y = x == '/' && y == '/'
--   
--   >>> Stream.fold Fold.toList $ Stream.scanMaybe (Fold.uniqBy f) input
--   "/a/b"
--   
-- -- Space: O(1) -- -- Pre-release uniqBy :: Monad m => (a -> a -> Bool) -> Fold m a (Maybe a) -- | See uniqBy. -- -- Definition: -- --
--   >>> uniq = Fold.uniqBy (==)
--   
uniq :: (Monad m, Eq a) => Fold m a (Maybe a) -- | Emit only repeated elements, once. -- -- Unimplemented repeated :: Fold m a (Maybe a) -- | Returns the index of the latest element if the element satisfies the -- given predicate. findIndices :: Monad m => (a -> Bool) -> Fold m a (Maybe Int) -- | Returns the index of the latest element if the element matches the -- given value. -- -- Definition: -- --
--   >>> elemIndices a = Fold.findIndices (== a)
--   
elemIndices :: (Monad m, Eq a) => a -> Fold m a (Maybe Int) -- | Take one element from the stream and stop. -- -- Definition: -- --
--   >>> one = Fold.maybe Just
--   
-- -- This is similar to the stream uncons operation. one :: Monad m => Fold m a (Maybe a) -- | Consume one element, return True if successful else return -- False. In other words, test if the input is empty or not. -- -- WARNING! It consumes one element if the stream is not empty. If that -- is not what you want please use the eof parser instead. -- -- Definition: -- --
--   >>> null = fmap isJust Fold.one
--   
null :: Monad m => Fold m a Bool -- | Consume a single element and return it if it passes the predicate else -- return Nothing. -- -- Definition: -- --
--   >>> satisfy f = Fold.maybe (\a -> if f a then Just a else Nothing)
--   
-- -- Pre-release satisfy :: Monad m => (a -> Bool) -> Fold m a (Maybe a) -- | Consume a single input and transform it using the supplied -- Maybe returning function. -- -- Pre-release maybe :: Monad m => (a -> Maybe b) -> Fold m a (Maybe b) -- | A fold that drains the first n elements of its input, running the -- effects and discarding the results. -- -- Definition: -- --
--   >>> drainN n = Fold.take n Fold.drain
--   
-- -- Pre-release drainN :: Monad m => Int -> Fold m a () -- | Like index, except with a more general Integral argument -- -- Pre-release indexGeneric :: (Integral i, Monad m) => i -> Fold m a (Maybe a) -- | Return the element at the given index. -- -- Definition: -- --
--   >>> index = Fold.indexGeneric
--   
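-- For example:
--
--   >>> Stream.fold (Fold.index 2) (Stream.fromList "hello")
--   Just 'l'
--   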
index :: Monad m => Int -> Fold m a (Maybe a) -- | Returns the first element that satisfies the given predicate. -- -- Pre-release findM :: Monad m => (a -> m Bool) -> Fold m a (Maybe a) -- | Returns the first element that satisfies the given predicate. find :: Monad m => (a -> Bool) -> Fold m a (Maybe a) -- | In a stream of (key-value) pairs (a, b), return the value -- b of the first pair where the key equals the given value -- a. -- -- Definition: -- --
--   >>> lookup x = fmap snd <$> Fold.find ((== x) . fst)
--   
lookup :: (Eq a, Monad m) => a -> Fold m (a, b) (Maybe b) -- | Returns the first index that satisfies the given predicate. findIndex :: Monad m => (a -> Bool) -> Fold m a (Maybe Int) -- | Returns the first index where a given value is found in the stream. -- -- Definition: -- --
--   >>> elemIndex a = Fold.findIndex (== a)
--   
elemIndex :: (Eq a, Monad m) => a -> Fold m a (Maybe Int) -- | Return True if the given element is present in the stream. -- -- Definition: -- --
--   >>> elem a = Fold.any (== a)
--   
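-- For example:
--
--   >>> Stream.fold (Fold.elem 'o') (Stream.fromList "hello")
--   True
--   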
elem :: (Eq a, Monad m) => a -> Fold m a Bool -- | Returns True if the given element is not present in the stream. -- -- Definition: -- --
--   >>> notElem a = Fold.all (/= a)
--   
notElem :: (Eq a, Monad m) => a -> Fold m a Bool -- | Returns True if all elements of the input satisfy the -- predicate. -- -- Definition: -- --
--   >>> all p = Fold.lmap p Fold.and
--   
-- -- Example: -- --
--   >>> Stream.fold (Fold.all (== 0)) $ Stream.fromList [1,0,1]
--   False
--   
all :: Monad m => (a -> Bool) -> Fold m a Bool -- | Returns True if any element of the input satisfies the -- predicate. -- -- Definition: -- --
--   >>> any p = Fold.lmap p Fold.or
--   
-- -- Example: -- --
--   >>> Stream.fold (Fold.any (== 0)) $ Stream.fromList [1,0,1]
--   True
--   
any :: Monad m => (a -> Bool) -> Fold m a Bool -- | Returns True if all elements are True, False -- otherwise -- -- Definition: -- --
--   >>> and = Fold.all (== True)
--   
and :: Monad m => Fold m Bool Bool -- | Returns True if any element is True, False -- otherwise -- -- Definition: -- --
--   >>> or = Fold.any (== True)
--   
or :: Monad m => Fold m Bool Bool takingEndByM :: Monad m => (a -> m Bool) -> Fold m a (Maybe a) -- |
--   >>> takingEndBy p = Fold.takingEndByM (return . p)
--   
takingEndBy :: Monad m => (a -> Bool) -> Fold m a (Maybe a) takingEndByM_ :: Monad m => (a -> m Bool) -> Fold m a (Maybe a) -- |
--   >>> takingEndBy_ p = Fold.takingEndByM_ (return . p)
--   
takingEndBy_ :: Monad m => (a -> Bool) -> Fold m a (Maybe a) droppingWhileM :: Monad m => (a -> m Bool) -> Fold m a (Maybe a) -- |
--   >>> droppingWhile p = Fold.droppingWhileM (return . p)
--   
droppingWhile :: Monad m => (a -> Bool) -> Fold m a (Maybe a) -- | Strip all leading and trailing occurrences of an element passing a -- predicate and make all other consecutive occurrences uniq. -- --
--   > prune p = Stream.dropWhileAround p $ Stream.uniqBy (\x y -> p x && p y)
--   
-- --
--   > Stream.prune isSpace (Stream.fromList "  hello      world!   ")
--   "hello world!"
--   
-- -- Space: O(1) -- -- Unimplemented prune :: (a -> Bool) -> Fold m a (Maybe a) -- | Drive a fold using the supplied Stream, reducing the resulting -- expression strictly at each step. -- -- Definition: -- --
--   >>> drive = flip Stream.fold
--   
-- -- Example: -- --
--   >>> Fold.drive (Stream.enumerateFromTo 1 100) Fold.sum
--   5050
--   
drive :: Monad m => Stream m a -> Fold m a b -> m b -- | Append a stream to a fold to build the fold accumulator incrementally. -- We can repeatedly call addStream on the same fold to continue -- building the fold and finally use drive to finish the fold and -- extract the result. Also see the addOne operation which is a -- singleton version of addStream. -- -- Definitions: -- --
--   >>> addStream stream = Fold.drive stream . Fold.duplicate
--   
-- -- Example, build a list incrementally: -- --
--   >>> :{
--   pure (Fold.toList :: Fold IO Int [Int])
--       >>= Fold.addOne 1
--       >>= Fold.addStream (Stream.enumerateFromTo 2 4)
--       >>= Fold.drive Stream.nil
--       >>= print
--   :}
--   [1,2,3,4]
--   
-- -- This can be used as an O(n) list append compared to the O(n^2) -- ++ when used for incrementally building a list. -- -- Example, build a stream incrementally: -- --
--   >>> :{
--   pure (Fold.toStream :: Fold IO Int (Stream Identity Int))
--       >>= Fold.addOne 1
--       >>= Fold.addStream (Stream.enumerateFromTo 2 4)
--       >>= Fold.drive Stream.nil
--       >>= print
--   :}
--   fromList [1,2,3,4]
--   
-- -- This can be used as an O(n) stream append compared to the O(n^2) -- <> when used for incrementally building a stream. -- -- Example, build an array incrementally: -- --
--   >>> :{
--   pure (Array.write :: Fold IO Int (Array Int))
--       >>= Fold.addOne 1
--       >>= Fold.addStream (Stream.enumerateFromTo 2 4)
--       >>= Fold.drive Stream.nil
--       >>= print
--   :}
--   fromList [1,2,3,4]
--   
-- -- Example, build an array stream incrementally: -- --
--   >>> :{
--   let f :: Fold IO Int (Stream Identity (Array Int))
--       f = Fold.groupsOf 2 (Array.writeN 3) Fold.toStream
--   in pure f
--       >>= Fold.addOne 1
--       >>= Fold.addStream (Stream.enumerateFromTo 2 4)
--       >>= Fold.drive Stream.nil
--       >>= print
--   :}
--   fromList [fromList [1,2],fromList [3,4]]
--   
addStream :: Monad m => Stream m a -> Fold m a b -> m (Fold m a b) -- | Change the predicate function of a Fold from a -> b to -- accept an additional state input (s, a) -> b. Convenient -- to filter with an additional index or time input. -- --
--   >>> filterWithIndex = Fold.with Fold.indexed Fold.filter
--   
-- --
--   filterWithAbsTime = with timestamped filter
--   filterWithRelTime = with timeIndexed filter
--   
-- -- Pre-release with :: (Fold m (s, a) b -> Fold m a b) -> (((s, a) -> c) -> Fold m (s, a) b -> Fold m (s, a) b) -> ((s, a) -> c) -> Fold m a b -> Fold m a b -- | Apply a transformation on a Fold using a Pipe. -- -- Pre-release transform :: Monad m => Pipe m a b -> Fold m b c -> Fold m a c -- | Provide a sliding window of length 2 elements. -- -- See Streamly.Internal.Data.Fold.Window. slide2 :: Monad m => Fold m (a, Maybe a) b -> Fold m a b -- | Scan the input of a Fold to change it in a stateful manner -- using another Fold. The scan stops as soon as the fold -- terminates. -- -- Pre-release scan :: Monad m => Fold m a b -> Fold m b c -> Fold m a c -- | Scan the input of a Fold to change it in a stateful manner -- using another Fold. The scan restarts with a fresh state if the -- fold terminates. -- -- Pre-release scanMany :: Monad m => Fold m a b -> Fold m b c -> Fold m a c -- | Pair each element of a fold input with its index, starting from index -- 0. -- --
--   >>> indexed = Fold.scanMaybe Fold.indexing
--   
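-- For illustration, collecting the indexed elements (assuming the usual
-- Fold and Stream doctest imports):
--
--   >>> Stream.fold (Fold.indexed Fold.toList) (Stream.fromList "abc")
--   [(0,'a'),(1,'b'),(2,'c')]
--   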
indexed :: Monad m => Fold m (Int, a) b -> Fold m a b -- | Zip a stream with the input of a fold using the supplied function. -- -- Unimplemented zipStreamWithM :: (a -> b -> m c) -> Stream m a -> Fold m c x -> Fold m b x -- | Zip a stream with the input of a fold. -- --
--   >>> zip = Fold.zipStreamWithM (curry return)
--   
-- -- Unimplemented zipStream :: Monad m => Stream m a -> Fold m (a, b) x -> Fold m b x -- |
--   >>> mapMaybeM f = Fold.lmapM f . Fold.catMaybes
--   
mapMaybeM :: Monad m => (a -> m (Maybe b)) -> Fold m b r -> Fold m a r -- | mapMaybe f fold maps a Maybe returning function -- f on the input of the fold, filters out Nothing -- elements, and returns the values extracted from Just. -- --
--   >>> mapMaybe f = Fold.lmap f . Fold.catMaybes
--   
--   >>> mapMaybe f = Fold.mapMaybeM (return . f)
--   
-- --
--   >>> f x = if even x then Just x else Nothing
--   
--   >>> fld = Fold.mapMaybe f Fold.toList
--   
--   >>> Stream.fold fld (Stream.enumerateFromTo 1 10)
--   [2,4,6,8,10]
--   
mapMaybe :: Monad m => (a -> Maybe b) -> Fold m b r -> Fold m a r -- | sampleFromthen offset stride samples the element at -- offset index and then every element at strides of -- stride. sampleFromthen :: Monad m => Int -> Int -> Fold m a b -> Fold m a b -- | Continue taking the input until the input sequence matches the -- supplied sequence, taking the supplied sequence as well. If the -- pattern is empty this acts as an identity fold. -- --
--   >>> s = Stream.fromList "hello there. How are you?"
--   
--   >>> f = Fold.takeEndBySeq (Array.fromList "re") Fold.toList
--   
--   >>> Stream.fold f s
--   "hello there"
--   
-- --
--   >>> Stream.fold Fold.toList $ Stream.foldMany f s
--   ["hello there",". How are"," you?"]
--   
-- -- Pre-release takeEndBySeq :: forall m a b. (MonadIO m, Storable a, Unbox a, Enum a, Eq a) => Array a -> Fold m a b -> Fold m a b -- | Like takeEndBySeq but discards the matched sequence. -- -- Pre-release takeEndBySeq_ :: forall m a b. (MonadIO m, Storable a, Unbox a, Enum a, Eq a) => Array a -> Fold m a b -> Fold m a b -- | splitAt n f1 f2 composes folds f1 and f2 -- such that first n elements of its input are consumed by fold -- f1 and the rest of the stream is consumed by fold -- f2. -- --
--   >>> let splitAt_ n xs = Stream.fold (Fold.splitAt n Fold.toList Fold.toList) $ Stream.fromList xs
--   
-- --
--   >>> splitAt_ 6 "Hello World!"
--   ("Hello ","World!")
--   
-- --
--   >>> splitAt_ (-1) [1,2,3]
--   ([],[1,2,3])
--   
-- --
--   >>> splitAt_ 0 [1,2,3]
--   ([],[1,2,3])
--   
-- --
--   >>> splitAt_ 1 [1,2,3]
--   ([1],[2,3])
--   
-- --
--   >>> splitAt_ 3 [1,2,3]
--   ([1,2,3],[])
--   
-- --
--   >>> splitAt_ 4 [1,2,3]
--   ([1,2,3],[])
--   
-- --
--   splitAt n f1 f2 = Fold.splitWith (,) (Fold.take n f1) f2
--   
-- -- Internal splitAt :: Monad m => Int -> Fold m a b -> Fold m a c -> Fold m a (b, c) -- | Distribute one copy of the stream to each fold and zip the results. -- --
--                   |-------Fold m a b--------|
--   ---stream m a---|                         |---m (b,c)
--                   |-------Fold m a c--------|
--   
-- -- Definition: -- --
--   >>> tee = Fold.teeWith (,)
--   
-- -- Example: -- --
--   >>> t = Fold.tee Fold.sum Fold.length
--   
--   >>> Stream.fold t (Stream.enumerateFromTo 1.0 100.0)
--   (5050.0,100)
--   
tee :: Monad m => Fold m a b -> Fold m a c -> Fold m a (b, c) -- | Distribute one copy of the stream to each fold and collect the results -- in a container. -- --
--                   |-------Fold m a b--------|
--   ---stream m a---|                         |---m [b]
--                   |-------Fold m a b--------|
--                   |                         |
--                              ...
--   
-- --
--   >>> Stream.fold (Fold.distribute [Fold.sum, Fold.length]) (Stream.enumerateFromTo 1 5)
--   [15,5]
--   
-- --
--   >>> distribute = Prelude.foldr (Fold.teeWith (:)) (Fold.fromPure [])
--   
-- -- This is the consumer side dual of the producer side sequence -- operation. -- -- Stops when all the folds stop. distribute :: Monad m => [Fold m a b] -> Fold m a [b] -- | Send the elements of tuples in a stream of tuples through two -- different folds. -- --
--                             |-------Fold m a x--------|
--   ---------stream of (a,b)--|                         |----m (x,y)
--                             |-------Fold m b y--------|
--   
-- -- Definition: -- --
--   >>> unzip = Fold.unzipWith id
--   
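-- For example, summing the first components while collecting the second
-- ones (assuming the usual Fold and Stream doctest imports):
--
--   >>> Stream.fold (Fold.unzip Fold.sum Fold.toList) (Stream.fromList [(1,'a'),(2,'b'),(3,'c')])
--   (6,"abc")
--   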
-- -- This is the consumer side dual of the producer side zip -- operation. unzip :: Monad m => Fold m a x -> Fold m b y -> Fold m (a, b) (x, y) -- | Split elements in the input stream into two parts using a pure -- splitter function, direct each part to a different fold and zip the -- results. -- -- Definitions: -- --
--   >>> unzipWith f = Fold.unzipWithM (return . f)
--   
--   >>> unzipWith f fld1 fld2 = Fold.lmap f (Fold.unzip fld1 fld2)
--   
-- -- This fold terminates when both the input folds terminate. -- -- Pre-release unzipWith :: Monad m => (a -> (b, c)) -> Fold m b x -> Fold m c y -> Fold m a (x, y) -- | Like unzipWith but with a monadic splitter function. -- -- Definition: -- --
--   >>> unzipWithM k f1 f2 = Fold.lmapM k (Fold.unzip f1 f2)
--   
-- -- Pre-release unzipWithM :: Monad m => (a -> m (b, c)) -> Fold m b x -> Fold m c y -> Fold m a (x, y) -- | Similar to unzipWithM but terminates when the first fold -- terminates. unzipWithFstM :: Monad m => (a -> m (b, c)) -> Fold m b x -> Fold m c y -> Fold m a (x, y) -- | Similar to unzipWithM but terminates when any fold terminates. unzipWithMinM :: Monad m => (a -> m (b, c)) -> Fold m b x -> Fold m c y -> Fold m a (x, y) -- | Partition the input over two folds using an Either partitioning -- predicate. -- --
--                                       |-------Fold b x--------|
--   -----stream m a --> (Either b c)----|                       |----(x,y)
--                                       |-------Fold c y--------|
--   
-- -- Example, send input to either fold randomly: -- --
--   >>> :set -package random
--   
--   >>> import System.Random (randomIO)
--   
--   >>> randomly a = randomIO >>= \x -> return $ if x then Left a else Right a
--   
--   >>> f = Fold.partitionByM randomly Fold.length Fold.length
--   
--   >>> Stream.fold f (Stream.enumerateFromTo 1 100)
--   ...
--   
-- -- Example, send input to the two folds in a proportion of 2:1: -- --
--   >>> :{
--   proportionately m n = do
--    ref <- newIORef $ cycle $ concat [replicate m Left, replicate n Right]
--    return $ \a -> do
--        r <- readIORef ref
--        writeIORef ref $ tail r
--        return $ Prelude.head r a
--   :}
--   
-- --
--   >>> :{
--   main = do
--    g <- proportionately 2 1
--    let f = Fold.partitionByM g Fold.length Fold.length
--    r <- Stream.fold f (Stream.enumerateFromTo (1 :: Int) 100)
--    print r
--   :}
--   
-- --
--   >>> main
--   (67,33)
--   
-- -- This is the consumer side dual of the producer side mergeBy -- operation. -- -- When one fold is done, any input meant for it is ignored until the -- other fold is also done. -- -- Stops when both the folds stop. -- -- See also: partitionByFstM and partitionByMinM. -- -- Pre-release partitionByM :: Monad m => (a -> m (Either b c)) -> Fold m b x -> Fold m c y -> Fold m a (x, y) -- | Similar to partitionByM but terminates when the first fold -- terminates. partitionByFstM :: Monad m => (a -> m (Either b c)) -> Fold m b x -> Fold m c y -> Fold m a (x, y) -- | Similar to partitionByM but terminates when any fold -- terminates. partitionByMinM :: Monad m => (a -> m (Either b c)) -> Fold m b x -> Fold m c y -> Fold m a (x, y) -- | Same as partitionByM but with a pure partition function. -- -- Example, count even and odd numbers in a stream: -- --
--   >>> :{
--    let f = Fold.partitionBy (\n -> if even n then Left n else Right n)
--                        (fmap (("Even " ++) . show) Fold.length)
--                        (fmap (("Odd "  ++) . show) Fold.length)
--     in Stream.fold f (Stream.enumerateFromTo 1 100)
--   :}
--   ("Even 50","Odd 50")
--   
-- -- Pre-release partitionBy :: Monad m => (a -> Either b c) -> Fold m b x -> Fold m c y -> Fold m a (x, y) -- | Compose two folds such that the combined fold accepts a stream of -- Either and routes the Left values to the first fold and -- Right values to the second fold. -- -- Definition: -- --
--   >>> partition = Fold.partitionBy id
--   
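-- For example, summing the Left values while counting the Right values
-- (assuming the usual Fold and Stream doctest imports):
--
--   >>> Stream.fold (Fold.partition Fold.sum Fold.length) (Stream.fromList [Left 1, Right 'a', Left 2, Right 'b'])
--   (3,2)
--   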
partition :: Monad m => Fold m b x -> Fold m c y -> Fold m (Either b c) (x, y) -- | Group the input stream into groups of elements between low -- and high. Collection starts in chunks of low and -- then keeps doubling until we reach high. Each chunk is folded -- using the provided fold function. -- -- This could be useful, for example, when we are folding a stream of -- unknown size to a stream of arrays and we want to minimize the number -- of allocations. -- -- NOTE: this would be an application of "many" using a terminating fold. -- -- Unimplemented chunksBetween :: Int -> Int -> Fold m a b -> Fold m b c -> Fold m a c intersperseWithQuotes :: (Monad m, Eq a) => a -> a -> a -> Fold m a b -> Fold m b c -> Fold m a c -- | Unfold and flatten the input stream of a fold. -- --
--   Stream.fold (unfoldMany u f) = Stream.fold f . Stream.unfoldMany u
--   
-- -- Pre-release unfoldMany :: Monad m => Unfold m a b -> Fold m b c -> Fold m a c -- | concatSequence f t applies folds from stream t -- sequentially and collects the results using the fold f. -- -- Unimplemented concatSequence :: Fold m b c -> t (Fold m a b) -> Fold m a c -- | Deprecated: Please use drainMapM instead. drainBy :: Monad m => (a -> m b) -> Fold m a () -- | Deprecated: Please use latest instead. last :: Monad m => Fold m a (Maybe a) -- | Extract the first element of the stream, if any. -- --
--   >>> head = Fold.one
--   
-- | Deprecated: Please use "one" instead head :: Monad m => Fold m a (Maybe a) -- | Flatten the monadic output of a fold to pure output. -- | Deprecated: Use "rmapM id" instead sequence :: Monad m => Fold m a (m b) -> Fold m a b -- | Map a monadic function on the output of a fold. -- | Deprecated: Use rmapM instead mapM :: Monad m => (b -> m c) -> Fold m a b -> Fold m a c -- | Compute a numerically stable (population) variance over all elements -- in the input stream. -- | Deprecated: Use the streamly-statistics package instead variance :: (Monad m, Fractional a) => Fold m a a -- | Compute a numerically stable (population) standard deviation over all -- elements in the input stream. -- | Deprecated: Use the streamly-statistics package instead stdDev :: (Monad m, Floating a) => Fold m a a -- | Fold the input to a set. -- -- Definition: -- --
--   >>> toSet = Fold.foldl' (flip Set.insert) Set.empty
--   
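-- For example:
--
--   >>> Stream.fold Fold.toSet (Stream.fromList "hello")
--   fromList "ehlo"
--   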
toSet :: (Monad m, Ord a) => Fold m a (Set a) -- | Fold the input to an int set. For integer inputs this performs better -- than toSet. -- -- Definition: -- --
--   >>> toIntSet = Fold.foldl' (flip IntSet.insert) IntSet.empty
--   
toIntSet :: Monad m => Fold m Int IntSet -- | Count non-duplicate elements in the stream. -- -- Definition: -- --
--   >>> countDistinct = fmap Set.size Fold.toSet
--   
--   >>> countDistinct = Fold.postscan Fold.nub $ Fold.catMaybes $ Fold.length
--   
-- -- The memory used is proportional to the number of distinct elements in -- the stream. To guard against using too much memory, use it as a scan -- and terminate if the count exceeds a threshold. -- -- Space: O(n) where n is the number of distinct elements. -- -- Pre-release countDistinct :: (Monad m, Ord a) => Fold m a Int -- | Like countDistinct but specialized to a stream of Int, -- for better performance. -- -- Definition: -- --
--   >>> countDistinctInt = fmap IntSet.size Fold.toIntSet
--   
--   >>> countDistinctInt = Fold.postscan Fold.nubInt $ Fold.catMaybes $ Fold.length
--   
-- -- Pre-release countDistinctInt :: Monad m => Fold m Int Int -- | Used as a scan. Returns Just for the first occurrence of an -- element, returns Nothing for any other occurrences. -- -- Example: -- --
--   >>> stream = Stream.fromList [1::Int,1,2,3,4,4,5,1,5,7]
--   
--   >>> Stream.fold Fold.toList $ Stream.scanMaybe Fold.nub stream
--   [1,2,3,4,5,7]
--   
-- -- Pre-release nub :: (Monad m, Ord a) => Fold m a (Maybe a) -- | Like nub but specialized to a stream of Int, for better -- performance. -- -- Pre-release nubInt :: Monad m => Fold m Int (Maybe Int) -- | Determine the frequency of each element in the stream. -- -- You can just collect the keys of the resulting map to get the unique -- elements in the stream. -- -- Definition: -- --
--   >>> frequency = Fold.toMap id Fold.length
--   
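-- For example:
--
--   >>> Stream.fold Fold.frequency (Stream.fromList "abracadabra")
--   fromList [('a',5),('b',2),('c',1),('d',1),('r',2)]
--   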
frequency :: (Monad m, Ord a) => Fold m a (Map a Int) demuxToContainer :: (Monad m, IsMap f, Traversable f) => (a -> Key f) -> (a -> m (Fold m a b)) -> Fold m a (f b) demuxToContainerIO :: (MonadIO m, IsMap f, Traversable f) => (a -> Key f) -> (a -> m (Fold m a b)) -> Fold m a (f b) -- | This collects all the results of demux in a Map. demuxToMap :: (Monad m, Ord k) => (a -> k) -> (a -> m (Fold m a b)) -> Fold m a (Map k b) -- | Same as demuxToMap but uses demuxIO for better -- performance. demuxToMapIO :: (MonadIO m, Ord k) => (a -> k) -> (a -> m (Fold m a b)) -> Fold m a (Map k b) demuxKvToContainer :: (Monad m, IsMap f, Traversable f) => (Key f -> m (Fold m a b)) -> Fold m (Key f, a) (f b) -- | Fold a stream of key value pairs using a function that maps keys to -- folds. -- -- Definition: -- --
--   >>> demuxKvToMap f = Fold.demuxToContainer fst (Fold.lmap snd . f)
--   
-- -- Example: -- --
--   >>> import Data.Map (Map)
--   
--   >>> :{
--    let f "SUM" = return Fold.sum
--        f _ = return Fold.product
--        input = Stream.fromList [("SUM",1),("PRODUCT",2),("SUM",3),("PRODUCT",4)]
--     in Stream.fold (Fold.demuxKvToMap f) input :: IO (Map String Int)
--   :}
--   fromList [("PRODUCT",8),("SUM",4)]
--   
-- -- Pre-release demuxKvToMap :: (Monad m, Ord k) => (k -> m (Fold m a b)) -> Fold m (k, a) (Map k b) -- | This is the most general of all demux, classify operations. -- -- See demux for documentation. demuxGeneric :: (Monad m, IsMap f, Traversable f) => (a -> Key f) -> (a -> m (Fold m a b)) -> Fold m a (m (f b), Maybe (Key f, b)) -- | demux getKey getFold: In a key value stream, fold values -- corresponding to each key using a key specific fold. getFold -- is invoked to generate a key specific fold when a key is encountered -- for the first time in the stream. -- -- The first component of the output tuple is a key-value Map of -- in-progress folds. The fold returns the fold result as the second -- component of the output tuple whenever a fold terminates. -- -- If a fold terminates, another instance of the fold is started upon -- receiving an input with that key, getFold is invoked again -- whenever the key is encountered again. -- -- This can be used to scan a stream and collect the results from the -- scan output. -- -- Since the fold generator function is monadic we can add folds -- dynamically. For example, we can maintain a Map of keys to folds in an -- IORef and lookup the fold from that corresponding to a key. This Map -- can be changed dynamically, folds for new keys can be added or folds -- for old keys can be deleted or modified. -- -- Compare with classify, the fold in classify is a static -- fold. -- -- Pre-release demux :: (Monad m, Ord k) => (a -> k) -> (a -> m (Fold m a b)) -> Fold m a (m (Map k b), Maybe (k, b)) -- | This is specialized version of demuxGeneric that uses mutable -- IO cells as fold accumulators for better performance. demuxGenericIO :: (MonadIO m, IsMap f, Traversable f) => (a -> Key f) -> (a -> m (Fold m a b)) -> Fold m a (m (f b), Maybe (Key f, b)) -- | This is specialized version of demux that uses mutable IO cells -- as fold accumulators for better performance. -- -- Keep in mind that the values in the returned Map may be changed by the -- ongoing fold if you are using those concurrently in another thread. demuxIO :: (MonadIO m, Ord k) => (a -> k) -> (a -> m (Fold m a b)) -> Fold m a (m (Map k b), Maybe (k, b)) -- | Given an input stream of key value pairs and a fold for values, fold -- all the values belonging to each key. Useful for map/reduce, -- bucketizing the input in different bins or for generating histograms. -- -- Definition: -- --
--   >>> kvToMap = Fold.toMap fst . Fold.lmap snd
--   
-- -- Example: -- --
--   >>> :{
--    let input = Stream.fromList [("ONE",1),("ONE",1.1),("TWO",2), ("TWO",2.2)]
--     in Stream.fold (Fold.kvToMap Fold.toList) input
--   :}
--   fromList [("ONE",[1.0,1.1]),("TWO",[2.0,2.2])]
--   
-- -- Pre-release kvToMap :: (Monad m, Ord k) => Fold m a b -> Fold m (k, a) (Map k b) toContainer :: (Monad m, IsMap f, Traversable f, Ord (Key f)) => (a -> Key f) -> Fold m a b -> Fold m a (f b) toContainerIO :: (MonadIO m, IsMap f, Traversable f, Ord (Key f)) => (a -> Key f) -> Fold m a b -> Fold m a (f b) -- | Split the input stream based on a key field and fold each split using -- the given fold. Useful for map/reduce, bucketizing the input in -- different bins or for generating histograms. -- -- Example: -- --
--   >>> import Data.Map.Strict (Map)
--   
--   >>> :{
--    let input = Stream.fromList [("ONE",1),("ONE",1.1),("TWO",2), ("TWO",2.2)]
--        classify = Fold.toMap fst (Fold.lmap snd Fold.toList)
--     in Stream.fold classify input :: IO (Map String [Double])
--   :}
--   fromList [("ONE",[1.0,1.1]),("TWO",[2.0,2.2])]
--   
-- -- Once the classifier fold terminates for a particular key any further -- inputs in that bucket are ignored. -- -- Space used is proportional to the number of keys seen till now and -- monotonically increases because it stores whether a key has been seen -- or not. -- -- See demuxToMap for a more powerful version where you can use a -- different fold for each key. A simpler version of toMap -- retaining only the last value for a key can be written as: -- --
--   >>> toMap = Fold.foldl' (\kv (k, v) -> Map.insert k v kv) Map.empty
--   
-- -- Stops: never -- -- Pre-release toMap :: (Monad m, Ord k) => (a -> k) -> Fold m a b -> Fold m a (Map k b) -- | Same as toMap but maybe faster because it uses mutable cells as -- fold accumulators in the Map. toMapIO :: (MonadIO m, Ord k) => (a -> k) -> Fold m a b -> Fold m a (Map k b) classifyGeneric :: (Monad m, IsMap f, Traversable f, Ord (Key f)) => (a -> Key f) -> Fold m a b -> Fold m a (m (f b), Maybe (Key f, b)) -- | Folds the values for each key using the supplied fold. When scanning, -- as soon as the fold is complete, its result is available in the second -- component of the tuple. The first component of the tuple is a snapshot -- of the in-progress folds. -- -- Once the fold for a key is done, any future values of the key are -- ignored. -- -- Definition: -- --
--   >>> classify f fld = Fold.demux f (const fld)
--   
classify :: (Monad m, Ord k) => (a -> k) -> Fold m a b -> Fold m a (m (Map k b), Maybe (k, b)) classifyGenericIO :: (MonadIO m, IsMap f, Traversable f, Ord (Key f)) => (a -> Key f) -> Fold m a b -> Fold m a (m (f b), Maybe (Key f, b)) -- | Same as classify except that it uses mutable IORef cells in the Map -- providing better performance. Be aware that if this is used as a scan, -- the values in the intermediate Maps would be mutable. -- -- Definitions: -- --
--   >>> classifyIO f fld = Fold.demuxIO f (const fld)
--   
classifyIO :: (MonadIO m, Ord k) => (a -> k) -> Fold m a b -> Fold m a (m (Map k b), Maybe (k, b)) -- | Map a function on the incoming as well as outgoing element of a -- rolling window fold. -- --
--   >>> lmap f = Fold.lmap (bimap f (f <$>))
--   
windowLmap :: (c -> a) -> Fold m (a, Maybe a) b -> Fold m (c, Maybe c) b -- | Convert an incremental fold to a cumulative fold using the entire -- input stream as a single window. -- --
--   >>> cumulative f = Fold.lmap (\x -> (x, Nothing)) f
--   
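-- For illustration, folding an entire stream with a window fold
-- (assuming the usual Fold and Stream doctest imports):
--
--   >>> Stream.fold (Fold.cumulative Fold.windowSum) (Stream.fromList [1,2,3,4 :: Int])
--   10
--   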
cumulative :: Fold m (a, Maybe a) b -> Fold m a b -- | Apply a pure function on the latest and the oldest element of the -- window. -- --
--   >>> windowRollingMap f = Fold.windowRollingMapM (\x y -> return $ f x y)
--   
windowRollingMap :: Monad m => (Maybe a -> a -> Maybe b) -> Fold m (a, Maybe a) (Maybe b) -- | Apply an effectful function on the latest and the oldest element of -- the window. windowRollingMapM :: Monad m => (Maybe a -> a -> m (Maybe b)) -> Fold m (a, Maybe a) (Maybe b) -- | The number of elements in the rolling window. -- -- This is the 0th power sum. -- --
--   >>> length = powerSum 0
--   
windowLength :: (Monad m, Num b) => Fold m (a, Maybe a) b -- | Sum of all the elements in a rolling window: -- -- S = x1 + x2 + ... + xn -- -- This is the first power sum. -- --
--   >>> sum = powerSum 1
--   
-- -- Uses Kahan-Babuska-Neumaier style summation for numerical stability of -- floating precision arithmetic. -- -- Space: O(1) -- -- Time: O(n) windowSum :: forall m a. (Monad m, Num a) => Fold m (a, Maybe a) a -- | The sum of all the elements in a rolling window. The input elements -- are required to be integral numbers. -- -- This was written in the hope that it would be a tiny bit faster than -- sum for Integral values. But it turns out that -- sum is 2% faster than this even for integral values! -- -- Internal windowSumInt :: forall m a. (Monad m, Integral a) => Fold m (a, Maybe a) a -- | Sum of the kth power of all the elements in a rolling -- window: -- -- S_k = x1^k + x2^k + ... + xn^k -- --
--   >>> powerSum k = lmap (^ k) sum
--   
-- -- Space: O(1) -- -- Time: O(n) windowPowerSum :: (Monad m, Num a) => Int -> Fold m (a, Maybe a) a -- | Like powerSum but powers can be negative or fractional. This -- is slower than powerSum for positive integral powers. -- --
--   >>> powerSumFrac p = lmap (** p) sum
--   
windowPowerSumFrac :: (Monad m, Floating a) => a -> Fold m (a, Maybe a) a -- | Find the minimum element in a rolling window. -- -- This implementation traverses the entire window buffer to compute the -- minimum whenever we demand it. It performs better than the dequeue -- based implementation in the streamly-statistics package when the -- window size is small (< 30). -- -- If you want to compute the minimum of the entire stream minimum -- is much faster. -- -- Time: O(n * w) where w is the window size. windowMinimum :: (MonadIO m, Storable a, Ord a) => Int -> Fold m a (Maybe a) -- | The maximum element in a rolling window. -- -- See the performance related comments in minimum. -- -- If you want to compute the maximum of the entire stream maximum -- would be much faster. -- -- Time: O(n * w) where w is the window size. windowMaximum :: (MonadIO m, Storable a, Ord a) => Int -> Fold m a (Maybe a) -- | Determine the maximum and minimum in a rolling window. -- -- If you want to compute the range of the entire stream Fold.teeWith -- (,) Fold.maximum Fold.minimum would be much faster. -- -- Space: O(n) where n is the window size. -- -- Time: O(n * w) where w is the window size. windowRange :: (MonadIO m, Storable a, Ord a) => Int -> Fold m a (Maybe (a, a)) -- | Arithmetic mean of elements in a sliding window: -- -- mean = (x1 + x2 + ... + xn) / n -- -- This is also known as the Simple Moving Average (SMA) when used in the -- sliding window and Cumulative Moving Average (CMA) when used on the -- entire stream. -- --
--   >>> mean = Fold.teeWith (/) sum length
--   
-- -- Space: <math> -- -- Time: <math> windowMean :: forall m a. (Monad m, Fractional a) => Fold m (a, Maybe a) a module Streamly.Internal.Data.MutArray -- | An unboxed mutable array. An array is created with a given length and -- capacity. Length is the number of valid elements in the array. -- Capacity is the maximum number of elements that the array can be -- expanded to without having to reallocate the memory. -- -- The elements in the array can be mutated in-place without changing the -- reference (constructor). However, the length of the array cannot be -- mutated in-place. A new array reference is generated when the length -- changes. When the length is increased (upto the maximum reserved -- capacity of the array), the array is not reallocated and the new -- reference uses the same underlying memory as the old one. -- -- Several routines in this module allow the programmer to control the -- capacity of the array. The programmer can control the trade-off -- between memory usage and performance impact due to reallocations when -- growing or shrinking the array. data MutArray a MutArray :: {-# UNPACK #-} !MutByteArray -> {-# UNPACK #-} !Int -> {-# UNPACK #-} !Int -> {-# UNPACK #-} !Int -> MutArray a [arrContents] :: MutArray a -> {-# UNPACK #-} !MutByteArray -- | index into arrContents [arrStart] :: MutArray a -> {-# UNPACK #-} !Int -- | index into arrContents Represents the first invalid index of the -- array. [arrEnd] :: MutArray a -> {-# UNPACK #-} !Int -- | first invalid index of arrContents. [arrBound] :: MutArray a -> {-# UNPACK #-} !Int -- | Return a copy of the array in pinned memory if unpinned, else return -- the original array. pin :: MutArray a -> IO (MutArray a) -- | Return a copy of the array in unpinned memory if pinned, else return -- the original array. unpin :: MutArray a -> IO (MutArray a) -- | Return True if the array is allocated in pinned memory. isPinned :: MutArray a -> Bool -- | Cast an array having elements of type a into an array having -- elements of type b. The length of the array should be a -- multiple of the size of the target element otherwise Nothing is -- returned. cast :: forall a b. Unbox b => MutArray a -> Maybe (MutArray b) -- | Cast an array having elements of type a into an array having -- elements of type b. The array size must be a multiple of the -- size of type b otherwise accessing the last element of the -- array may result into a crash or a random value. -- -- Pre-release castUnsafe :: MutArray a -> MutArray b -- | Cast an MutArray a into an MutArray Word8. asBytes :: MutArray a -> MutArray Word8 -- | Use a MutArray a as Ptr a. This is useful when we -- want to pass an array as a pointer to some operating system call or to -- a "safe" FFI call. -- -- If the array is not pinned it is copied to pinned memory before -- passing it to the monadic action. -- -- Performance Notes: Forces a copy if the array is not pinned. It -- is advised that the programmer keeps this in mind and creates a pinned -- array opportunistically before this operation occurs, to avoid the -- cost of a copy if possible. -- -- Unsafe because of direct pointer operations. The user must -- ensure that they are writing within the legal bounds of the array. -- -- Pre-release unsafePinnedAsPtr :: MonadIO m => MutArray a -> (Ptr a -> m b) -> m b unsafeAsPtr :: MonadIO m => MutArray a -> (Ptr a -> m b) -> m b -- | Create an empty array. empty :: MutArray a -- | Allocates an unpinned array of zero length but growable to the -- specified capacity without reallocation. 
emptyOf :: (MonadIO m, Unbox a) => Int -> m (MutArray a) -- | newArrayWith allocator alignment count allocates a new array -- of zero length and with a capacity to hold count elements, -- using allocator size alignment as the memory allocator -- function. -- -- Alignment must be greater than or equal to machine word size and a -- power of 2. -- -- Alignment is ignored if the allocator allocates unpinned memory. -- -- Pre-release newArrayWith :: forall m a. (MonadIO m, Unbox a) => (Int -> Int -> m MutByteArray) -> Int -> Int -> m (MutArray a) -- | Allocates a pinned array of zero length but growable to the specified -- capacity without reallocation. pinnedEmptyOf :: forall m a. (MonadIO m, Unbox a) => Int -> m (MutArray a) -- | Like newArrayWith but using an allocator is a pinned memory -- allocator and the alignment is dictated by the Unboxed -- instance of the type. -- -- Internal pinnedNewAligned :: (MonadIO m, Unbox a) => Int -> Int -> m (MutArray a) clone :: MonadIO m => MutArray a -> m (MutArray a) pinnedClone :: MonadIO m => MutArray a -> m (MutArray a) -- | O(1) Slice an array in constant time. -- -- Unsafe: The bounds of the slice are not checked. -- -- Unsafe -- -- Pre-release getSliceUnsafe :: forall a. Unbox a => Int -> Int -> MutArray a -> MutArray a -- | O(1) Slice an array in constant time. Throws an error if the -- slice extends out of the array bounds. -- -- Pre-release getSlice :: forall a. Unbox a => Int -> Int -> MutArray a -> MutArray a -- | Create two slices of an array without copying the original array. The -- specified index i is the first index of the second slice. splitAt :: forall a. Unbox a => Int -> MutArray a -> (MutArray a, MutArray a) -- | Drops the separator byte breakOn :: MonadIO m => Word8 -> MutArray Word8 -> m (MutArray Word8, Maybe (MutArray Word8)) data ArrayUnsafe a ArrayUnsafe :: {-# UNPACK #-} !MutByteArray -> {-# UNPACK #-} !Int -> {-# UNPACK #-} !Int -> ArrayUnsafe a -- | Like unsafeCreateOf but takes a new array allocator alloc -- size function as argument. -- --
--   >>> unsafeCreateOfWith alloc n = MutArray.unsafeAppendN (alloc n) n
--   
-- -- Pre-release unsafeCreateOfWith :: forall m a. (MonadIO m, Unbox a) => (Int -> m (MutArray a)) -> Int -> Fold m a (MutArray a) -- | Like createOf but does not check the array bounds when writing. -- The fold driver must not call the step function more than n -- times otherwise it will corrupt the memory and crash. This function -- exists mainly because any conditional in the step function blocks -- fusion causing 10x performance slowdown. -- --
--   >>> unsafeCreateOf = MutArray.unsafeCreateOfWith MutArray.emptyOf
--   
unsafeCreateOf :: forall m a. (MonadIO m, Unbox a) => Int -> Fold m a (MutArray a) -- | Like unsafeCreateOf but creates a pinned array. unsafePinnedCreateOf :: forall m a. (MonadIO m, Unbox a) => Int -> Fold m a (MutArray a) -- | Like createOf but creates a pinned array. pinnedCreateOf :: forall m a. (MonadIO m, Unbox a) => Int -> Fold m a (MutArray a) -- | createOfWith alloc n folds a maximum of n elements -- into an array allocated using the alloc function. -- --
--   >>> createOfWith alloc n = Fold.take n (MutArray.unsafeCreateOfWith alloc n)
--   
--   >>> createOfWith alloc n = MutArray.appendN (alloc n) n
--   
createOfWith :: forall m a. (MonadIO m, Unbox a) => (Int -> m (MutArray a)) -> Int -> Fold m a (MutArray a) -- | createOf n folds a maximum of n elements from the -- input stream to an MutArray. -- --
--   >>> createOf = MutArray.createOfWith MutArray.new
--   
--   >>> createOf n = Fold.take n (MutArray.unsafeCreateOf n)
--   
--   >>> createOf n = MutArray.appendN n (MutArray.emptyOf n)
--   
createOf :: forall m a. (MonadIO m, Unbox a) => Int -> Fold m a (MutArray a) -- | Like createOf but writes the array in reverse order. -- -- Pre-release revCreateOf :: forall m a. (MonadIO m, Unbox a) => Int -> Fold m a (MutArray a) -- | Like create but creates a pinned array. pinnedCreate :: forall m a. (MonadIO m, Unbox a) => Fold m a (MutArray a) -- | createWith minCount folds the whole input to a single array. -- The array starts at a size big enough to hold minCount elements, the -- size is doubled every time the array needs to be grown. -- -- Caution! Do not use this on infinite streams. -- --
--   >>> f n = MutArray.appendWith (* 2) (MutArray.emptyOf n)
--   
--   >>> createWith n = Fold.rmapM MutArray.rightSize (f n)
--   
--   >>> createWith n = Fold.rmapM MutArray.fromChunksK (MutArray.buildChunks n)
--   
-- -- Pre-release createWith :: forall m a. (MonadIO m, Unbox a) => Int -> Fold m a (MutArray a) -- | Fold the whole input to a single array. -- -- Same as createWith using an initial array size of -- arrayChunkBytes bytes rounded up to the element size. -- -- Caution! Do not use this on infinite streams. create :: forall m a. (MonadIO m, Unbox a) => Fold m a (MutArray a) -- | Create a MutArray from the first N elements of a list. The -- array is allocated to size N, if the list terminates before N elements -- then the array may hold less than N elements. fromListN :: (MonadIO m, Unbox a) => Int -> [a] -> m (MutArray a) -- | Like fromListN but creates a pinned array. pinnedFromListN :: (MonadIO m, Unbox a) => Int -> [a] -> m (MutArray a) -- | Create a MutArray from a list. The list must be of finite size. fromList :: (MonadIO m, Unbox a) => [a] -> m (MutArray a) -- | Like fromList but creates a pinned array. pinnedFromList :: (MonadIO m, Unbox a) => [a] -> m (MutArray a) -- | Like fromListN but writes the array in reverse order. -- -- Pre-release fromListRevN :: (MonadIO m, Unbox a) => Int -> [a] -> m (MutArray a) -- | Like fromList but writes the contents of the list in reverse -- order. fromListRev :: (MonadIO m, Unbox a) => [a] -> m (MutArray a) -- | Use the createOf fold instead. -- --
--   >>> fromStreamN n = Stream.fold (MutArray.createOf n)
--   
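-- -- A runnable sketch of the recommended replacement, added for illustration -- (module names as in this file; not part of the original haddocks): -- --
--   import qualified Streamly.Data.MutArray as MutArray
--   import qualified Streamly.Data.Stream as Stream
--   main :: IO ()
--   main = do
--     -- createOf 3 terminates after consuming at most 3 elements
--     arr <- Stream.fold (MutArray.createOf 3) (Stream.fromList [1, 2, 3, 4, 5 :: Int])
--     MutArray.toList arr >>= print -- [1,2,3]
--   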
fromStreamN :: forall m a. (MonadIO m, Unbox a) => Int -> Stream m a -> m (MutArray a) -- | Create an Array from a stream. This is useful when we want to -- create a single array from a stream of unknown size. createOf -- is at least twice as efficient when the size is already known. -- -- Note that if the input stream is too large memory allocation for the -- array may fail. When the stream size is not known, chunksOf -- followed by processing of individual arrays in the resulting stream -- should be preferred. -- -- Pre-release fromStream :: (MonadIO m, Unbox a) => Stream m a -> m (MutArray a) -- | Convert a pure stream in Identity monad to a mutable array. fromPureStreamN :: (MonadIO m, Unbox a) => Int -> Stream Identity a -> m (MutArray a) -- | Convert a pure stream in Identity monad to a mutable array. fromPureStream :: (MonadIO m, Unbox a) => Stream Identity a -> m (MutArray a) fromByteStr# :: MonadIO m => Addr# -> m (MutArray Word8) fromPtrN :: MonadIO m => Int -> Ptr Word8 -> m (MutArray Word8) -- | Convert an array stream to an array. Note that this requires peak -- memory that is double the size of the array stream. -- -- Also see fromChunksRealloced. fromChunksK :: (Unbox a, MonadIO m) => StreamK m (MutArray a) -> m (MutArray a) -- | Also see fromChunksK. fromChunksRealloced :: forall m a. (MonadIO m, Unbox a) => Stream m (MutArray a) -> m (MutArray a) -- | O(1) Write the given element at the given index in the array. -- Performs in-place mutation of the array. -- --
--   >>> putIndex ix arr val = MutArray.modifyIndex ix arr (const (val, ()))
--   
--   >>> f = MutArray.putIndices
--   
--   >>> putIndex ix arr val = Stream.fold (f arr) (Stream.fromPure (ix, val))
--   
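-- -- For concreteness, a small self-contained example of in-place update and -- lookup (added for illustration, using the released Streamly.Data.MutArray): -- --
--   import qualified Streamly.Data.MutArray as MutArray
--   main :: IO ()
--   main = do
--     arr <- MutArray.fromList [10, 20, 30 :: Int]
--     MutArray.putIndex 1 arr 99        -- in-place update, same array reference
--     MutArray.getIndex 1 arr >>= print -- Just 99
--   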
putIndex :: forall m a. (MonadIO m, Unbox a) => Int -> MutArray a -> a -> m () -- | Write the given element to the given index of the array. Does not -- check if the index is out of bounds of the array. -- -- Pre-release putIndexUnsafe :: forall m a. (MonadIO m, Unbox a) => Int -> MutArray a -> a -> m () -- | Write an input stream of (index, value) pairs to an array. Throws an -- error if any index is out of bounds. -- -- Pre-release putIndices :: forall m a. (MonadIO m, Unbox a) => MutArray a -> Fold m (Int, a) () -- | Modify a given index of an array using a modifier function. -- -- Unsafe because it does not check the bounds of the array. -- -- Pre-release modifyIndexUnsafe :: forall m a b. (MonadIO m, Unbox a) => Int -> MutArray a -> (a -> (a, b)) -> m b -- | Modify a given index of an array using a modifier function. -- -- Pre-release modifyIndex :: forall m a b. (MonadIO m, Unbox a) => Int -> MutArray a -> (a -> (a, b)) -> m b -- | Modify the array indices generated by the supplied stream. -- -- Pre-release modifyIndices :: forall m a. (MonadIO m, Unbox a) => MutArray a -> (Int -> a -> a) -> Fold m Int () -- | Modify each element of an array using the supplied modifier function. -- -- This is an in-place equivalent of an immutable map operation. -- -- Pre-release modify :: forall m a. (MonadIO m, Unbox a) => MutArray a -> (a -> a) -> m () -- | Swap the elements at two indices. -- -- Pre-release swapIndices :: forall m a. (MonadIO m, Unbox a) => Int -> Int -> MutArray a -> m () -- | Swap the elements at two indices without validating the indices. -- -- Unsafe: This could result in memory corruption if indices are -- not valid. -- -- Pre-release unsafeSwapIndices :: forall m a. (MonadIO m, Unbox a) => Int -> Int -> MutArray a -> m () -- | O(1) Lookup the element at the given index. Index starts from -- 0. getIndex :: forall m a. (MonadIO m, Unbox a) => Int -> MutArray a -> m (Maybe a) -- | Return the element at the specified index without checking the bounds. -- -- Unsafe because it does not check the bounds of the array. getIndexUnsafe :: forall m a. (MonadIO m, Unbox a) => Int -> MutArray a -> m a -- | O(1) Lookup the element at the given index from the end of the -- array. Index starts from 0. -- -- Slightly faster than computing the forward index and using getIndex. getIndexRev :: forall m a. (MonadIO m, Unbox a) => Int -> MutArray a -> m a -- | Given an unfold that generates array indices, read the elements on -- those indices from the supplied MutArray. An error is thrown if an -- index is out of bounds. -- -- Pre-release indexReader :: (MonadIO m, Unbox a) => Stream m Int -> Unfold m (MutArray a) a indexReaderWith :: (Monad m, Unbox a) => (forall b. IO b -> m b) -> Stream m Int -> Unfold m (MutArray a) a -- | Convert a MutArray into a stream. -- --
--   >>> read = Stream.unfold MutArray.reader
--   
read :: forall m a. (MonadIO m, Unbox a) => MutArray a -> Stream m a -- | Convert a MutArray into a stream in reverse order. -- --
--   >>> readRev = Stream.unfold MutArray.readerRev
--   
readRev :: forall m a. (MonadIO m, Unbox a) => MutArray a -> Stream m a toStreamWith :: forall m a. (Monad m, Unbox a) => (forall b. IO b -> m b) -> MutArray a -> Stream m a toStreamRevWith :: forall m a. (Monad m, Unbox a) => (forall b. IO b -> m b) -> MutArray a -> Stream m a toStreamK :: forall m a. (MonadIO m, Unbox a) => MutArray a -> StreamK m a toStreamKWith :: forall m a. (Monad m, Unbox a) => (forall b. IO b -> m b) -> MutArray a -> StreamK m a toStreamKRev :: forall m a. (MonadIO m, Unbox a) => MutArray a -> StreamK m a toStreamKRevWith :: forall m a. (Monad m, Unbox a) => (forall b. IO b -> m b) -> MutArray a -> StreamK m a -- | Convert a MutArray into a list. toList :: forall m a. (MonadIO m, Unbox a) => MutArray a -> m [a] producerWith :: forall m a. (Monad m, Unbox a) => (forall b. IO b -> m b) -> Producer m (MutArray a) a -- | Resumable unfold of an array. producer :: forall m a. (MonadIO m, Unbox a) => Producer m (MutArray a) a -- | Unfold an array into a stream. reader :: forall m a. (MonadIO m, Unbox a) => Unfold m (MutArray a) a readerRevWith :: forall m a. (Monad m, Unbox a) => (forall b. IO b -> m b) -> Unfold m (MutArray a) a -- | Unfold an array into a stream in reverse order. readerRev :: forall m a. (MonadIO m, Unbox a) => Unfold m (MutArray a) a -- | O(1) Get the length of the array i.e. the number of elements in -- the array. -- -- Note that byteLength is less expensive than this operation, as -- length involves a costly division operation. length :: forall a. Unbox a => MutArray a -> Int -- | O(1) Get the byte length of the array. byteLength :: MutArray a -> Int -- | Get the total capacity of an array. An array may have space reserved -- beyond the current used length of the array. -- -- Pre-release byteCapacity :: MutArray a -> Int -- | The remaining capacity in the array for appending more elements -- without reallocation. -- -- Pre-release bytesFree :: MutArray a -> Int -- | The page or block size used by the GHC allocator. Allocator allocates -- at least a block and then allocates smaller allocations from within a -- block. blockSize :: Int -- | The default chunk size by which the array creation routines increase -- the size of the array when the array is grown linearly. arrayChunkBytes :: Int -- | Given an Unboxed type (unused first arg) and real allocation -- size (including overhead), return how many elements of that type will -- completely fit in it, returns at least 1. allocBytesToElemCount :: Unbox a => a -> Int -> Int -- | realloc newCapacity array reallocates the array to the -- specified capacity in bytes. -- -- If the new size is less than the original array the array gets -- truncated. If the new size is not a multiple of array element size -- then it is rounded down to multiples of array size. If the new size is -- more than largeObjectThreshold then it is rounded up to the -- block size (4K). -- -- If the original array is pinned, the newly allocated array is also -- pinned. realloc :: forall m a. (MonadIO m, Unbox a) => Int -> MutArray a -> m (MutArray a) -- | grow newCapacity array changes the total capacity of the -- array so that it is enough to hold the specified number of elements. -- Nothing is done if the specified capacity is less than the length of -- the array. -- -- If the capacity is more than largeObjectThreshold then it is -- rounded up to the block size (4K). -- -- Pre-release grow :: forall m a. 
(MonadIO m, Unbox a) => Int -> MutArray a -> m (MutArray a) -- | Like grow but if the requested byte capacity is more than -- largeObjectThreshold then it is rounded up to the closest power -- of 2. -- -- Pre-release growExp :: forall m a. (MonadIO m, Unbox a) => Int -> MutArray a -> m (MutArray a) -- | Resize the allocated memory to drop any reserved free space at the end -- of the array and reallocate it to reduce wastage. -- -- Up to 25% wastage is allowed to avoid reallocations. If the capacity -- is more than largeObjectThreshold then free space up to the -- blockSize is retained. -- -- Pre-release rightSize :: forall m a. (MonadIO m, Unbox a) => MutArray a -> m (MutArray a) -- | Strict left fold of an array. foldl' :: (MonadIO m, Unbox a) => (b -> a -> b) -> b -> MutArray a -> m b -- | Right fold of an array. foldr :: (MonadIO m, Unbox a) => (a -> b -> b) -> b -> MutArray a -> m b -- | Byte compare two arrays. Compare the length of the arrays. If the -- length is equal, compare the lexicographical ordering of two -- underlying byte arrays otherwise return the result of length -- comparison. -- -- Unsafe: Note that the Unbox instance of sum types with -- constructors of different sizes may leave some memory uninitialized -- which can make byte comparison unreliable. -- -- Pre-release byteCmp :: MonadIO m => MutArray a -> MutArray a -> m Ordering -- | Byte equality of two arrays. -- --
--   >>> byteEq arr1 arr2 = fmap (EQ ==) $ MutArray.byteCmp arr1 arr2
--   
-- -- Unsafe: See byteCmp. byteEq :: MonadIO m => MutArray a -> MutArray a -> m Bool -- | Strip elements which match the predicate from both ends. -- -- Pre-release strip :: forall a m. (Unbox a, MonadIO m) => (a -> Bool) -> MutArray a -> m (MutArray a) -- | You may not need to reverse an array because you can consume it in -- reverse using readerRev. To reverse large arrays you can read -- in reverse and write to another array. However, in-place reverse can -- be useful to take advantage of cache locality and when you do not -- want to allocate additional memory. reverse :: forall m a. (MonadIO m, Unbox a) => MutArray a -> m () -- | Generate the next permutation of the sequence, returns False if this -- is the last permutation. -- -- Unimplemented permute :: MutArray a -> m Bool -- | Partition an array into two halves using a partitioning predicate. The -- first half retains values where the predicate is False and the -- second half retains values where the predicate is True. -- -- Pre-release partitionBy :: forall m a. (MonadIO m, Unbox a) => (a -> Bool) -> MutArray a -> m (MutArray a, MutArray a) -- | Shuffle corresponding elements from two arrays using a shuffle -- function. If the shuffle function returns False then do nothing -- otherwise swap the elements. This can be used in a bottom up fold to -- shuffle or reorder the elements. -- -- Unimplemented shuffleBy :: (a -> a -> m Bool) -> MutArray a -> MutArray a -> m () -- | divideBy level partition array performs a top down -- hierarchical recursive partitioning fold of items in the container -- using the given function as the partition function. Level indicates -- the level in the tree where the fold would stop. -- -- This performs a quick sort if the partition function is 'partitionBy -- (< pivot)'. -- -- Unimplemented divideBy :: Int -> (MutArray a -> m (MutArray a, MutArray a)) -> MutArray a -> m () -- | mergeBy level merge array performs a pairwise bottom up fold -- recursively merging the pairs using the supplied merge function. Level -- indicates the level in the tree where the fold would stop. -- -- This performs a random shuffle if the merge function is random. If we -- stop at level 0 and repeatedly apply the function then we can do a -- bubble sort. -- -- Unimplemented mergeBy :: Int -> (MutArray a -> MutArray a -> m ()) -> MutArray a -> m () -- | Given an array sorted in ascending order except the last element being -- out of order, use bubble sort to place the last element at the right -- place such that the array remains sorted in ascending order. -- -- Pre-release bubble :: (MonadIO m, Unbox a) => (a -> a -> Ordering) -> MutArray a -> m () -- | snocWith sizer arr elem mutates arr to append -- elem. The length of the array increases by 1. -- -- If there is no reserved space available in arr it is -- reallocated to a size in bytes determined by the sizer -- oldSizeBytes function, where oldSizeBytes is the -- original size of the array in bytes. -- -- If the new array size is more than largeObjectThreshold we -- automatically round it up to blockSize. -- -- Note that the returned array may be a mutated version of the original -- array. -- -- Pre-release snocWith :: forall m a. (MonadIO m, Unbox a) => (Int -> Int) -> MutArray a -> a -> m (MutArray a) -- | The array is mutated to append an additional element to it. If there -- is no reserved space available in the array then it is reallocated to -- double the original size. -- -- This is useful to reduce allocations when appending an unknown number of -- elements.
-- -- Note that the returned array may be a mutated version of the original -- array. -- --
--   >>> snoc = MutArray.snocWith (* 2)
--   
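-- -- A minimal usage sketch, added for illustration; note that the reference -- returned by each snoc must be used for subsequent operations: -- --
--   import qualified Streamly.Data.MutArray as MutArray
--   main :: IO ()
--   main = do
--     arr0 <- MutArray.emptyOf 2             -- capacity hint, grows as needed
--     arr1 <- MutArray.snoc arr0 (1 :: Int)
--     arr2 <- MutArray.snoc arr1 2
--     arr3 <- MutArray.snoc arr2 3           -- may reallocate, doubling capacity
--     MutArray.toList arr3 >>= print         -- [1,2,3]
--   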
-- -- Performs O(n * log n) copies to grow, but is liberal with memory -- allocation. snoc :: forall m a. (MonadIO m, Unbox a) => MutArray a -> a -> m (MutArray a) -- | The array is mutated to append an additional element to it. If there -- is no reserved space available in the array then it is reallocated to -- grow it by arrayChunkBytes rounded up to blockSize when -- the size becomes more than largeObjectThreshold. -- -- Note that the returned array may be a mutated version of the original -- array. -- -- Performs O(n^2) copies to grow but is thrifty on memory. -- -- Pre-release snocLinear :: forall m a. (MonadIO m, Unbox a) => MutArray a -> a -> m (MutArray a) -- | Like snoc but does not reallocate when pre-allocated array -- capacity becomes full. -- -- Internal snocMay :: forall m a. (MonadIO m, Unbox a) => MutArray a -> a -> m (Maybe (MutArray a)) -- | Really really unsafe, appends the element into the first array, may -- cause silent data corruption or if you are lucky a segfault if the -- first array does not have enough space to append the element. -- -- Internal snocUnsafe :: forall m a. (MonadIO m, Unbox a) => MutArray a -> a -> m (MutArray a) -- | unsafeAppendN n arr appends up to n input items to -- the supplied array. -- -- Unsafe: Do not drive the fold beyond n elements, it will lead -- to memory corruption or segfault. -- -- Any free space left in the array after appending n elements -- is lost. -- -- Internal unsafeAppendN :: forall m a. (MonadIO m, Unbox a) => Int -> m (MutArray a) -> Fold m a (MutArray a) -- | Append n elements to an existing array. Any free space left -- in the array after appending n elements is lost. -- --
--   >>> appendN n initial = Fold.take n (MutArray.unsafeAppendN n initial)
--   
appendN :: forall m a. (MonadIO m, Unbox a) => Int -> m (MutArray a) -> Fold m a (MutArray a) -- | appendWith realloc action mutates the array generated by -- action to append the input stream. If there is no reserved -- space available in the array it is reallocated to a size in bytes -- determined by realloc oldSize, where oldSize is the -- current size of the array in bytes. -- -- Note that the returned array may be a mutated version of original -- array. -- --
--   >>> appendWith sizer = Fold.foldlM' (MutArray.snocWith sizer)
--   
-- -- Pre-release appendWith :: forall m a. (MonadIO m, Unbox a) => (Int -> Int) -> m (MutArray a) -> Fold m a (MutArray a) -- | append action mutates the array generated by action -- to append the input stream. If there is no reserved space available in -- the array it is reallocated to double the size. -- -- Note that the returned array may be a mutated version of original -- array. -- --
--   >>> append = MutArray.appendWith (* 2)
--   
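-- -- A short, self-contained sketch of growing an array from a stream with the -- append fold (added for illustration; not part of the original haddocks): -- --
--   import qualified Streamly.Data.MutArray as MutArray
--   import qualified Streamly.Data.Stream as Stream
--   main :: IO ()
--   main = do
--     -- start from a small array; append doubles the capacity as needed
--     arr <- Stream.fold (MutArray.append (MutArray.emptyOf 4))
--                        (Stream.fromList [1 .. 10 :: Int])
--     MutArray.toList arr >>= print -- [1,2,3,4,5,6,7,8,9,10]
--   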
append :: forall m a. (MonadIO m, Unbox a) => m (MutArray a) -> Fold m a (MutArray a) -- | Copy two arrays into a newly allocated array. If the first array is -- pinned the spliced array is also pinned. spliceCopy :: forall m a. MonadIO m => MutArray a -> MutArray a -> m (MutArray a) -- | spliceWith sizer dst src mutates dst to append -- src. If there is no reserved space available in dst -- it is reallocated to a size determined by the sizer dstBytes -- srcBytes function, where dstBytes is the size of the -- first array and srcBytes is the size of the second array, in -- bytes. -- -- Note that the returned array may be a mutated version of first array. -- -- Pre-release spliceWith :: forall m a. (MonadIO m, Unbox a) => (Int -> Int -> Int) -> MutArray a -> MutArray a -> m (MutArray a) -- | The first array is mutated to append the second array. If there is no -- reserved space available in the first array a new allocation of exact -- required size is done. -- -- Note that the returned array may be a mutated version of first array. -- --
--   >>> splice = MutArray.spliceWith (+)
--   
-- -- If the original array is pinned the spliced array is also pinned. -- -- Pre-release splice :: (MonadIO m, Unbox a) => MutArray a -> MutArray a -> m (MutArray a) -- | Like append but the growth of the array is exponential. -- Whenever a new allocation is required the previous array size is at -- least doubled. -- -- This is useful to reduce allocations when folding many arrays -- together. -- -- Note that the returned array may be a mutated version of first array. -- --
--   >>> spliceExp = MutArray.spliceWith (\l1 l2 -> max (l1 * 2) (l1 + l2))
--   
-- -- Pre-release spliceExp :: (MonadIO m, Unbox a) => MutArray a -> MutArray a -> m (MutArray a) -- | Really really unsafe, appends the second array into the first array. -- If the first array does not have enough space it may cause silent data -- corruption or if you are lucky a segfault. spliceUnsafe :: MonadIO m => MutArray a -> MutArray a -> m (MutArray a) -- | Unbox a Haskell type and append the resulting bytes to a mutable byte -- array. The array is grown exponentially when more space is needed. -- -- Definition: -- --
--   >>> pokeAppend arr x = MutArray.castUnsafe <$> MutArray.snoc (MutArray.castUnsafe arr) x
--   
pokeAppend :: forall m a. (MonadIO m, Unbox a) => MutArray Word8 -> a -> m (MutArray Word8) -- | Like pokeAppend but does not grow the array when pre-allocated -- array capacity becomes full. -- -- Internal pokeAppendMay :: forall m a. (MonadIO m, Unbox a) => MutArray Word8 -> a -> m (Maybe (MutArray Word8)) -- | Skip the specified number of bytes in the array. The data in the -- skipped region remains uninitialized. pokeSkipUnsafe :: Int -> MutArray Word8 -> MutArray Word8 -- | Create a Haskell value from its unboxed representation from the head -- of a byte array, return the value and the remaining array. peekUncons :: forall m a. (MonadIO m, Unbox a) => MutArray Word8 -> m (Maybe a, MutArray Word8) -- | Really really unsafe, create a Haskell value from an unboxed byte -- array, does not check if the array is big enough, may return garbage -- or if you are lucky may cause a segfault. -- -- Internal peekUnconsUnsafe :: forall m a. (MonadIO m, Unbox a) => MutArray Word8 -> m (a, MutArray Word8) -- | Discard the specified number of bytes in the array. peekSkipUnsafe :: Int -> MutArray Word8 -> MutArray Word8 -- | chunksOf n stream groups the elements in the input stream -- into arrays of n elements each. -- -- Same as the following but may be more efficient: -- --
--   >>> chunksOf n = Stream.foldMany (MutArray.createOf n)
--   
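-- -- An illustrative, self-contained sketch of chunking a stream into arrays -- using chunksOf from this internal module (added here, not part of the -- original haddocks): -- --
--   import qualified Streamly.Data.Stream as Stream
--   import qualified Streamly.Internal.Data.MutArray as MutArray
--   main :: IO ()
--   main = do
--     chunks <- Stream.toList (MutArray.chunksOf 4 (Stream.fromList [1 .. 10 :: Int]))
--     -- prints [1,2,3,4], [5,6,7,8] and the short final chunk [9,10]
--     mapM_ (\arr -> MutArray.toList arr >>= print) chunks
--   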
-- -- Pre-release chunksOf :: forall m a. (MonadIO m, Unbox a) => Int -> Stream m a -> Stream m (MutArray a) -- | Like chunksOf but creates pinned arrays. pinnedChunksOf :: forall m a. (MonadIO m, Unbox a) => Int -> Stream m a -> Stream m (MutArray a) -- | Buffer a stream into a stream of arrays. -- --
--   >>> buildChunks n = Fold.many (MutArray.createOf n) Fold.toStreamK
--   
-- -- Breaking an array into an array stream can be useful to consume a -- large array sequentially such that memory of the array is released -- incrementally. -- -- See also: arrayStreamKFromStreamD. -- -- Unimplemented buildChunks :: (MonadIO m, Unbox a) => Int -> Fold m a (StreamK n (MutArray a)) -- | Generate a stream of array slices using a predicate. The array element -- matching the predicate is dropped. -- -- Pre-release splitOn :: (MonadIO m, Unbox a) => (a -> Bool) -> MutArray a -> Stream m (MutArray a) concatWith :: forall m a. (Monad m, Unbox a) => (forall b. IO b -> m b) -> Stream m (MutArray a) -> Stream m a concatRevWith :: forall m a. (Monad m, Unbox a) => (forall b. IO b -> m b) -> Stream m (MutArray a) -> Stream m a -- | Use the "reader" unfold instead. -- --
--   concat = unfoldMany reader
--   
-- -- We can try this if there are any fusion issues in the unfold. concat :: forall m a. (MonadIO m, Unbox a) => Stream m (MutArray a) -> Stream m a -- | Use the "readerRev" unfold instead. -- --
--   concat = unfoldMany readerRev
--   
-- -- We can try this if there are any fusion issues in the unfold. concatRev :: forall m a. (MonadIO m, Unbox a) => Stream m (MutArray a) -> Stream m a data SpliceState s arr SpliceInitial :: s -> SpliceState s arr SpliceBuffering :: s -> arr -> SpliceState s arr SpliceYielding :: arr -> SpliceState s arr -> SpliceState s arr SpliceFinish :: SpliceState s arr -- | Parser pCompactLE maxElems coalesces adjacent arrays in the -- input stream only if the combined size would be less than or equal to -- maxElems elements. Note that it won't split an array if the -- original array is already larger than maxElems. -- -- maxElems must be greater than 0. -- -- Generates unpinned arrays irrespective of the pinning status of input -- arrays. -- -- Internal pCompactLE :: forall m a. (MonadIO m, Unbox a) => Int -> Parser (MutArray a) m (MutArray a) -- | Pinned version of pCompactLE. pPinnedCompactLE :: forall m a. (MonadIO m, Unbox a) => Int -> Parser (MutArray a) m (MutArray a) compactLeAs :: forall m a. (MonadIO m, Unbox a) => PinnedState -> Int -> Stream m (MutArray a) -> Stream m (MutArray a) -- | Fold fCompactGE minElems coalesces adjacent arrays in the -- input stream until the size becomes greater than or equal to -- minElems. -- -- Generates unpinned arrays irrespective of the pinning status of input -- arrays. fCompactGE :: forall m a. (MonadIO m, Unbox a) => Int -> Fold m (MutArray a) (MutArray a) -- | Pinned version of fCompactGE. fPinnedCompactGE :: forall m a. (MonadIO m, Unbox a) => Int -> Fold m (MutArray a) (MutArray a) -- | Like compactGE but for transforming folds instead of stream. -- --
--   >>> lCompactGE n = Fold.many (MutArray.fCompactGE n)
--   
-- -- Generates unpinned arrays irrespective of the pinning status of input -- arrays. lCompactGE :: forall m a. (MonadIO m, Unbox a) => Int -> Fold m (MutArray a) () -> Fold m (MutArray a) () -- | Pinned version of lCompactGE. lPinnedCompactGE :: forall m a. (MonadIO m, Unbox a) => Int -> Fold m (MutArray a) () -> Fold m (MutArray a) () -- | compactGE n stream coalesces adjacent arrays in the -- stream until the size becomes greater than or equal to -- n. -- --
--   >>> compactGE n = Stream.foldMany (MutArray.fCompactGE n)
--   
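-- -- An illustrative sketch of coalescing small chunks with compactGE (added -- for illustration; the exact grouping in the output depends on the sizes of -- the incoming chunks): -- --
--   import qualified Streamly.Data.Stream as Stream
--   import qualified Streamly.Internal.Data.MutArray as MutArray
--   main :: IO ()
--   main = do
--     small <- mapM MutArray.fromList [[1, 2], [3], [4, 5], [6 :: Int]]
--     big <- Stream.toList (MutArray.compactGE 3 (Stream.fromList small))
--     -- each output array holds at least 3 elements (except possibly the last)
--     mapM_ (\arr -> MutArray.toList arr >>= print) big
--   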
compactGE :: (MonadIO m, Unbox a) => Int -> Stream m (MutArray a) -> Stream m (MutArray a) -- | 'compactEQ n' coalesces adajacent arrays in the input stream to arrays -- of exact size n. -- -- Unimplemented compactEQ :: Int -> Stream m (MutArray a) -> Stream m (MutArray a) roundUpToPower2 :: Int -> Int memcpy :: Ptr Word8 -> Ptr Word8 -> Int -> IO () memcmp :: Ptr Word8 -> Ptr Word8 -> Int -> IO Bool c_memchr :: Ptr Word8 -> Word8 -> CSize -> IO (Ptr Word8) -- | Deprecated: Please use unsafePinnedAsPtr instead. asPtrUnsafe :: MonadIO m => MutArray a -> (Ptr a -> m b) -> m b -- | Deprecated: Please use buildChunks instead. writeChunks :: (MonadIO m, Unbox a) => Int -> Fold m a (StreamK n (MutArray a)) -- | Deprecated: Please use "unfoldMany reader" instead. flattenArrays :: forall m a. (MonadIO m, Unbox a) => Stream m (MutArray a) -> Stream m a -- | Deprecated: Please use "unfoldMany readerRev" instead. flattenArraysRev :: forall m a. (MonadIO m, Unbox a) => Stream m (MutArray a) -> Stream m a -- | Deprecated: Please use fromChunksK instead. fromArrayStreamK :: (Unbox a, MonadIO m) => StreamK m (MutArray a) -> m (MutArray a) -- | Deprecated: Please use fromStreamN instead. fromStreamDN :: forall m a. (MonadIO m, Unbox a) => Int -> Stream m a -> m (MutArray a) -- | We could take the approach of doubling the memory allocation on each -- overflow. This would result in more or less the same amount of copying -- as in the chunking approach. However, if we have to shrink in the end -- then it may result in an extra copy of the entire data. -- --
--   >>> fromStreamD = StreamD.fold MutArray.create
--   
-- | Deprecated: Please use fromStream instead. fromStreamD :: (MonadIO m, Unbox a) => Stream m a -> m (MutArray a) -- | Deprecated: Please use byteCmp instead. cmp :: MonadIO m => MutArray a -> MutArray a -> m Ordering -- | Deprecated: Please use indexReader instead. getIndices :: (MonadIO m, Unbox a) => Stream m Int -> Unfold m (MutArray a) a -- | Deprecated: Please use indexReaderWith instead. getIndicesWith :: (Monad m, Unbox a) => (forall b. IO b -> m b) -> Stream m Int -> Unfold m (MutArray a) a -- | Deprecated: Please use grow instead. resize :: forall m a. (MonadIO m, Unbox a) => Int -> MutArray a -> m (MutArray a) -- | Deprecated: Please use growExp instead. resizeExp :: forall m a. (MonadIO m, Unbox a) => Int -> MutArray a -> m (MutArray a) -- | Deprecated: Please use empty instead. nil :: MutArray a new :: (MonadIO m, Unbox a) => Int -> m (MutArray a) pinnedNew :: forall m a. (MonadIO m, Unbox a) => Int -> m (MutArray a) -- | Allocates a pinned empty array that with a reserved capacity of bytes. -- The memory of the array is uninitialized and the allocation is aligned -- as per the Unboxed instance of the type. -- -- Pre-release -- | Deprecated: Please use pinnedEmptyOf with appropriate -- calculation pinnedNewBytes :: MonadIO m => Int -> m (MutArray a) -- | Deprecated: Please use unsafeAppendN instead. writeAppendNUnsafe :: forall m a. (MonadIO m, Unbox a) => Int -> m (MutArray a) -> Fold m a (MutArray a) writeAppendN :: forall m a. (MonadIO m, Unbox a) => Int -> m (MutArray a) -> Fold m a (MutArray a) -- | Deprecated: Please use appendWith instead. writeAppendWith :: forall m a. (MonadIO m, Unbox a) => (Int -> Int) -> m (MutArray a) -> Fold m a (MutArray a) writeAppend :: forall m a. (MonadIO m, Unbox a) => m (MutArray a) -> Fold m a (MutArray a) -- | Deprecated: Please use unsafeCreateOfWith instead. writeNWithUnsafe :: forall m a. (MonadIO m, Unbox a) => (Int -> m (MutArray a)) -> Int -> Fold m a (MutArray a) writeNWith :: forall m a. (MonadIO m, Unbox a) => (Int -> m (MutArray a)) -> Int -> Fold m a (MutArray a) -- | Deprecated: Please use unsafeCreateOf instead. writeNUnsafe :: forall m a. (MonadIO m, Unbox a) => Int -> Fold m a (MutArray a) -- | Deprecated: Please use unsafePinnedCreateOf instead. pinnedWriteNUnsafe :: forall m a. (MonadIO m, Unbox a) => Int -> Fold m a (MutArray a) writeN :: forall m a. (MonadIO m, Unbox a) => Int -> Fold m a (MutArray a) -- | Deprecated: Please use pinnedCreateOf instead. pinnedWriteN :: forall m a. (MonadIO m, Unbox a) => Int -> Fold m a (MutArray a) -- | pinnedWriteNAligned align n folds a maximum of n -- elements from the input stream to a MutArray aligned to the -- given size. -- --
--   >>> pinnedWriteNAligned align = MutArray.createOfWith (MutArray.pinnedNewAligned align)
--   
--   >>> pinnedWriteNAligned align n = MutArray.appendN n (MutArray.pinnedNewAligned align n)
--   
-- -- Pre-release pinnedWriteNAligned :: forall m a. (MonadIO m, Unbox a) => Int -> Int -> Fold m a (MutArray a) -- | Deprecated: Please use createWith instead. writeWith :: forall m a. (MonadIO m, Unbox a) => Int -> Fold m a (MutArray a) write :: forall m a. (MonadIO m, Unbox a) => Fold m a (MutArray a) -- | Deprecated: Please use pinnedCreate instead. pinnedWrite :: forall m a. (MonadIO m, Unbox a) => Fold m a (MutArray a) -- | Deprecated: Please use revCreateOf instead. writeRevN :: forall m a. (MonadIO m, Unbox a) => Int -> Fold m a (MutArray a) -- | Generate a stream of array slice descriptors ((index, len)) of -- specified length from an array, starting from the supplied array -- index. The last slice may be shorter than the requested length -- depending on the array length. -- -- Pre-release sliceIndexerFromLen :: forall m a. (Monad m, Unbox a) => Int -> Int -> Unfold m (MutArray a) (Int, Int) -- | Generate a stream of slices of specified length from an array, -- starting from the supplied array index. The last slice may be shorter -- than the requested length depending on the array length. -- -- Pre-release slicerFromLen :: forall m a. (Monad m, Unbox a) => Int -> Int -> Unfold m (MutArray a) (MutArray a) -- | compactLE maxElems coalesces adjacent arrays in the input -- stream only if the combined size would be less than or equal to -- maxElems elements. Note that it won't split an array if the -- original array is already larger than maxElems. -- -- maxElems must be greater than 0. -- -- Generates unpinned arrays irrespective of the pinning status of input -- arrays. compactLE :: (MonadIO m, Unbox a) => Int -> Stream m (MutArray a) -> Stream m (MutArray a) -- | Pinned version of compactLE. pinnedCompactLE :: forall m a. (MonadIO m, Unbox a) => Int -> Stream m (MutArray a) -> Stream m (MutArray a) -- | Split a stream of arrays on a given separator byte, dropping the -- separator and coalescing all the arrays between two separators into a -- single array. compactOnByte :: MonadIO m => Word8 -> Stream m (MutArray Word8) -> Stream m (MutArray Word8) -- | Like compactOnByte considers the separator in suffix position -- instead of infix position. compactOnByteSuffix :: MonadIO m => Word8 -> Stream m (MutArray Word8) -> Stream m (MutArray Word8) -- | An IORef holds a single Unbox-able value. data IORef a -- | Create a new IORef. -- -- Pre-release newIORef :: forall a. Unbox a => a -> IO (IORef a) -- | Write a value to an IORef. -- -- Pre-release writeIORef :: Unbox a => IORef a -> a -> IO () -- | Modify the value of an IORef using a function with strict -- application. -- -- Pre-release modifyIORef' :: Unbox a => IORef a -> (a -> a) -> IO () -- | Read a value from an IORef. -- -- Pre-release readIORef :: Unbox a => IORef a -> IO a -- | Generate a stream by continuously reading the IORef. -- -- This operation reads the IORef without any synchronization. It can be -- assumed to be atomic because the IORef (MutableByteArray) is always -- aligned to Int boundaries, we are assuming that compiler uses single -- instructions to access the memory. It may read stale values though -- until caches are synchronised in a multiprocessor architecture. -- -- Pre-release pollIntIORef :: (MonadIO m, Unbox a) => IORef a -> Stream m a -- | Deprecated: Please use sliceIndexerFromLen instead. genSlicesFromLen :: forall m a. (Monad m, Unbox a) => Int -> Int -> Unfold m (MutArray a) (Int, Int) -- | Deprecated: Please use slicerFromLen instead. getSlicesFromLen :: forall m a. 
(Monad m, Unbox a) => Int -> Int -> Unfold m (MutArray a) (MutArray a) -- | This module provides a mutable version of Streamly.Data.Array. -- The contents of a mutable array can be modified in-place. For general -- documentation, please refer to the original module. -- -- Please refer to Streamly.Internal.Data.MutArray for functions -- that have not yet been released. -- -- For mutable arrays that work on boxed types, not requiring the -- Unbox constraint, please refer to -- Streamly.Data.MutArray.Generic. module Streamly.Data.MutArray -- | An unboxed mutable array. An array is created with a given length and -- capacity. Length is the number of valid elements in the array. -- Capacity is the maximum number of elements that the array can be -- expanded to without having to reallocate the memory. -- -- The elements in the array can be mutated in-place without changing the -- reference (constructor). However, the length of the array cannot be -- mutated in-place. A new array reference is generated when the length -- changes. When the length is increased (upto the maximum reserved -- capacity of the array), the array is not reallocated and the new -- reference uses the same underlying memory as the old one. -- -- Several routines in this module allow the programmer to control the -- capacity of the array. The programmer can control the trade-off -- between memory usage and performance impact due to reallocations when -- growing or shrinking the array. data MutArray a -- | Allocates an unpinned array of zero length but growable to the -- specified capacity without reallocation. emptyOf :: (MonadIO m, Unbox a) => Int -> m (MutArray a) -- | Allocates a pinned array of zero length but growable to the specified -- capacity without reallocation. pinnedEmptyOf :: forall m a. (MonadIO m, Unbox a) => Int -> m (MutArray a) -- | Create a MutArray from the first N elements of a list. The -- array is allocated to size N, if the list terminates before N elements -- then the array may hold less than N elements. fromListN :: (MonadIO m, Unbox a) => Int -> [a] -> m (MutArray a) -- | Create a MutArray from a list. The list must be of finite size. fromList :: (MonadIO m, Unbox a) => [a] -> m (MutArray a) -- | createOf n folds a maximum of n elements from the -- input stream to an MutArray. -- --
--   >>> createOf = MutArray.createOfWith MutArray.new
--   
--   >>> createOf n = Fold.take n (MutArray.unsafeCreateOf n)
--   
--   >>> createOf n = MutArray.appendN n (MutArray.emptyOf n)
--   
createOf :: forall m a. (MonadIO m, Unbox a) => Int -> Fold m a (MutArray a) -- | Fold the whole input to a single array. -- -- Same as createWith using an initial array size of -- arrayChunkBytes bytes rounded up to the element size. -- -- Caution! Do not use this on infinite streams. create :: forall m a. (MonadIO m, Unbox a) => Fold m a (MutArray a) -- | Return a copy of the array in pinned memory if unpinned, else return -- the original array. pin :: MutArray a -> IO (MutArray a) -- | Return a copy of the array in unpinned memory if pinned, else return -- the original array. unpin :: MutArray a -> IO (MutArray a) -- | Return True if the array is allocated in pinned memory. isPinned :: MutArray a -> Bool -- | The array is mutated to append an additional element to it. If there -- is no reserved space available in the array then it is reallocated to -- double the original size. -- -- This is useful to reduce allocations when appending unknown number of -- elements. -- -- Note that the returned array may be a mutated version of the original -- array. -- --
--   >>> snoc = MutArray.snocWith (* 2)
--   
-- -- Performs O(n * log n) copies to grow, but is liberal with memory -- allocation. snoc :: forall m a. (MonadIO m, Unbox a) => MutArray a -> a -> m (MutArray a) -- | Append n elements to an existing array. Any free space left -- in the array after appending n elements is lost. -- --
--   >>> appendN n initial = Fold.take n (MutArray.unsafeAppendN n initial)
--   
appendN :: forall m a. (MonadIO m, Unbox a) => Int -> m (MutArray a) -> Fold m a (MutArray a) -- | append action mutates the array generated by action -- to append the input stream. If there is no reserved space available in -- the array it is reallocated to double the size. -- -- Note that the returned array may be a mutated version of original -- array. -- --
--   >>> append = MutArray.appendWith (* 2)
--   
append :: forall m a. (MonadIO m, Unbox a) => m (MutArray a) -> Fold m a (MutArray a) -- | O(1) Write the given element at the given index in the array. -- Performs in-place mutation of the array. -- --
--   >>> putIndex ix arr val = MutArray.modifyIndex ix arr (const (val, ()))
--   
--   >>> f = MutArray.putIndices
--   
--   >>> putIndex ix arr val = Stream.fold (f arr) (Stream.fromPure (ix, val))
--   
putIndex :: forall m a. (MonadIO m, Unbox a) => Int -> MutArray a -> a -> m () -- | Write the given element to the given index of the array. Does not -- check if the index is out of bounds of the array. -- -- Pre-release putIndexUnsafe :: forall m a. (MonadIO m, Unbox a) => Int -> MutArray a -> a -> m () -- | Modify a given index of an array using a modifier function. -- -- Pre-release modifyIndex :: forall m a b. (MonadIO m, Unbox a) => Int -> MutArray a -> (a -> (a, b)) -> m b -- | Modify a given index of an array using a modifier function. -- -- Unsafe because it does not check the bounds of the array. -- -- Pre-release modifyIndexUnsafe :: forall m a b. (MonadIO m, Unbox a) => Int -> MutArray a -> (a -> (a, b)) -> m b -- | Modify each element of an array using the supplied modifier function. -- -- This is an in-place equivalent of an immutable map operation. -- -- Pre-release modify :: forall m a. (MonadIO m, Unbox a) => MutArray a -> (a -> a) -> m () -- | O(1) Lookup the element at the given index. Index starts from -- 0. getIndex :: forall m a. (MonadIO m, Unbox a) => Int -> MutArray a -> m (Maybe a) -- | Return the element at the specified index without checking the bounds. -- -- Unsafe because it does not check the bounds of the array. getIndexUnsafe :: forall m a. (MonadIO m, Unbox a) => Int -> MutArray a -> m a -- | Convert a MutArray into a list. toList :: forall m a. (MonadIO m, Unbox a) => MutArray a -> m [a] -- | Convert a MutArray into a stream. -- --
--   >>> read = Stream.unfold MutArray.reader
--   
read :: forall m a. (MonadIO m, Unbox a) => MutArray a -> Stream m a -- | Convert a MutArray into a stream in reverse order. -- --
--   >>> readRev = Stream.unfold MutArray.readerRev
--   
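-- -- A combined usage sketch of read and readRev (added for illustration): -- --
--   import qualified Streamly.Data.MutArray as MutArray
--   import qualified Streamly.Data.Stream as Stream
--   main :: IO ()
--   main = do
--     arr <- MutArray.fromList [1, 2, 3 :: Int]
--     Stream.toList (MutArray.read arr) >>= print    -- [1,2,3]
--     Stream.toList (MutArray.readRev arr) >>= print -- [3,2,1]
--   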
readRev :: forall m a. (MonadIO m, Unbox a) => MutArray a -> Stream m a -- | Unfold an array into a stream. reader :: forall m a. (MonadIO m, Unbox a) => Unfold m (MutArray a) a -- | Unfold an array into a stream in reverse order. readerRev :: forall m a. (MonadIO m, Unbox a) => Unfold m (MutArray a) a -- | Cast an array having elements of type a into an array having -- elements of type b. The length of the array should be a -- multiple of the size of the target element otherwise Nothing is -- returned. cast :: forall a b. Unbox b => MutArray a -> Maybe (MutArray b) -- | Cast an MutArray a into an MutArray Word8. asBytes :: MutArray a -> MutArray Word8 -- | O(1) Get the length of the array i.e. the number of elements in -- the array. -- -- Note that byteLength is less expensive than this operation, as -- length involves a costly division operation. length :: forall a. Unbox a => MutArray a -> Int -- | The Unbox type class provides operations for serialization -- (unboxing) and deserialization (boxing) of fixed-length, non-recursive -- Haskell data types to and from their byte stream representation. -- -- Unbox uses fixed size encoding, therefore, size is independent of the -- value, it must be determined solely by the type. This restriction -- makes types with Unbox instances suitable for storing in -- arrays. Note that sum types may have multiple constructors of -- different sizes, the size of a sum type is computed as the maximum -- required by any constructor. -- -- The peekAt operation reads as many bytes from the mutable byte -- array as the size of the data type and builds a Haskell data -- type from these bytes. pokeAt operation converts a Haskell data -- type to its binary representation which consists of size -- bytes and then stores these bytes into the mutable byte array. These -- operations do not check the bounds of the array, the user of the type -- class is expected to check the bounds before peeking or poking. -- -- IMPORTANT: The serialized data's byte ordering remains the same as the -- host machine's byte order. Therefore, it can not be deserialized from -- host machines with a different byte ordering. -- -- Instances can be derived via Generics, Template Haskell, or written -- manually. Note that the data type must be non-recursive. WARNING! -- Generic and Template Haskell deriving, both hang for recursive data -- types. Deriving via Generics is more convenient but Template Haskell -- should be preferred over Generics for the following reasons: -- --
-- 1. Instances derived via Template Haskell provide better and more
--    reliable performance.
-- 2. Generic deriving allows only 256 fields or constructor tags whereas
--    Template Haskell has no limit.
--
-- Here is an example of deriving an instance of this type class using
-- generics:
--
--   >>> import GHC.Generics (Generic)
--   
--   >>> :{
--   data Object = Object
--       { _int0 :: Int
--       , _int1 :: Int
--       } deriving Generic
--   :}
--   
-- --
--   >>> import Streamly.Data.MutByteArray (Unbox(..))
--   
--   >>> instance Unbox Object
--   
-- -- To derive the instance via Template Haskell: -- --
--   import Streamly.Data.MutByteArray (deriveUnbox)
--   $(deriveUnbox [d|instance Unbox Object|])
--   
-- -- See deriveUnbox for more information on deriving using Template -- Haskell. -- -- If you want to write the instance manually: -- --
--   >>> :{
--   instance Unbox Object where
--       sizeOf _ = 16
--       peekAt i arr = do
--          -- Check the array bounds
--           x0 <- peekAt i arr
--           x1 <- peekAt (i + 8) arr
--           return $ Object x0 x1
--       pokeAt i arr (Object x0 x1) = do
--          -- Check the array bounds
--           pokeAt i arr x0
--           pokeAt (i + 8) arr x1
--   :}
--   
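-- -- Once a type has an Unbox instance it can be stored in unboxed arrays. A -- self-contained sketch follows (the Point type is hypothetical, added only -- for illustration): -- --
--   {-# LANGUAGE DeriveGeneric #-}
--   import GHC.Generics (Generic)
--   import Streamly.Data.MutByteArray (Unbox)
--   import qualified Streamly.Data.MutArray as MutArray
--   data Point = Point Int Int deriving (Show, Generic)
--   instance Unbox Point -- derived via Generics, as described above
--   main :: IO ()
--   main = do
--     arr <- MutArray.fromList [Point 1 2, Point 3 4]
--     MutArray.toList arr >>= print -- [Point 1 2,Point 3 4]
--   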
class Unbox a -- | Get the size. Size cannot be zero, should be at least 1 byte. sizeOf :: Unbox a => Proxy a -> Int -- | Get the size. Size cannot be zero, should be at least 1 byte. sizeOf :: (Unbox a, SizeOfRep (Rep a)) => Proxy a -> Int -- | peekAt byte-offset array reads an element of type a -- from the given byte offset in the array. -- -- IMPORTANT: The implementation of this interface may not check the -- bounds of the array, the caller must not assume that. peekAt :: Unbox a => Int -> MutByteArray -> IO a -- | peekAt byte-offset array reads an element of type a -- from the given byte offset in the array. -- -- IMPORTANT: The implementation of this interface may not check the -- bounds of the array, the caller must not assume that. peekAt :: (Unbox a, Generic a, PeekRep (Rep a)) => Int -> MutByteArray -> IO a -- | Deprecated: Use peekAt. peekByteIndex :: Unbox a => Int -> MutByteArray -> IO a -- | pokeAt byte-offset array writes an element of type a -- to the given byte offset in the array. -- -- IMPORTANT: The implementation of this interface may not check the -- bounds of the array, the caller must not assume that. pokeAt :: Unbox a => Int -> MutByteArray -> a -> IO () -- | Deprecated: Use pokeAt. pokeByteIndex :: Unbox a => Int -> MutByteArray -> a -> IO () -- | pokeAt byte-offset array writes an element of type a -- to the given byte offset in the array. -- -- IMPORTANT: The implementation of this interface may not check the -- bounds of the array, the caller must not assume that. pokeAt :: (Unbox a, Generic a, PokeRep (Rep a)) => Int -> MutByteArray -> a -> IO () -- | Deprecated: Please use pinnedNew instead. newPinned :: forall m a. (MonadIO m, Unbox a) => Int -> m (MutArray a) new :: (MonadIO m, Unbox a) => Int -> m (MutArray a) pinnedNew :: forall m a. (MonadIO m, Unbox a) => Int -> m (MutArray a) writeN :: forall m a. (MonadIO m, Unbox a) => Int -> Fold m a (MutArray a) write :: forall m a. (MonadIO m, Unbox a) => Fold m a (MutArray a) writeAppendN :: forall m a. (MonadIO m, Unbox a) => Int -> m (MutArray a) -> Fold m a (MutArray a) writeAppend :: forall m a. (MonadIO m, Unbox a) => m (MutArray a) -> Fold m a (MutArray a) -- | Fast, composable stream consumers with ability to terminate, -- supporting stream fusion. -- --

Using Folds

-- -- This module provides elementary folds and fold combinators that can be -- used to consume a stream of data and reduce it to a final value, or -- transform it in a stateful manner using scans. A data stream can be -- reduced into a stream of folded data elements by folding segments of -- the stream. Fold combinators can be used to compose multiple folds in -- parallel or to create a pipeline of folds such that the next fold -- consumes the result of the previous fold. To run these folds on a -- stream see fold, scan, postscan, -- scanMaybe, foldMany and other operations accepting the -- Fold type as an argument in Streamly.Data.Stream. -- --

Reducing a Stream

-- -- A Fold is a consumer of a stream of values. A fold driver (such -- as fold) initializes the fold accumulator, runs the -- fold step function in a loop, processing the input stream one -- element at a time and accumulating the result. The loop continues -- until the fold terminates, at which point the accumulated result is -- returned. -- -- For example, a sum Fold represents a stream consumer that adds -- the values in the input stream: -- --
--   >>> Stream.fold Fold.sum $ Stream.fromList [1..100]
--   5050
--   
-- -- Conceptually, a Fold is a data type that mimics a strict left -- fold (foldl). The above example is similar to a left fold using -- (+) as the step and 0 as the initial value of the -- accumulator: -- --
--   >>> Data.List.foldl' (+) 0 [1..100]
--   5050
--   
-- -- Folds have an early termination capability e.g. the one -- fold terminates after consuming one element: -- --
--   >>> Stream.fold Fold.one $ Stream.fromList [1..]
--   Just 1
--   
-- -- The above example is similar to the following right fold: -- --
--   >>> Prelude.foldr (\x _ -> Just x) Nothing [1..]
--   Just 1
--   
-- -- Folds can be combined together using combinators. For example, -- to create a fold that sums first two elements in a stream: -- --
--   >>> sumTwo = Fold.take 2 Fold.sum
--   
--   >>> Stream.fold sumTwo $ Stream.fromList [1..100]
--   3
--   
-- --

Parallel Composition

-- -- Folds can be combined to run in parallel on the same input. For -- example, to compute the average of numbers in a stream without going -- through the stream twice: -- --
--   >>> avg = Fold.teeWith (/) Fold.sum (fmap fromIntegral Fold.length)
--   
--   >>> Stream.fold avg $ Stream.fromList [1.0..100.0]
--   50.5
--   
-- -- Folds can be combined so as to partition the input stream over -- multiple folds. For example, to count even and odd numbers in a -- stream: -- --
--   >>> split n = if even n then Left n else Right n
--   
--   >>> stream = fmap split $ Stream.fromList [1..100]
--   
--   >>> countEven = fmap (("Even " ++) . show) Fold.length
--   
--   >>> countOdd = fmap (("Odd "  ++) . show) Fold.length
--   
--   >>> f = Fold.partition countEven countOdd
--   
--   >>> Stream.fold f stream
--   ("Even 50","Odd 50")
--   
-- --

Sequential Composition

-- -- Terminating folds can be combined to parse the stream serially such -- that the first fold consumes the input until it terminates and the -- second fold consumes the rest of the input until it terminates: -- --
--   >>> f = Fold.splitWith (,) (Fold.take 8 Fold.toList) (Fold.takeEndBy (== '\n') Fold.toList)
--   
--   >>> Stream.fold f $ Stream.fromList "header: hello\n"
--   ("header: ","hello\n")
--   
-- --

Splitting a Stream

-- -- A Fold can be applied repeatedly on a stream to transform it to -- a stream of fold results. To split a stream on newlines: -- --
--   >>> f = Fold.takeEndBy (== '\n') Fold.toList
--   
--   >>> Stream.fold Fold.toList $ Stream.foldMany f $ Stream.fromList "Hello there!\nHow are you\n"
--   ["Hello there!\n","How are you\n"]
--   
-- -- Similarly, we can split the input of a fold too: -- --
--   >>> Stream.fold (Fold.many f Fold.toList) $ Stream.fromList "Hello there!\nHow are you\n"
--   ["Hello there!\n","How are you\n"]
--   
-- --

Folds vs. Streams

-- -- We can often use streams or folds to achieve the same goal. However, -- streams are more efficient in composition of producers (e.g. -- append or mergeBy) whereas folds are more efficient in -- composition of consumers (e.g. splitWith, partition or -- teeWith). -- -- Streams are producers, transformations on streams happen on the output -- side: -- --
--   >>> :{
--    f stream =
--          Stream.filter odd stream
--        & fmap (+1)
--        & Stream.fold Fold.sum
--   :}
--   
-- --
--   >>> f $ Stream.fromList [1..100 :: Int]
--   2550
--   
-- -- Folds are stream consumers with an input stream and an output value, -- stream transformations on folds happen on the input side: -- --
--   >>> :{
--   f =
--          Fold.filter odd
--        $ Fold.lmap (+1)
--        $ Fold.sum
--   :}
--   
-- --
--   >>> Stream.fold f $ Stream.fromList [1..100 :: Int]
--   2550
--   
-- -- Notice the similarity in the definition of f in both cases; the only -- difference is the composition by & vs $ and the use of lmap vs -- fmap, which is due to output vs input side transformations. -- --

Fusion Limitations

-- -- Folds support stream fusion for generating loops comparable to the -- speed of C. However, it has some limitations. For fusion to work, the -- folds must be inlined, folds must be statically known and not -- generated dynamically, folds should not be passed recursively. -- -- Another limitation is due to the quadratic complexity causing slowdown -- when too many nested compositions are used. Especially, the -- performance of the Applicative instance and splitting operations (e.g. -- splitWith) degrades quadratically (O(n^2)) when combined -- n times, roughly 8 or less sequenced operations are fine. For -- these cases folds can be converted to parsers and then used as -- ParserK. -- --

Experimental APIs

-- -- Please refer to Streamly.Internal.Data.Fold for more functions -- that have not yet been released. module Streamly.Data.Fold -- | Drive a fold using the supplied Stream, reducing the resulting -- expression strictly at each step. -- -- Definition: -- --
--   >>> drive = flip Stream.fold
--   
-- -- Example: -- --
--   >>> Fold.drive (Stream.enumerateFromTo 1 100) Fold.sum
--   5050
--   
drive :: Monad m => Stream m a -> Fold m a b -> m b -- | The type Fold m a b represents a consumer of an input stream -- of values of type a and returning a final value of type -- b in Monad m. The constructor of a fold is -- Fold step initial extract final. -- -- The fold uses an internal state of type s. The initial value -- of the state s is created by initial. This function -- is called once and only once before the fold starts consuming input. -- Any resource allocation can be done in this function. -- -- The step function is called on each input, it consumes an -- input and returns the next intermediate state (see Step) or the -- final result b if the fold terminates. -- -- If the fold is used as a scan, the extract function is used -- by the scan driver to map the current state s of the fold to -- the fold result. Thus extract can be called multiple times. -- In some folds, where scanning does not make sense, this function is -- left unimplemented; such folds cannot be used as scans. -- -- Before a fold terminates, final is called once and only once -- (unless the fold terminated in initial itself). Any resources -- allocated by initial can be released in final. In -- folds that do not require any cleanup extract and -- final are typically the same. -- -- When implementing fold combinators, care should be taken to cleanup -- any state of the argument folds held by the fold by calling the -- respective final at all exit points of the fold. Also, -- final should not be called more than once. Note that if a -- fold terminates by Done constructor, there is no state to -- cleanup. -- -- NOTE: The constructor is not yet released, smart constructors are -- provided to create folds. data Fold m a b -- | Tee is a newtype wrapper over the Fold type providing -- distributing Applicative, Semigroup, Monoid, -- Num, Floating and Fractional instances. -- -- The input received by the composed Tee is replicated and -- distributed to the constituent folds of the Tee. -- -- For example, to compute the average of numbers in a stream without -- going through the stream twice: -- --
--   >>> avg = (/) <$> (Tee Fold.sum) <*> (Tee $ fmap fromIntegral Fold.length)
--   
--   >>> Stream.fold (unTee avg) $ Stream.fromList [1.0..100.0]
--   50.5
--   
-- -- Similarly, the Semigroup and Monoid instances of -- Tee distribute the input to both the folds and combine the -- outputs using Monoid or Semigroup instances of the output types: -- --
--   >>> import Data.Monoid (Sum(..))
--   
--   >>> t = Tee Fold.one <> Tee Fold.latest
--   
--   >>> Stream.fold (unTee t) (fmap Sum $ Stream.enumerateFromTo 1.0 100.0)
--   Just (Sum {getSum = 101.0})
--   
-- -- The Num, Floating, and Fractional instances work -- in the same way. newtype Tee m a b Tee :: Fold m a b -> Tee m a b [unTee] :: Tee m a b -> Fold m a b -- | Make a fold from a left fold style pure step function and initial -- value of the accumulator. -- -- If your Fold returns only Partial (i.e. never returns a -- Done) then you can use foldl'* constructors. -- -- A fold with an extract function can be expressed using fmap: -- --
--   mkfoldlx :: Monad m => (s -> a -> s) -> s -> (s -> b) -> Fold m a b
--   mkfoldlx step initial extract = fmap extract (foldl' step initial)
--   
foldl' :: Monad m => (b -> a -> b) -> b -> Fold m a b -- | Make a fold from a left fold style monadic step function and initial -- value of the accumulator. -- -- A fold with an extract function can be expressed using rmapM: -- --
--   mkFoldlxM :: Functor m => (s -> a -> m s) -> m s -> (s -> m b) -> Fold m a b
--   mkFoldlxM step initial extract = rmapM extract (foldlM' step initial)
--   
foldlM' :: Monad m => (b -> a -> m b) -> m b -> Fold m a b -- | Make a strict left fold, for non-empty streams, using first element as -- the starting value. Returns Nothing if the stream is empty. -- -- Pre-release foldl1' :: Monad m => (a -> a -> a) -> Fold m a (Maybe a) -- | Like 'foldl1'' but with a monadic step function. -- -- Pre-release foldlM1' :: Monad m => (a -> a -> m a) -> Fold m a (Maybe a) -- | Make a fold using a right fold style step function and a terminal -- value. It performs a strict right fold via a left fold using function -- composition. Note that a strict right fold can only be useful for -- constructing strict structures in memory. For reductions this will be -- very inefficient. -- -- Definitions: -- --
--   >>> foldr' f z = fmap (flip appEndo z) $ Fold.foldMap (Endo . f)
--   
--   >>> foldr' f z = fmap ($ z) $ Fold.foldl' (\g x -> g . f x) id
--   
-- -- Example: -- --
--   >>> Stream.fold (Fold.foldr' (:) []) $ Stream.enumerateFromTo 1 5
--   [1,2,3,4,5]
--   
foldr' :: Monad m => (a -> b -> b) -> b -> Fold m a b -- | Semigroup concat. Append the elements of an input stream to a provided -- starting value. -- -- Definition: -- --
--   >>> sconcat = Fold.foldl' (<>)
--   
-- --
--   >>> semigroups = fmap Data.Monoid.Sum $ Stream.enumerateFromTo 1 10
--   
--   >>> Stream.fold (Fold.sconcat 10) semigroups
--   Sum {getSum = 65}
--   
sconcat :: (Monad m, Semigroup a) => a -> Fold m a a -- | Monoid concat. Fold an input stream consisting of monoidal elements -- using mappend and mempty. -- -- Definition: -- --
--   >>> mconcat = Fold.sconcat mempty
--   
-- --
--   >>> monoids = fmap Data.Monoid.Sum $ Stream.enumerateFromTo 1 10
--   
--   >>> Stream.fold Fold.mconcat monoids
--   Sum {getSum = 55}
--   
mconcat :: (Monad m, Monoid a) => Fold m a a -- | Definition: -- --
--   >>> foldMap f = Fold.lmap f Fold.mconcat
--   
-- -- Make a fold from a pure function that folds the output of the function -- using mappend and mempty. -- --
--   >>> sum = Fold.foldMap Data.Monoid.Sum
--   
--   >>> Stream.fold sum $ Stream.enumerateFromTo 1 10
--   Sum {getSum = 55}
--   
foldMap :: (Monad m, Monoid b) => (a -> b) -> Fold m a b -- | Definition: -- --
--   >>> foldMapM f = Fold.lmapM f Fold.mconcat
--   
-- -- Make a fold from a monadic function that folds the output of the -- function using mappend and mempty. -- --
--   >>> sum = Fold.foldMapM (return . Data.Monoid.Sum)
--   
--   >>> Stream.fold sum $ Stream.enumerateFromTo 1 10
--   Sum {getSum = 55}
--   
foldMapM :: (Monad m, Monoid b) => (a -> m b) -> Fold m a b -- | A fold that drains all its input, running the effects and discarding -- the results. -- --
--   >>> drain = Fold.drainMapM (const (return ()))
--   
--   >>> drain = Fold.foldl' (\_ _ -> ()) ()
--   
drain :: Monad m => Fold m a () -- | Definitions: -- --
--   >>> drainMapM f = Fold.lmapM f Fold.drain
--   
--   >>> drainMapM f = Fold.foldMapM (void . f)
--   
-- -- Drain all input after passing it through a monadic function. This is -- the dual of mapM_ on stream producers. drainMapM :: Monad m => (a -> m b) -> Fold m a () -- | Determine the length of the input stream. -- -- Definition: -- --
--   >>> length = Fold.lengthGeneric
--   
--   >>> length = fmap getSum $ Fold.foldMap (Sum . const 1)
--   
length :: Monad m => Fold m a Int -- | Count non-duplicate elements in the stream. -- -- Definition: -- --
--   >>> countDistinct = fmap Set.size Fold.toSet
--   
--   >>> countDistinct = Fold.postscan Fold.nub $ Fold.catMaybes $ Fold.length
--   
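-- A hedged sketch (not part of the generated docs): using countDistinct as
-- a scan via Fold.postscan and terminating once the running count exceeds a
-- threshold (here 4):
--
--   >>> f = Fold.postscan Fold.countDistinct (Fold.takeEndBy (> 4) Fold.toList)
--   
--   >>> Stream.fold f $ Stream.fromList "abracadabra"
--   [1,2,3,3,4,4,5]
--   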
-- -- The memory used is proportional to the number of distinct elements in -- the stream. To guard against using too much memory, use it as a scan and -- terminate if the count exceeds a threshold, as sketched above. -- -- Space: O(n) where n is the number of distinct elements. -- -- Pre-release countDistinct :: (Monad m, Ord a) => Fold m a Int -- | Like countDistinct but specialized to a stream of Int, -- for better performance. -- -- Definition: -- --
--   >>> countDistinctInt = fmap IntSet.size Fold.toIntSet
--   
--   >>> countDistinctInt = Fold.postscan Fold.nubInt $ Fold.catMaybes $ Fold.length
--   
-- -- Pre-release countDistinctInt :: Monad m => Fold m Int Int -- | Determine the frequency of each element in the stream. -- -- You can just collect the keys of the resulting map to get the unique -- elements in the stream. -- -- Definition: -- --
--   >>> frequency = Fold.toMap id Fold.length
--   
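-- An added usage sketch (not part of the generated docs):
--
--   >>> Stream.fold Fold.frequency $ Stream.fromList "abcba"
--   fromList [('a',2),('b',2),('c',1)]
--   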
frequency :: (Monad m, Ord a) => Fold m a (Map a Int) -- | Determine the sum of all elements of a stream of numbers. Returns -- additive identity (0) when the stream is empty. Note that -- this is not numerically stable for floating point numbers. -- --
--   >>> sum = Fold.cumulative Fold.windowSum
--   
-- -- Same as following but numerically stable: -- --
--   >>> sum = Fold.foldl' (+) 0
--   
--   >>> sum = fmap Data.Monoid.getSum $ Fold.foldMap Data.Monoid.Sum
--   
sum :: (Monad m, Num a) => Fold m a a -- | Determine the product of all elements of a stream of numbers. Returns -- multiplicative identity (1) when the stream is empty. The -- fold terminates when it encounters (0) in its input. -- -- Same as the following but terminates on multiplication by 0: -- --
--   >>> product = fmap Data.Monoid.getProduct $ Fold.foldMap Data.Monoid.Product
--   
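-- A hedged illustration (not part of the generated docs): because the fold
-- terminates on 0, the rest of the input is never demanded:
--
--   >>> Stream.fold Fold.product $ Stream.fromList [3, 0, undefined :: Int]
--   0
--   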
product :: (Monad m, Num a, Eq a) => Fold m a a -- | Compute a numerically stable arithmetic mean of all elements in the -- input stream. mean :: (Monad m, Fractional a) => Fold m a a -- | Compute an Int sized polynomial rolling hash of a stream. -- --
--   >>> rollingHash = Fold.rollingHashWithSalt Fold.defaultSalt
--   
rollingHash :: (Monad m, Enum a) => Fold m a Int64 -- | Compute an Int sized polynomial rolling hash -- --
--   H = salt * k ^ n + c1 * k ^ (n - 1) + c2 * k ^ (n - 2) + ... + cn * k ^ 0
--   
-- -- Where c1, c2, cn are the elements in the -- input stream and k is a constant. -- -- This hash is often used in Rabin-Karp string search algorithm. -- -- See https://en.wikipedia.org/wiki/Rolling_hash rollingHashWithSalt :: (Monad m, Enum a) => Int64 -> Fold m a Int64 -- | Folds the input stream to a list. -- -- Warning! working on large lists accumulated as buffers in -- memory could be very inefficient, consider using -- Streamly.Data.Array instead. -- --
--   >>> toList = Fold.foldr' (:) []
--   
toList :: Monad m => Fold m a [a] -- | Buffers the input stream to a list in the reverse order of the input. -- -- Definition: -- --
--   >>> toListRev = Fold.foldl' (flip (:)) []
--   
-- -- Warning! working on large lists accumulated as buffers in -- memory could be very inefficient, consider using Streamly.Array -- instead. toListRev :: Monad m => Fold m a [a] -- | Fold the input to a set. -- -- Definition: -- --
--   >>> toSet = Fold.foldl' (flip Set.insert) Set.empty
--   
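-- An added usage sketch (not part of the generated docs):
--
--   >>> Stream.fold Fold.toSet $ Stream.fromList "hello"
--   fromList "ehlo"
--   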
toSet :: (Monad m, Ord a) => Fold m a (Set a) -- | Fold the input to an int set. For integer inputs this performs better -- than toSet. -- -- Definition: -- --
--   >>> toIntSet = Fold.foldl' (flip IntSet.insert) IntSet.empty
--   
toIntSet :: Monad m => Fold m Int IntSet -- | Get the top n elements using the supplied comparison -- function. -- -- To get bottom n elements instead: -- --
--   >>> bottomBy cmp = Fold.topBy (flip cmp)
--   
-- -- Example: -- --
--   >>> stream = Stream.fromList [2::Int,7,9,3,1,5,6,11,17]
--   
--   >>> Stream.fold (Fold.topBy compare 3) stream >>= MutArray.toList
--   [17,11,9]
--   
-- -- Pre-release topBy :: (MonadIO m, Unbox a) => (a -> a -> Ordering) -> Int -> Fold m a (MutArray a) -- | Returns the latest element of the input stream, if any. -- --
--   >>> latest = Fold.foldl1' (\_ x -> x)
--   
--   >>> latest = fmap getLast $ Fold.foldMap (Last . Just)
--   
latest :: Monad m => Fold m a (Maybe a) -- | Determine the maximum element in a stream using the supplied -- comparison function. maximumBy :: Monad m => (a -> a -> Ordering) -> Fold m a (Maybe a) -- | Determine the maximum element in a stream. -- -- Definitions: -- --
--   >>> maximum = Fold.maximumBy compare
--   
--   >>> maximum = Fold.foldl1' max
--   
-- -- Same as the following but without a default maximum. The Max -- Monoid uses the minBound as the default maximum: -- --
--   >>> maximum = fmap Data.Semigroup.getMax $ Fold.foldMap Data.Semigroup.Max
--   
maximum :: (Monad m, Ord a) => Fold m a (Maybe a) -- | Determine the minimum element in a stream using the supplied -- comparison function. minimumBy :: Monad m => (a -> a -> Ordering) -> Fold m a (Maybe a) -- | Determine the minimum element in a stream. -- -- Definitions: -- --
--   >>> minimum = Fold.minimumBy compare
--   
--   >>> minimum = Fold.foldl1' min
--   
-- -- Same as the following but without a default minimum. The Min -- Monoid uses the maxBound as the default minimum: -- --
--   >>> minimum = fmap Data.Semigroup.getMin $ Fold.foldMap Data.Semigroup.Min
--   
minimum :: (Monad m, Ord a) => Fold m a (Maybe a) -- | Returns the index of the latest element if the element satisfies the -- given predicate. findIndices :: Monad m => (a -> Bool) -> Fold m a (Maybe Int) -- | Returns the index of the latest element if the element matches the -- given value. -- -- Definition: -- --
--   >>> elemIndices a = Fold.findIndices (== a)
--   
elemIndices :: (Monad m, Eq a) => a -> Fold m a (Maybe Int) -- | Returns the latest element omitting the first occurrence that -- satisfies the given equality predicate. -- -- Example: -- --
--   >>> input = Stream.fromList [1,3,3,5]
--   
--   >>> Stream.fold Fold.toList $ Stream.scanMaybe (Fold.deleteBy (==) 3) input
--   [1,3,5]
--   
deleteBy :: Monad m => (a -> a -> Bool) -> a -> Fold m a (Maybe a) -- | Return the latest unique element using the supplied comparison -- function. Returns Nothing if the current element is same as the -- last element otherwise returns Just. -- -- Example, strip duplicate path separators: -- --
--   >>> input = Stream.fromList "//a//b"
--   
--   >>> f x y = x == '/' && y == '/'
--   
--   >>> Stream.fold Fold.toList $ Stream.scanMaybe (Fold.uniqBy f) input
--   "/a/b"
--   
-- -- Space: O(1) -- -- Pre-release uniqBy :: Monad m => (a -> a -> Bool) -> Fold m a (Maybe a) -- | Used as a scan. Returns Just for the first occurrence of an -- element, returns Nothing for any other occurrences. -- -- Example: -- --
--   >>> stream = Stream.fromList [1::Int,1,2,3,4,4,5,1,5,7]
--   
--   >>> Stream.fold Fold.toList $ Stream.scanMaybe Fold.nub stream
--   [1,2,3,4,5,7]
--   
-- -- Pre-release nub :: (Monad m, Ord a) => Fold m a (Maybe a) -- | Like nub but specialized to a stream of Int, for better -- performance. -- -- Pre-release nubInt :: Monad m => Fold m Int (Maybe Int) -- | Take one element from the stream and stop. -- -- Definition: -- --
--   >>> one = Fold.maybe Just
--   
-- -- This is similar to the stream uncons operation. one :: Monad m => Fold m a (Maybe a) -- | Consume at most one element and test whether the input is empty: -- returns True if the stream is empty, False otherwise. -- -- WARNING! It consumes one element if the stream is not empty. If that -- is not what you want please use the eof parser instead. -- -- Definition: -- --
--   >>> null = fmap isNothing Fold.one
--   
null :: Monad m => Fold m a Bool -- | Return the element at the given index. -- -- Definition: -- --
--   >>> index = Fold.indexGeneric
--   
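-- An added usage sketch (not part of the generated docs), assuming 0-based
-- indexing:
--
--   >>> Stream.fold (Fold.index 2) $ Stream.fromList [1..10 :: Int]
--   Just 3
--   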
index :: Monad m => Int -> Fold m a (Maybe a) -- | Terminates with Nothing as soon as it finds an element -- different from the previous one; returns the element if the -- entire input consists of the same element. the :: (Monad m, Eq a) => Fold m a (Maybe a) -- | Returns the first element that satisfies the given predicate. find :: Monad m => (a -> Bool) -> Fold m a (Maybe a) -- | Returns the first element that satisfies the given predicate. -- -- Pre-release findM :: Monad m => (a -> m Bool) -> Fold m a (Maybe a) -- | In a stream of (key-value) pairs (a, b), return the value -- b of the first pair where the key equals the given value -- a. -- -- Definition: -- --
--   >>> lookup x = fmap snd <$> Fold.find ((== x) . fst)
--   
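-- An added usage sketch (not part of the generated docs):
--
--   >>> Stream.fold (Fold.lookup 3) $ Stream.fromList [(1,'a'),(3,'b'),(4,'c')]
--   Just 'b'
--   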
lookup :: (Eq a, Monad m) => a -> Fold m (a, b) (Maybe b) -- | Returns the first index that satisfies the given predicate. findIndex :: Monad m => (a -> Bool) -> Fold m a (Maybe Int) -- | Returns the first index where a given value is found in the stream. -- -- Definition: -- --
--   >>> elemIndex a = Fold.findIndex (== a)
--   
elemIndex :: (Eq a, Monad m) => a -> Fold m a (Maybe Int) -- | Return True if the given element is present in the stream. -- -- Definition: -- --
--   >>> elem a = Fold.any (== a)
--   
elem :: (Eq a, Monad m) => a -> Fold m a Bool -- | Returns True if the given element is not present in the stream. -- -- Definition: -- --
--   >>> notElem a = Fold.all (/= a)
--   
notElem :: (Eq a, Monad m) => a -> Fold m a Bool -- | Returns True if all elements of the input satisfy the -- predicate. -- -- Definition: -- --
--   >>> all p = Fold.lmap p Fold.and
--   
-- -- Example: -- --
--   >>> Stream.fold (Fold.all (== 0)) $ Stream.fromList [1,0,1]
--   False
--   
all :: Monad m => (a -> Bool) -> Fold m a Bool -- | Returns True if any element of the input satisfies the -- predicate. -- -- Definition: -- --
--   >>> any p = Fold.lmap p Fold.or
--   
-- -- Example: -- --
--   >>> Stream.fold (Fold.any (== 0)) $ Stream.fromList [1,0,1]
--   True
--   
any :: Monad m => (a -> Bool) -> Fold m a Bool -- | Returns True if all elements are True, False -- otherwise -- -- Definition: -- --
--   >>> and = Fold.all (== True)
--   
and :: Monad m => Fold m Bool Bool -- | Returns True if any element is True, False -- otherwise -- -- Definition: -- --
--   >>> or = Fold.any (== True)
--   
or :: Monad m => Fold m Bool Bool -- | Append a singleton value to the fold. -- -- See examples under addStream. -- -- Pre-release addOne :: Monad m => a -> Fold m a b -> m (Fold m a b) -- | Append a stream to a fold to build the fold accumulator incrementally. -- We can repeatedly call addStream on the same fold to continue -- building the fold and finally use drive to finish the fold and -- extract the result. Also see the addOne operation which is a -- singleton version of addStream. -- -- Definitions: -- --
--   >>> addStream stream = Fold.drive stream . Fold.duplicate
--   
-- -- Example, build a list incrementally: -- --
--   >>> :{
--   pure (Fold.toList :: Fold IO Int [Int])
--       >>= Fold.addOne 1
--       >>= Fold.addStream (Stream.enumerateFromTo 2 4)
--       >>= Fold.drive Stream.nil
--       >>= print
--   :}
--   [1,2,3,4]
--   
-- -- This can be used as an O(n) list append compared to the O(n^2) -- ++ when used for incrementally building a list. -- -- Example, build a stream incrementally: -- --
--   >>> :{
--   pure (Fold.toStream :: Fold IO Int (Stream Identity Int))
--       >>= Fold.addOne 1
--       >>= Fold.addStream (Stream.enumerateFromTo 2 4)
--       >>= Fold.drive Stream.nil
--       >>= print
--   :}
--   fromList [1,2,3,4]
--   
-- -- This can be used as an O(n) stream append compared to the O(n^2) -- <> when used for incrementally building a stream. -- -- Example, build an array incrementally: -- --
--   >>> :{
--   pure (Array.write :: Fold IO Int (Array Int))
--       >>= Fold.addOne 1
--       >>= Fold.addStream (Stream.enumerateFromTo 2 4)
--       >>= Fold.drive Stream.nil
--       >>= print
--   :}
--   fromList [1,2,3,4]
--   
-- -- Example, build an array stream incrementally: -- --
--   >>> :{
--   let f :: Fold IO Int (Stream Identity (Array Int))
--       f = Fold.groupsOf 2 (Array.writeN 3) Fold.toStream
--   in pure f
--       >>= Fold.addOne 1
--       >>= Fold.addStream (Stream.enumerateFromTo 2 4)
--       >>= Fold.drive Stream.nil
--       >>= print
--   :}
--   fromList [fromList [1,2],fromList [3,4]]
--   
addStream :: Monad m => Stream m a -> Fold m a b -> m (Fold m a b) -- | duplicate provides the ability to run a fold in parts. The -- duplicated fold consumes the input and returns the same fold as output -- instead of returning the final result, the returned fold can be run -- later to consume more input. -- -- duplicate essentially appends a stream to the fold without -- finishing the fold. Compare with snoc which appends a singleton -- value to the fold. -- -- Pre-release duplicate :: Monad m => Fold m a b -> Fold m a (Fold m a b) -- | Map a monadic function on the output of a fold. rmapM :: Monad m => (b -> m c) -> Fold m a b -> Fold m a c -- | lmap f fold maps the function f on the input of the -- fold. -- -- Definition: -- --
--   >>> lmap = Fold.lmapM return
--   
-- -- Example: -- --
--   >>> sumSquared = Fold.lmap (\x -> x * x) Fold.sum
--   
--   >>> Stream.fold sumSquared (Stream.enumerateFromTo 1 100)
--   338350
--   
lmap :: (a -> b) -> Fold m b r -> Fold m a r -- | lmapM f fold maps the monadic function f on the -- input of the fold. lmapM :: Monad m => (a -> m b) -> Fold m b r -> Fold m a r -- | Scan the input of a Fold to change it in a stateful manner -- using another Fold. The scan stops as soon as the fold -- terminates. -- -- Pre-release scan :: Monad m => Fold m a b -> Fold m b c -> Fold m a c -- | Postscan the input of a Fold to change it in a stateful manner -- using another Fold. -- --
--   postscan scanner collector
--   
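-- A small sketch added for illustration (not part of the generated docs):
-- postscanning with sum and collecting the running sums into a list:
--
--   >>> Stream.fold (Fold.postscan Fold.sum Fold.toList) $ Stream.fromList [1,2,3,4 :: Int]
--   [1,3,6,10]
--   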
-- -- Pre-release postscan :: Monad m => Fold m a b -> Fold m b c -> Fold m a c -- | Use a Maybe returning fold as a filtering scan. -- --
--   >>> scanMaybe p f = Fold.postscan p (Fold.catMaybes f)
--   
-- -- Pre-release scanMaybe :: Monad m => Fold m a (Maybe b) -> Fold m b c -> Fold m a c -- | Include only those elements that pass a predicate. -- --
--   >>> Stream.fold (Fold.filter (> 5) Fold.sum) $ Stream.fromList [1..10]
--   40
--   
-- --
--   >>> filter p = Fold.scanMaybe (Fold.filtering p)
--   
--   >>> filter p = Fold.filterM (return . p)
--   
--   >>> filter p = Fold.mapMaybe (\x -> if p x then Just x else Nothing)
--   
filter :: Monad m => (a -> Bool) -> Fold m a r -> Fold m a r -- | Like filter but with a monadic predicate. -- --
--   >>> f p x = p x >>= \r -> return $ if r then Just x else Nothing
--   
--   >>> filterM p = Fold.mapMaybeM (f p)
--   
filterM :: Monad m => (a -> m Bool) -> Fold m a r -> Fold m a r -- | mapMaybe f fold maps a Maybe returning function -- f on the input of the fold, filters out Nothing -- elements, and return the values extracted from Just. -- --
--   >>> mapMaybe f = Fold.lmap f . Fold.catMaybes
--   
--   >>> mapMaybe f = Fold.mapMaybeM (return . f)
--   
-- --
--   >>> f x = if even x then Just x else Nothing
--   
--   >>> fld = Fold.mapMaybe f Fold.toList
--   
--   >>> Stream.fold fld (Stream.enumerateFromTo 1 10)
--   [2,4,6,8,10]
--   
mapMaybe :: Monad m => (a -> Maybe b) -> Fold m b r -> Fold m a r -- | Modify a fold to receive a Maybe input, the Just values -- are unwrapped and sent to the original fold, Nothing values are -- discarded. -- --
--   >>> catMaybes = Fold.mapMaybe id
--   
--   >>> catMaybes = Fold.filter isJust . Fold.lmap fromJust
--   
catMaybes :: Monad m => Fold m a b -> Fold m (Maybe a) b -- | Discard Rights and unwrap Lefts in an Either -- stream. -- -- Pre-release catLefts :: Monad m => Fold m a c -> Fold m (Either a b) c -- | Discard Lefts and unwrap Rights in an Either -- stream. -- -- Pre-release catRights :: Monad m => Fold m b c -> Fold m (Either a b) c -- | Remove the either wrapper and flatten both lefts and as well as rights -- in the output stream. -- -- Definition: -- --
--   >>> catEithers = Fold.lmap (either id id)
--   
-- -- Pre-release catEithers :: Fold m a b -> Fold m (Either a a) b -- | Take at most n input elements and fold them using the -- supplied fold. A negative count is treated as 0. -- --
--   >>> Stream.fold (Fold.take 2 Fold.toList) $ Stream.fromList [1..10]
--   [1,2]
--   
take :: Monad m => Int -> Fold m a b -> Fold m a b -- | Take the input, stop when the predicate succeeds taking the succeeding -- element as well. -- -- Example: -- --
--   >>> input = Stream.fromList "hello\nthere\n"
--   
--   >>> line = Fold.takeEndBy (== '\n') Fold.toList
--   
--   >>> Stream.fold line input
--   "hello\n"
--   
-- --
--   >>> Stream.fold Fold.toList $ Stream.foldMany line input
--   ["hello\n","there\n"]
--   
takeEndBy :: Monad m => (a -> Bool) -> Fold m a b -> Fold m a b -- | Like takeEndBy but drops the element on which the predicate -- succeeds. -- -- Example: -- --
--   >>> input = Stream.fromList "hello\nthere\n"
--   
--   >>> line = Fold.takeEndBy_ (== '\n') Fold.toList
--   
--   >>> Stream.fold line input
--   "hello"
--   
-- --
--   >>> Stream.fold Fold.toList $ Stream.foldMany line input
--   ["hello","there"]
--   
takeEndBy_ :: Monad m => (a -> Bool) -> Fold m a b -> Fold m a b -- | Sequential fold application. Apply two folds sequentially to an input -- stream. The input is provided to the first fold; when it is done, the -- remaining input is provided to the second fold. When the second fold -- is done or if the input stream is over, the outputs of the two folds -- are combined using the supplied function. -- -- Example: -- --
--   >>> header = Fold.take 8 Fold.toList
--   
--   >>> line = Fold.takeEndBy (== '\n') Fold.toList
--   
--   >>> f = Fold.splitWith (,) header line
--   
--   >>> Stream.fold f $ Stream.fromList "header: hello\n"
--   ("header: ","hello\n")
--   
-- -- Note: This is dual to appending streams using append. -- -- Note: this implementation allows for stream fusion but has quadratic -- time complexity, because each composition adds a new branch that each -- subsequent fold's input element has to traverse; therefore, it cannot -- scale to a large number of compositions. After around 100 compositions -- the performance starts dipping rapidly compared to a CPS style -- implementation. -- -- For a larger number of compositions you can convert the fold to a parser -- and use ParserK. -- -- Time: O(n^2) where n is the number of compositions. splitWith :: Monad m => (a -> b -> c) -> Fold m x a -> Fold m x b -> Fold m x c -- | Collect zero or more applications of a fold. many first -- second applies the first fold repeatedly on the input -- stream and accumulates its results using the second fold. -- --
--   >>> two = Fold.take 2 Fold.toList
--   
--   >>> twos = Fold.many two Fold.toList
--   
--   >>> Stream.fold twos $ Stream.fromList [1..10]
--   [[1,2],[3,4],[5,6],[7,8],[9,10]]
--   
-- -- Stops when second fold stops. -- -- See also: concatMap, foldMany many :: Monad m => Fold m a b -> Fold m b c -> Fold m a c -- | groupsOf n split collect repeatedly applies the -- split fold to chunks of n items in the input stream -- and supplies the result to the collect fold. -- -- Definition: -- --
--   >>> groupsOf n split = Fold.many (Fold.take n split)
--   
-- -- Example: -- --
--   >>> twos = Fold.groupsOf 2 Fold.toList Fold.toList
--   
--   >>> Stream.fold twos $ Stream.fromList [1..10]
--   [[1,2],[3,4],[5,6],[7,8],[9,10]]
--   
-- -- Stops when collect stops. groupsOf :: Monad m => Int -> Fold m a b -> Fold m b c -> Fold m a c -- | teeWith k f1 f2 distributes its input to both f1 and -- f2 until both of them terminate and combines their output -- using k. -- -- Definition: -- --
--   >>> teeWith k f1 f2 = fmap (uncurry k) (Fold.tee f1 f2)
--   
-- -- Example: -- --
--   >>> avg = Fold.teeWith (/) Fold.sum (fmap fromIntegral Fold.length)
--   
--   >>> Stream.fold avg $ Stream.fromList [1.0..100.0]
--   50.5
--   
-- -- For applicative composition using this combinator see -- Streamly.Data.Fold.Tee. -- -- See also: Streamly.Data.Fold.Tee -- -- Note that nested applications of teeWith do not fuse. teeWith :: Monad m => (a -> b -> c) -> Fold m x a -> Fold m x b -> Fold m x c -- | Distribute one copy of the stream to each fold and zip the results. -- --
--                   |-------Fold m a b--------|
--   ---stream m a---|                         |---m (b,c)
--                   |-------Fold m a c--------|
--   
-- -- Definition: -- --
--   >>> tee = Fold.teeWith (,)
--   
-- -- Example: -- --
--   >>> t = Fold.tee Fold.sum Fold.length
--   
--   >>> Stream.fold t (Stream.enumerateFromTo 1.0 100.0)
--   (5050.0,100)
--   
tee :: Monad m => Fold m a b -> Fold m a c -> Fold m a (b, c) -- | Distribute one copy of the stream to each fold and collect the results -- in a container. -- --
--                   |-------Fold m a b--------|
--   ---stream m a---|                         |---m [b]
--                   |-------Fold m a b--------|
--                   |                         |
--                              ...
--   
-- --
--   >>> Stream.fold (Fold.distribute [Fold.sum, Fold.length]) (Stream.enumerateFromTo 1 5)
--   [15,5]
--   
-- --
--   >>> distribute = Prelude.foldr (Fold.teeWith (:)) (Fold.fromPure [])
--   
-- -- This is the consumer side dual of the producer side sequence -- operation. -- -- Stops when all the folds stop. distribute :: Monad m => [Fold m a b] -> Fold m a [b] -- | Compose two folds such that the combined fold accepts a stream of -- Either and routes the Left values to the first fold and -- Right values to the second fold. -- -- Definition: -- --
--   >>> partition = Fold.partitionBy id
--   
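-- An added usage sketch (not part of the generated docs):
--
--   >>> input = Stream.fromList [Left "skip", Right 1, Right 2, Left "this", Right 3]
--   
--   >>> Stream.fold (Fold.partition Fold.toList Fold.sum) input
--   (["skip","this"],6)
--   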
partition :: Monad m => Fold m b x -> Fold m c y -> Fold m (Either b c) (x, y) -- | Split the input stream based on a key field and fold each split using -- the given fold. Useful for map/reduce, bucketizing the input in -- different bins or for generating histograms. -- -- Example: -- --
--   >>> import Data.Map.Strict (Map)
--   
--   >>> :{
--    let input = Stream.fromList [("ONE",1),("ONE",1.1),("TWO",2), ("TWO",2.2)]
--        classify = Fold.toMap fst (Fold.lmap snd Fold.toList)
--     in Stream.fold classify input :: IO (Map String [Double])
--   :}
--   fromList [("ONE",[1.0,1.1]),("TWO",[2.0,2.2])]
--   
-- -- Once the classifier fold terminates for a particular key any further -- inputs in that bucket are ignored. -- -- Space used is proportional to the number of keys seen till now and -- monotonically increases because it stores whether a key has been seen -- or not. -- -- See demuxToMap for a more powerful version where you can use a -- different fold for each key. A simpler version of toMap -- retaining only the last value for a key can be written as: -- --
--   >>> toMap = Fold.foldl' (\kv (k, v) -> Map.insert k v kv) Map.empty
--   
-- -- Stops: never -- -- Pre-release toMap :: (Monad m, Ord k) => (a -> k) -> Fold m a b -> Fold m a (Map k b) -- | Same as toMap but maybe faster because it uses mutable cells as -- fold accumulators in the Map. toMapIO :: (MonadIO m, Ord k) => (a -> k) -> Fold m a b -> Fold m a (Map k b) -- | This collects all the results of demux in a Map. demuxToMap :: (Monad m, Ord k) => (a -> k) -> (a -> m (Fold m a b)) -> Fold m a (Map k b) -- | Same as demuxToMap but uses demuxIO for better -- performance. demuxToMapIO :: (MonadIO m, Ord k) => (a -> k) -> (a -> m (Fold m a b)) -> Fold m a (Map k b) -- | Folds the values for each key using the supplied fold. When scanning, -- as soon as the fold is complete, its result is available in the second -- component of the tuple. The first component of the tuple is a snapshot -- of the in-progress folds. -- -- Once the fold for a key is done, any future values of the key are -- ignored. -- -- Definition: -- --
--   >>> classify f fld = Fold.demux f (const fld)
--   
classify :: (Monad m, Ord k) => (a -> k) -> Fold m a b -> Fold m a (m (Map k b), Maybe (k, b)) -- | Same as classify except that it uses mutable IORef cells in the Map -- providing better performance. Be aware that if this is used as a scan, -- the values in the intermediate Maps would be mutable. -- -- Definitions: -- --
--   >>> classifyIO f fld = Fold.demuxIO f (const fld)
--   
classifyIO :: (MonadIO m, Ord k) => (a -> k) -> Fold m a b -> Fold m a (m (Map k b), Maybe (k, b)) -- | demux getKey getFold: In a key value stream, fold values -- corresponding to each key using a key specific fold. getFold -- is invoked to generate a key specific fold when a key is encountered -- for the first time in the stream. -- -- The first component of the output tuple is a key-value Map of -- in-progress folds. The fold returns the fold result as the second -- component of the output tuple whenever a fold terminates. -- -- If a fold terminates, another instance of the fold is started upon -- receiving an input with that key, getFold is invoked again -- whenever the key is encountered again. -- -- This can be used to scan a stream and collect the results from the -- scan output. -- -- Since the fold generator function is monadic we can add folds -- dynamically. For example, we can maintain a Map of keys to folds in an -- IORef and lookup the fold from that corresponding to a key. This Map -- can be changed dynamically, folds for new keys can be added or folds -- for old keys can be deleted or modified. -- -- Compare with classify, the fold in classify is a static -- fold. -- -- Pre-release demux :: (Monad m, Ord k) => (a -> k) -> (a -> m (Fold m a b)) -> Fold m a (m (Map k b), Maybe (k, b)) -- | This is specialized version of demux that uses mutable IO cells -- as fold accumulators for better performance. -- -- Keep in mind that the values in the returned Map may be changed by the -- ongoing fold if you are using those concurrently in another thread. demuxIO :: (MonadIO m, Ord k) => (a -> k) -> (a -> m (Fold m a b)) -> Fold m a (m (Map k b), Maybe (k, b)) -- | Send the elements of tuples in a stream of tuples through two -- different folds. -- --
--                             |-------Fold m a x--------|
--   ---------stream of (a,b)--|                         |----m (x,y)
--                             |-------Fold m b y--------|
--   
-- -- Definition: -- --
--   >>> unzip = Fold.unzipWith id
--   
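-- An added usage sketch (not part of the generated docs):
--
--   >>> Stream.fold (Fold.unzip Fold.sum Fold.toList) $ Stream.fromList [(1,'a'),(2,'b'),(3,'c')]
--   (6,"abc")
--   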
-- -- This is the consumer side dual of the producer side zip -- operation. unzip :: Monad m => Fold m a x -> Fold m b y -> Fold m (a, b) (x, y) -- | Map a Fold returning function on the result of a Fold -- and run the returned fold. This operation can be used to express data -- dependencies between fold operations. -- -- Let's say the first element in the stream is a count of the following -- elements that we have to add, then: -- --
--   >>> import Data.Maybe (fromJust)
--   
--   >>> count = fmap fromJust Fold.one
--   
--   >>> total n = Fold.take n Fold.sum
--   
--   >>> Stream.fold (Fold.concatMap total count) $ Stream.fromList [10,9..1]
--   45
--   
-- -- This does not fuse completely, see refold for a fusible -- alternative. -- -- Time: O(n^2) where n is the number of compositions. -- -- See also: foldIterateM, refold concatMap :: Monad m => (b -> Fold m a c) -> Fold m a b -> Fold m a c -- | Change the underlying monad of a fold. Also known as hoist. -- -- Pre-release morphInner :: (forall x. m x -> n x) -> Fold m a b -> Fold n a b -- | Deprecated: Please use groupsOf instead chunksOf :: Monad m => Int -> Fold m a b -> Fold m b c -> Fold m a c -- | Deprecated: Please use foldr' instead. foldr :: Monad m => (a -> b -> b) -> b -> Fold m a b -- | Deprecated: Please use drainMapM instead. drainBy :: Monad m => (a -> m b) -> Fold m a () -- | Deprecated: Please use latest instead. last :: Monad m => Fold m a (Maybe a) -- | Extract the first element of the stream, if any. -- --
--   >>> head = Fold.one
--   
-- | Deprecated: Please use "one" instead head :: Monad m => Fold m a (Maybe a) -- | Flatten the monadic output of a fold to pure output. -- | Deprecated: Use "rmapM id" instead sequence :: Monad m => Fold m a (m b) -> Fold m a b -- | Map a monadic function on the output of a fold. -- | Deprecated: Use rmapM instead mapM :: Monad m => (b -> m c) -> Fold m a b -> Fold m a c -- | Compute a numerically stable (population) variance over all elements -- in the input stream. -- | Deprecated: Use the streamly-statistics package instead variance :: (Monad m, Fractional a) => Fold m a a -- | Compute a numerically stable (population) standard deviation over all -- elements in the input stream. -- | Deprecated: Use the streamly-statistics package instead stdDev :: (Monad m, Floating a) => Fold m a a -- | Deprecated: Please use "splitWith" instead serialWith :: Monad m => (a -> b -> c) -> Fold m x a -> Fold m x b -> Fold m x c module Streamly.Internal.Data.Array.Generic data Array a Array :: MutableArray# RealWorld a -> {-# UNPACK #-} !Int -> {-# UNPACK #-} !Int -> Array a -- | The internal contents of the array representing the entire array. [arrContents#] :: Array a -> MutableArray# RealWorld a -- | The starting index of this slice. [arrStart] :: Array a -> {-# UNPACK #-} !Int -- | The length of this slice. [arrLen] :: Array a -> {-# UNPACK #-} !Int nil :: Array a createOf :: MonadIO m => Int -> Fold m a (Array a) -- | Fold the whole input to a single array. -- -- Caution! Do not use this on infinite streams. create :: MonadIO m => Fold m a (Array a) writeWith :: MonadIO m => Int -> Fold m a (Array a) writeLastN :: MonadIO m => Int -> Fold m a (Array a) fromStreamN :: MonadIO m => Int -> Stream m a -> m (Array a) fromStream :: MonadIO m => Stream m a -> m (Array a) fromPureStream :: Stream Identity a -> Array a fromByteStr# :: Addr# -> Array Word8 fromListN :: Int -> [a] -> Array a fromList :: [a] -> Array a chunksOf :: forall m a. MonadIO m => Int -> Stream m a -> Stream m (Array a) length :: Array a -> Int reader :: Monad m => Unfold m (Array a) a toList :: Array a -> [a] read :: Monad m => Array a -> Stream m a readRev :: Monad m => Array a -> Stream m a foldl' :: (b -> a -> b) -> b -> Array a -> b foldr :: (a -> b -> b) -> b -> Array a -> b streamFold :: Monad m => (Stream m a -> m b) -> Array a -> m b fold :: Monad m => Fold m a b -> Array a -> m b -- | O(1) Lookup the element at the given index. Index starts from -- 0. Does not check the bounds. getIndexUnsafe :: Int -> Array a -> a -- | Lookup the element at the given index. Index starts from 0. getIndex :: Int -> Array a -> Maybe a getSliceUnsafe :: Int -> Int -> Array a -> Array a -- | Truncate the array at the beginning and end as long as the predicate -- holds true. Returns a slice of the original array. strip :: (a -> Bool) -> Array a -> Array a writeN :: MonadIO m => Int -> Fold m a (Array a) write :: MonadIO m => Fold m a (Array a) instance GHC.Classes.Eq a => GHC.Classes.Eq (Streamly.Internal.Data.Array.Generic.Array a) instance GHC.Classes.Ord a => GHC.Classes.Ord (Streamly.Internal.Data.Array.Generic.Array a) instance GHC.Show.Show a => GHC.Show.Show (Streamly.Internal.Data.Array.Generic.Array a) instance GHC.Read.Read a => GHC.Read.Read (Streamly.Internal.Data.Array.Generic.Array a) -- | Direct style re-implementation of CPS stream in -- Streamly.Internal.Data.StreamK. The symbol or suffix D -- in this module denotes the Direct style. 
-- GHC is able to INLINE -- and fuse the direct style better, providing better performance than the -- CPS implementation. -- --
--   import qualified Streamly.Internal.Data.Stream as D
--   
module Streamly.Internal.Data.Stream -- | A stream is a succession of Steps. A Yield produces a -- single value and the next state of the stream. Stop indicates -- there are no more values in the stream. data Step s a Yield :: a -> s -> Step s a Skip :: s -> Step s a Stop :: Step s a -- | A stream consists of a step function that generates the next step -- given a current state, and the current state. data Stream m a UnStream :: (State StreamK m a -> s -> m (Step s a)) -> s -> Stream m a pattern Stream :: (State StreamK m a -> s -> m (Step s a)) -> s -> Stream m a -- | A newtype wrapper for the Stream type with a cross product -- style monad instance. -- -- A Monad bind behaves like a for loop: -- --
--   >>> :{
--   Stream.fold Fold.toList $ Stream.unCross $ do
--       x <- Stream.mkCross $ Stream.fromList [1,2]
--       -- Perform the following actions for each x in the stream
--       return x
--   :}
--   [1,2]
--   
-- -- Nested monad binds behave like nested for loops: -- --
--   >>> :{
--   Stream.fold Fold.toList $ Stream.unCross $ do
--       x <- Stream.mkCross $ Stream.fromList [1,2]
--       y <- Stream.mkCross $ Stream.fromList [3,4]
--       -- Perform the following actions for each x, for each y
--       return (x, y)
--   :}
--   [(1,3),(1,4),(2,3),(2,4)]
--   
data CrossStream m a unCross :: CrossStream m a -> Stream m a mkCross :: Stream m a -> CrossStream m a -- | Convert a CPS encoded StreamK to direct style step encoded StreamD fromStreamK :: Applicative m => StreamK m a -> Stream m a -- | Convert a direct style step encoded StreamD to a CPS encoded StreamK toStreamK :: Monad m => Stream m a -> StreamK m a -- | Convert an Unfold into a stream by supplying it an input seed. -- --
--   >>> s = Stream.unfold Unfold.replicateM (3, putStrLn "hello")
--   
--   >>> Stream.fold Fold.drain s
--   hello
--   hello
--   hello
--   
unfold :: Applicative m => Unfold m a b -> a -> Stream m b -- | A stream that terminates without producing any output, but produces a -- side effect. -- --
--   >>> Stream.fold Fold.toList (Stream.nilM (print "nil"))
--   "nil"
--   []
--   
-- -- Pre-release nilM :: Applicative m => m b -> Stream m a -- | Like cons but fuses an effect instead of a pure value. consM :: Applicative m => m a -> Stream m a -> Stream m a infixr 5 `consM` -- | Create a singleton stream from a pure value. -- --
--   >>> fromPure a = a `Stream.cons` Stream.nil
--   
--   >>> fromPure = pure
--   
--   >>> fromPure = Stream.fromEffect . pure
--   
fromPure :: Applicative m => a -> Stream m a -- | Create a singleton stream from a monadic action. -- --
--   >>> fromEffect m = m `Stream.consM` Stream.nil
--   
--   >>> fromEffect = Stream.sequence . Stream.fromPure
--   
-- --
--   >>> Stream.fold Fold.drain $ Stream.fromEffect (putStrLn "hello")
--   hello
--   
fromEffect :: Applicative m => m a -> Stream m a -- | Construct a stream from a list of pure values. fromList :: Applicative m => [a] -> Stream m a -- | Decompose a stream into its head and tail. If the stream is empty, -- returns Nothing. If the stream is non-empty, returns Just -- (a, ma), where a is the head of the stream and -- ma its tail. -- -- Properties: -- --
--   >>> Nothing <- Stream.uncons Stream.nil
--   
--   >>> Just ("a", t) <- Stream.uncons (Stream.cons "a" Stream.nil)
--   
-- -- This can be used to consume the stream in an imperative manner one -- element at a time, as it just breaks down the stream into individual -- elements and we can loop over them as we deem fit. For example, this -- can be used to convert a streamly stream into other stream types. -- -- All the folds in this module can be expressed in terms of -- uncons; however, this is generally less efficient than specific -- folds because it takes apart the stream one element at a time and, -- therefore, does not take advantage of stream fusion. -- -- foldBreak is a more general way of consuming a stream -- piecemeal. -- --
--   >>> :{
--   uncons xs = do
--       r <- Stream.foldBreak Fold.one xs
--       return $ case r of
--           (Nothing, _) -> Nothing
--           (Just h, t) -> Just (h, t)
--   :}
--   
uncons :: Monad m => Stream m a -> m (Maybe (a, Stream m a)) -- | Fold a stream using the supplied left Fold and reducing the -- resulting expression strictly at each step. The behavior is similar to -- foldl'. A Fold can terminate early without consuming the -- full stream. See the documentation of individual Folds for -- termination behavior. -- -- Definitions: -- --
--   >>> fold f = fmap fst . Stream.foldBreak f
--   
--   >>> fold f = Stream.parse (Parser.fromFold f)
--   
-- -- Example: -- --
--   >>> Stream.fold Fold.sum (Stream.enumerateFromTo 1 100)
--   5050
--   
fold :: Monad m => Fold m a b -> Stream m a -> m b -- | Like fold but also returns the remaining stream. The resulting -- stream would be nil if the stream finished before the fold. foldBreak :: Monad m => Fold m a b -> Stream m a -> m (b, Stream m a) -- | Append a stream to a fold lazily to build an accumulator -- incrementally. -- -- Example, to continue folding a list of streams on the same sum fold: -- --
--   >>> streams = [Stream.fromList [1..5], Stream.fromList [6..10]]
--   
--   >>> f = Prelude.foldl Stream.foldAddLazy Fold.sum streams
--   
--   >>> Stream.fold f Stream.nil
--   55
--   
foldAddLazy :: Monad m => Fold m a b -> Stream m a -> Fold m a b -- |
--   >>> foldAdd = flip Fold.addStream
--   
foldAdd :: Monad m => Fold m a b -> Stream m a -> m (Fold m a b) -- | Fold resulting in either breaking the stream or continuation of the -- fold. Instead of supplying the input stream in one go we can run the -- fold multiple times, each time supplying the next segment of the input -- stream. If the fold has not yet finished it returns a fold that can be -- run again otherwise it returns the fold result and the residual -- stream. -- -- Internal foldEither :: Monad m => Fold m a b -> Stream m a -> m (Either (Fold m a b) (b, Stream m a)) foldl' :: Monad m => (b -> a -> b) -> b -> Stream m a -> m b foldlM' :: Monad m => (b -> a -> m b) -> m b -> Stream m a -> m b foldlx' :: Monad m => (x -> a -> x) -> x -> (x -> b) -> Stream m a -> m b foldlMx' :: Monad m => (x -> a -> m x) -> m x -> (x -> m b) -> Stream m a -> m b -- | Right associative/lazy pull fold. foldrM build final stream -- constructs an output structure using the step function build. -- build is invoked with the next input element and the -- remaining (lazy) tail of the output structure. It builds a lazy output -- expression using the two. When the "tail structure" in the output -- expression is evaluated it calls build again thus lazily -- consuming the input stream until either the output expression -- built by build is free of the "tail" or the input is -- exhausted in which case final is used as the terminating case -- for the output structure. For more details see the description in the -- previous section. -- -- Example, determine if any element is odd in a stream: -- --
--   >>> s = Stream.fromList (2:4:5:undefined)
--   
--   >>> step x xs = if odd x then return True else xs
--   
--   >>> Stream.foldrM step (return False) s
--   True
--   
foldrM :: Monad m => (a -> m b -> m b) -> m b -> Stream m a -> m b foldrMx :: Monad m => (a -> m x -> m x) -> m x -> (m x -> m b) -> Stream m a -> m b -- | Right fold, lazy for lazy monads and pure streams, and strict for -- strict monads. -- -- Please avoid using this routine in strict monads like IO unless you -- need a strict right fold. This is provided only for use in lazy monads -- (e.g. Identity) or pure streams. Note that with this signature it is -- not possible to implement a lazy foldr when the monad m is -- strict. In that case it would be strict in its accumulator and -- therefore would necessarily consume all its input. -- --
--   >>> foldr f z = Stream.foldrM (\a b -> f a <$> b) (return z)
--   
-- -- Note: This is similar to Fold.foldr' (the right fold via left fold), -- but could be more efficient. foldr :: Monad m => (a -> b -> b) -> b -> Stream m a -> m b foldrS :: Monad m => (a -> Stream m b -> Stream m b) -> Stream m b -> Stream m a -> Stream m b -- | Definitions: -- --
--   >>> drain = Stream.fold Fold.drain
--   
--   >>> drain = Stream.foldrM (\_ xs -> xs) (return ())
--   
-- -- Run a stream, discarding the results. drain :: Monad m => Stream m a -> m () -- | Definitions: -- --
--   >>> toList = Stream.foldr (:) []
--   
--   >>> toList = Stream.fold Fold.toList
--   
-- -- Convert a stream into a list in the underlying monad. The list can be -- consumed lazily in a lazy monad (e.g. Identity). In a strict -- monad (e.g. IO) the whole list is generated and buffered before it can -- be consumed. -- -- Warning! working on large lists accumulated as buffers in -- memory could be very inefficient, consider using -- Streamly.Data.Array instead. -- -- Note that this could be a bit more efficient compared to Stream.fold -- Fold.toList, and it can fuse with pure list consumers. toList :: Monad m => Stream m a -> m [a] map :: Monad m => (a -> b) -> Stream m a -> Stream m b -- |
--   >>> mapM f = Stream.sequence . fmap f
--   
-- -- Apply a monadic function to each element of the stream and replace it -- with the output of the resulting action. -- --
--   >>> s = Stream.fromList ["a", "b", "c"]
--   
--   >>> Stream.fold Fold.drain $ Stream.mapM putStr s
--   abc
--   
mapM :: Monad m => (a -> m b) -> Stream m a -> Stream m b -- | Take first n elements from the stream and discard the rest. take :: Applicative m => Int -> Stream m a -> Stream m a -- | End the stream as soon as the predicate fails on an element. takeWhile :: Monad m => (a -> Bool) -> Stream m a -> Stream m a -- | Same as takeWhile but with a monadic predicate. takeWhileM :: Monad m => (a -> m Bool) -> Stream m a -> Stream m a takeEndBy :: Monad m => (a -> Bool) -> Stream m a -> Stream m a takeEndByM :: Monad m => (a -> m Bool) -> Stream m a -> Stream m a -- | Like zipWith but using a monadic zipping function. zipWithM :: Monad m => (a -> b -> m c) -> Stream m a -> Stream m b -> Stream m c -- | WARNING! O(n^2) time complexity wrt number of streams. Suitable for -- statically fusing a small number of streams. Use the O(n) complexity -- StreamK.zipWith otherwise. -- -- Stream a is evaluated first, followed by stream b, -- the resulting elements a and b are then zipped using -- the supplied zip function and the result c is yielded to the -- consumer. -- -- If stream a or stream b ends, the zipped stream -- ends. If stream b ends first, the element a from -- previous evaluation of stream a is discarded. -- --
--   >>> s1 = Stream.fromList [1,2,3]
--   
--   >>> s2 = Stream.fromList [4,5,6]
--   
--   >>> Stream.fold Fold.toList $ Stream.zipWith (+) s1 s2
--   [5,7,9]
--   
zipWith :: Monad m => (a -> b -> c) -> Stream m a -> Stream m b -> Stream m c -- | Apply a stream of functions to a stream of values and flatten the -- results. -- -- Note that the second stream is evaluated multiple times. -- --
--   >>> crossApply = Stream.crossWith id
--   
crossApply :: Functor f => Stream f (a -> b) -> Stream f a -> Stream f b crossApplyFst :: Functor f => Stream f a -> Stream f b -> Stream f a crossApplySnd :: Functor f => Stream f a -> Stream f b -> Stream f b -- | Definition: -- --
--   >>> crossWith f m1 m2 = fmap f m1 `Stream.crossApply` m2
--   
-- -- Note that the second stream is evaluated multiple times. crossWith :: Monad m => (a -> b -> c) -> Stream m a -> Stream m b -> Stream m c -- | Given a Stream m a and Stream m b generate a stream -- with all possible combinations of the tuple (a, b). -- -- Definition: -- --
--   >>> cross = Stream.crossWith (,)
--   
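-- An added usage sketch (not part of the generated docs):
--
--   >>> Stream.fold Fold.toList $ Stream.cross (Stream.fromList [1,2 :: Int]) (Stream.fromList "ab")
--   [(1,'a'),(1,'b'),(2,'a'),(2,'b')]
--   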
-- -- The second stream is evaluated multiple times. If that is not desired -- it can be cached in an Array and then generated from the array -- before calling this function. Caching may also improve performance if -- the stream is expensive to evaluate. -- -- See cross for a much faster fused alternative. -- -- Time: O(m x n) -- -- Pre-release cross :: Monad m => Stream m a -> Stream m b -> Stream m (a, b) data ConcatMapUState o i ConcatMapUOuter :: o -> ConcatMapUState o i ConcatMapUInner :: o -> i -> ConcatMapUState o i -- | unfoldMany unfold stream uses unfold to map the -- input stream elements to streams and then flattens the generated -- streams into a single output stream. -- -- Like concatMap but uses an Unfold for stream generation. -- Unlike concatMap this can fuse the Unfold code with the -- inner loop and therefore provide many times better performance. unfoldMany :: Monad m => Unfold m a b -> Stream m a -> Stream m b -- | Given a stream value in the underlying monad, lift and join the -- underlying monad with the stream monad. -- --
--   >>> concatEffect = Stream.concat . Stream.fromEffect
--   
--   >>> concatEffect eff = Stream.concatMapM (\() -> eff) (Stream.fromPure ())
--   
-- -- See also: concat, sequence concatEffect :: Monad m => m (Stream m a) -> Stream m a -- | Map a stream producing function on each element of the stream and then -- flatten the results into a single stream. -- --
--   >>> concatMap f = Stream.concatMapM (return . f)
--   
--   >>> concatMap f = Stream.concat . fmap f
--   
--   >>> concatMap f = Stream.unfoldMany (Unfold.lmap f Unfold.fromStream)
--   
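-- An added usage sketch (not part of the generated docs):
--
--   >>> s = Stream.concatMap (\x -> Stream.fromList [x, x * 10]) (Stream.fromList [1,2,3 :: Int])
--   
--   >>> Stream.fold Fold.toList s
--   [1,10,2,20,3,30]
--   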
-- -- See unfoldMany for a fusible alternative. concatMap :: Monad m => (a -> Stream m b) -> Stream m a -> Stream m b -- | Map a stream producing monadic function on each element of the stream -- and then flatten the results into a single stream. Since the stream -- generation function is monadic, unlike concatMap, it can -- produce an effect at the beginning of each iteration of the inner -- loop. -- -- See unfoldMany for a fusible alternative. concatMapM :: Monad m => (a -> m (Stream m b)) -> Stream m a -> Stream m b -- | Flatten a stream of streams to a single stream. -- --
--   >>> concat = Stream.concatMap id
--   
-- -- Pre-release concat :: Monad m => Stream m (Stream m a) -> Stream m a -- | Same as concatIterateDfs but more efficient due to stream -- fusion. -- -- Example, list a directory tree using DFS: -- --
--   >>> f = Unfold.either Dir.eitherReaderPaths Unfold.nil
--   
--   >>> input = Stream.fromPure (Left ".")
--   
--   >>> ls = Stream.unfoldIterateDfs f input
--   
-- -- Pre-release unfoldIterateDfs :: Monad m => Unfold m a a -> Stream m a -> Stream m a -- | Like unfoldIterateDfs but uses breadth first style traversal. -- -- Pre-release unfoldIterateBfs :: Monad m => Unfold m a a -> Stream m a -> Stream m a -- | Like unfoldIterateBfs but processes the children in reverse -- order, therefore, may be slightly faster. -- -- Pre-release unfoldIterateBfsRev :: Monad m => Unfold m a a -> Stream m a -> Stream m a -- | Generate a stream from an initial state, scan and concat the stream, -- generate a stream again from the final state of the previous scan and -- repeat the process. concatIterateScan :: Monad m => (b -> a -> m b) -> (b -> m (Maybe (b, Stream m a))) -> b -> Stream m a -- | Traverse the stream in depth first style (DFS). Map each element in -- the input stream to a stream and flatten, recursively map the -- resulting elements as well to a stream and flatten until no more -- streams are generated. -- -- Example, list a directory tree using DFS: -- --
--   >>> f = either (Just . Dir.readEitherPaths) (const Nothing)
--   
--   >>> input = Stream.fromPure (Left ".")
--   
--   >>> ls = Stream.concatIterateDfs f input
--   
-- -- This is equivalent to using concatIterateWith StreamK.append. -- -- Pre-release concatIterateDfs :: Monad m => (a -> Maybe (Stream m a)) -> Stream m a -> Stream m a -- | Similar to concatIterateDfs except that it traverses the stream -- in breadth first style (BFS). First, all the elements in the input -- stream are emitted, and then their traversals are emitted. -- -- Example, list a directory tree using BFS: -- --
--   >>> f = either (Just . Dir.readEitherPaths) (const Nothing)
--   
--   >>> input = Stream.fromPure (Left ".")
--   
--   >>> ls = Stream.concatIterateBfs f input
--   
-- -- Pre-release concatIterateBfs :: Monad m => (a -> Maybe (Stream m a)) -> Stream m a -> Stream m a -- | Same as concatIterateBfs except that the traversal of the last -- element on a level is emitted first and then going backwards up to the -- first element (reversed ordering). This may be slightly faster than -- concatIterateBfs. concatIterateBfsRev :: Monad m => (a -> Maybe (Stream m a)) -> Stream m a -> Stream m a data FoldMany s fs b a FoldManyStart :: s -> FoldMany s fs b a FoldManyFirst :: fs -> s -> FoldMany s fs b a FoldManyLoop :: s -> fs -> FoldMany s fs b a FoldManyYield :: b -> FoldMany s fs b a -> FoldMany s fs b a FoldManyDone :: FoldMany s fs b a data FoldManyPost s fs b a FoldManyPostStart :: s -> FoldManyPost s fs b a FoldManyPostLoop :: s -> fs -> FoldManyPost s fs b a FoldManyPostYield :: b -> FoldManyPost s fs b a -> FoldManyPost s fs b a FoldManyPostDone :: FoldManyPost s fs b a -- | Apply a Fold repeatedly on a stream and emit the results in the -- output stream. -- -- Definition: -- --
--   >>> foldMany f = Stream.parseMany (Parser.fromFold f)
--   
-- -- Example, empty stream: -- --
--   >>> f = Fold.take 2 Fold.sum
--   
--   >>> fmany = Stream.fold Fold.toList . Stream.foldMany f
--   
--   >>> fmany $ Stream.fromList []
--   []
--   
-- -- Example, last fold empty: -- --
--   >>> fmany $ Stream.fromList [1..4]
--   [3,7]
--   
-- -- Example, last fold non-empty: -- --
--   >>> fmany $ Stream.fromList [1..5]
--   [3,7,5]
--   
-- -- Note that using a closed fold e.g. Fold.take 0, would result -- in an infinite stream on a non-empty input stream. foldMany :: Monad m => Fold m a b -> Stream m a -> Stream m b -- | Like foldMany but evaluates the fold even if the fold did not -- receive any input, therefore, always results in a non-empty output -- even on an empty stream (default result of the fold). -- -- Example, empty stream: -- --
--   >>> f = Fold.take 2 Fold.sum
--   
--   >>> fmany = Stream.fold Fold.toList . Stream.foldManyPost f
--   
--   >>> fmany $ Stream.fromList []
--   [0]
--   
-- -- Example, last fold empty: -- --
--   >>> fmany $ Stream.fromList [1..4]
--   [3,7,0]
--   
-- -- Example, last fold non-empty: -- --
--   >>> fmany $ Stream.fromList [1..5]
--   [3,7,5]
--   
-- -- Note that using a closed fold e.g. Fold.take 0, would result -- in an infinite stream without consuming the input. -- -- Pre-release foldManyPost :: Monad m => Fold m a b -> Stream m a -> Stream m b -- | Group the input stream into groups of n elements each and -- then fold each group using the provided fold function. -- --
--   groupsOf n f = foldMany (FL.take n f)
--   
-- --
--   >>> Stream.toList $ Stream.groupsOf 2 Fold.sum (Stream.enumerateFromTo 1 10)
--   [3,7,11,15,19]
--   
-- -- This can be considered as an n-fold version of take where we -- apply take repeatedly on the leftover stream until the stream -- exhausts. groupsOf :: Monad m => Int -> Fold m a b -> Stream m a -> Stream m b -- | Like foldMany but for the Refold type. The supplied -- action is used as the initial value for each refold. -- -- Internal refoldMany :: Monad m => Refold m x a b -> m x -> Stream m a -> Stream m b -- | Like foldIterateM but using the Refold type instead. -- This could be much more efficient due to stream fusion. -- -- Internal refoldIterateM :: Monad m => Refold m b a b -> m b -> Stream m a -> Stream m b -- | Binary BFS style reduce, folds a level entirely using the supplied -- fold function, collecting the outputs as next level of the tree, then -- repeats the same process on the next level. The last elements of a -- previously folded level are folded first. reduceIterateBfs :: Monad m => (a -> a -> m a) -> Stream m a -> m (Maybe a) -- | N-Ary BFS style iterative fold, if the input stream finished before -- the fold then it returns Left otherwise Right. If the fold returns -- Left we terminate. -- -- Unimplemented foldIterateBfs :: Fold m a (Either a a) -> Stream m a -> m (Maybe a) -- | Like splitOnSuffix but generates a stream of (index, len) -- tuples marking the places where the predicate matches in the stream. -- -- Pre-release indexOnSuffix :: Monad m => (a -> Bool) -> Stream m a -> Stream m (Int, Int) -- | Compare two streams for equality eqBy :: Monad m => (a -> b -> Bool) -> Stream m a -> Stream m b -> m Bool -- | Compare two streams lexicographically. cmpBy :: Monad m => (a -> b -> Ordering) -> Stream m a -> Stream m b -> m Ordering -- | Deprecated: Please use indexOnSuffix instead. sliceOnSuffix :: Monad m => (a -> Bool) -> Stream m a -> Stream m (Int, Int) -- | A stream that terminates without producing any output or side effect. -- --
--   >>> Stream.toList Stream.nil
--   []
--   
nil :: Applicative m => Stream m a -- | A stream that terminates without producing any output, but produces a -- side effect. -- --
--   >>> Stream.fold Fold.toList (Stream.nilM (print "nil"))
--   "nil"
--   []
--   
-- -- Pre-release nilM :: Applicative m => m b -> Stream m a -- | WARNING! O(n^2) time complexity wrt number of elements. Use the O(n) -- complexity StreamK.cons unless you want to statically fuse just -- a few elements. -- -- Fuse a pure value at the head of an existing stream: -- --
--   >>> s = 1 `Stream.cons` Stream.fromList [2,3]
--   
--   >>> Stream.toList s
--   [1,2,3]
--   
-- -- Definition: -- --
--   >>> cons x xs = return x `Stream.consM` xs
--   
cons :: Applicative m => a -> Stream m a -> Stream m a infixr 5 `cons` -- | Like cons but fuses an effect instead of a pure value. consM :: Applicative m => m a -> Stream m a -> Stream m a infixr 5 `consM` -- | Convert an Unfold into a stream by supplying it an input seed. -- --
--   >>> s = Stream.unfold Unfold.replicateM (3, putStrLn "hello")
--   
--   >>> Stream.fold Fold.drain s
--   hello
--   hello
--   hello
--   
unfold :: Applicative m => Unfold m a b -> a -> Stream m b -- | Build a stream by unfolding a pure step function step -- starting from a seed s. The step function returns the next -- element in the stream and the next seed value. When it is done it -- returns Nothing and the stream ends. For example, -- --
--   >>> :{
--   let f b =
--           if b > 2
--           then Nothing
--           else Just (b, b + 1)
--   in Stream.toList $ Stream.unfoldr f 0
--   :}
--   [0,1,2]
--   
unfoldr :: Monad m => (s -> Maybe (a, s)) -> s -> Stream m a -- | Build a stream by unfolding a monadic step function starting -- from a seed. The step function returns the next element in the stream -- and the next seed value. When it is done it returns Nothing and -- the stream ends. For example, -- --
--   >>> :{
--   let f b =
--           if b > 2
--           then return Nothing
--           else return (Just (b, b + 1))
--   in Stream.toList $ Stream.unfoldrM f 0
--   :}
--   [0,1,2]
--   
unfoldrM :: Monad m => (s -> m (Maybe (a, s))) -> s -> Stream m a -- | Create a singleton stream from a pure value. -- --
--   >>> fromPure a = a `Stream.cons` Stream.nil
--   
--   >>> fromPure = pure
--   
--   >>> fromPure = Stream.fromEffect . pure
--   
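-- -- For example (illustrative, expected output): -- --
--   >>> Stream.fold Fold.toList (Stream.fromPure 1)
--   [1]
--   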
fromPure :: Applicative m => a -> Stream m a -- | Create a singleton stream from a monadic action. -- --
--   >>> fromEffect m = m `Stream.consM` Stream.nil
--   
--   >>> fromEffect = Stream.sequence . Stream.fromPure
--   
-- --
--   >>> Stream.fold Fold.drain $ Stream.fromEffect (putStrLn "hello")
--   hello
--   
fromEffect :: Applicative m => m a -> Stream m a -- | Generate an infinite stream by repeating a pure value. -- --
--   >>> repeat x = Stream.repeatM (pure x)
--   
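-- -- For example (illustrative, expected output): -- --
--   >>> Stream.fold Fold.toList $ Stream.take 3 $ Stream.repeat 1
--   [1,1,1]
--   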
repeat :: Monad m => a -> Stream m a -- |
--   >>> repeatM = Stream.sequence . Stream.repeat
--   
-- -- Generate a stream by repeatedly executing a monadic action forever. -- --
--   >>> :{
--   repeatAction =
--          Stream.repeatM (threadDelay 1000000 >> print 1)
--        & Stream.take 10
--        & Stream.fold Fold.drain
--   :}
--   
repeatM :: Monad m => m a -> Stream m a -- |
--   >>> replicate n = Stream.take n . Stream.repeat
--   
--   >>> replicate n x = Stream.replicateM n (pure x)
--   
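-- -- For instance (illustrative, expected output): -- --
--   >>> Stream.fold Fold.toList $ Stream.replicate 3 'x'
--   "xxx"
--   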
-- -- Generate a stream of length n by repeating a value n -- times. replicate :: Monad m => Int -> a -> Stream m a -- |
--   >>> replicateM n = Stream.sequence . Stream.replicate n
--   
-- -- Generate a stream by performing a monadic action n times. replicateM :: Monad m => Int -> m a -> Stream m a -- | For floating point numbers if the increment is less than the precision -- then it just gets lost. Therefore we cannot always increment it -- correctly by just repeated addition. 9007199254740992 + 1 + 1 :: -- Double => 9.007199254740992e15 9007199254740992 + 2 :: Double => -- 9.007199254740994e15 -- -- Instead we accumulate the increment counter and compute the increment -- every time before adding it to the starting number. -- -- This works for Integrals as well as floating point numbers, but -- enumerateFromStepIntegral is faster for integrals. enumerateFromStepNum :: (Monad m, Num a) => a -> a -> Stream m a enumerateFromNum :: (Monad m, Num a) => a -> Stream m a enumerateFromThenNum :: (Monad m, Num a) => a -> a -> Stream m a -- |
--   enumerate = enumerateFrom minBound
--   
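-- -- For example, with a small Bounded type such as Bool (illustrative -- sketch, assuming its Enumerable instance; expected output shown): -- --
--   >>> Stream.fold Fold.toList (Stream.enumerate :: Stream IO Bool)
--   [False,True]
--   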
-- -- Enumerate a Bounded type from its minBound to -- maxBound enumerate :: (Monad m, Bounded a, Enumerable a) => Stream m a -- |
--   >>> enumerateTo = Stream.enumerateFromTo minBound
--   
-- -- Enumerate a Bounded type from its minBound to specified -- value. enumerateTo :: (Monad m, Bounded a, Enumerable a) => a -> Stream m a -- |
--   >>> enumerateFromBounded from = Stream.enumerateFromTo from maxBound
--   
-- -- enumerateFrom for Bounded Enum types. enumerateFromBounded :: (Monad m, Enumerable a, Bounded a) => a -> Stream m a -- | enumerateFromTo for Enum types not larger than -- Int. enumerateFromToSmall :: (Monad m, Enum a) => a -> a -> Stream m a -- | enumerateFromThenTo for Enum types not larger than -- Int. enumerateFromThenToSmall :: (Monad m, Enum a) => a -> a -> a -> Stream m a -- | enumerateFromThen for Enum types not larger than -- Int. -- -- Note: We convert the Enum to Int and enumerate the -- Int. If a type is bounded but does not have a Bounded -- instance then we can go on enumerating it beyond the legal values of -- the type, resulting in the failure of toEnum when converting -- back to Enum. Therefore we require a Bounded instance -- for this function to be safely used. enumerateFromThenSmallBounded :: (Monad m, Enumerable a, Bounded a) => a -> a -> Stream m a -- | Enumerate an Integral type. enumerateFromIntegral from -- generates a stream whose first element is from and the -- successive elements are in increments of 1. The stream is -- bounded by the size of the Integral type. -- --
--   >>> Stream.toList $ Stream.take 4 $ Stream.enumerateFromIntegral (0 :: Int)
--   [0,1,2,3]
--   
enumerateFromIntegral :: (Monad m, Integral a, Bounded a) => a -> Stream m a -- | Enumerate an Integral type in steps. -- enumerateFromThenIntegral from then generates a stream whose -- first element is from, the second element is then -- and the successive elements are in increments of then - from. -- The stream is bounded by the size of the Integral type. -- --
--   >>> Stream.toList $ Stream.take 4 $ Stream.enumerateFromThenIntegral (0 :: Int) 2
--   [0,2,4,6]
--   
-- --
--   >>> Stream.toList $ Stream.take 4 $ Stream.enumerateFromThenIntegral (0 :: Int) (-2)
--   [0,-2,-4,-6]
--   
enumerateFromThenIntegral :: (Monad m, Integral a, Bounded a) => a -> a -> Stream m a -- | Enumerate an Integral type up to a given limit. -- enumerateFromToIntegral from to generates a finite stream -- whose first element is from and successive elements are in -- increments of 1 up to to. -- --
--   >>> Stream.toList $ Stream.enumerateFromToIntegral 0 4
--   [0,1,2,3,4]
--   
enumerateFromToIntegral :: (Monad m, Integral a) => a -> a -> Stream m a -- | Enumerate an Integral type in steps up to a given limit. -- enumerateFromThenToIntegral from then to generates a finite -- stream whose first element is from, the second element is -- then and the successive elements are in increments of -- then - from up to to. -- --
--   >>> Stream.toList $ Stream.enumerateFromThenToIntegral 0 2 6
--   [0,2,4,6]
--   
-- --
--   >>> Stream.toList $ Stream.enumerateFromThenToIntegral 0 (-2) (-6)
--   [0,-2,-4,-6]
--   
enumerateFromThenToIntegral :: (Monad m, Integral a) => a -> a -> a -> Stream m a -- | enumerateFromStepIntegral from step generates an infinite -- stream whose first element is from and the successive -- elements are in increments of step. -- -- CAUTION: This function is not safe for finite integral types. It does -- not check for overflow, underflow or bounds. -- --
--   >>> Stream.toList $ Stream.take 4 $ Stream.enumerateFromStepIntegral 0 2
--   [0,2,4,6]
--   
-- --
--   >>> Stream.toList $ Stream.take 3 $ Stream.enumerateFromStepIntegral 0 (-2)
--   [0,-2,-4]
--   
enumerateFromStepIntegral :: (Integral a, Monad m) => a -> a -> Stream m a -- | Numerically stable enumeration from a Fractional number in -- steps of size 1. enumerateFromFractional from -- generates a stream whose first element is from and the -- successive elements are in increments of 1. No overflow or -- underflow checks are performed. -- -- This is the equivalent to enumFrom for Fractional types. -- For example: -- --
--   >>> Stream.toList $ Stream.take 4 $ Stream.enumerateFromFractional 1.1
--   [1.1,2.1,3.1,4.1]
--   
enumerateFromFractional :: (Monad m, Fractional a) => a -> Stream m a -- | Numerically stable enumeration from a Fractional number to a -- given limit. enumerateFromToFractional from to generates a -- finite stream whose first element is from and successive -- elements are in increments of 1 up to to. -- -- This is the equivalent of enumFromTo for Fractional -- types. For example: -- --
--   >>> Stream.toList $ Stream.enumerateFromToFractional 1.1 4
--   [1.1,2.1,3.1,4.1]
--   
-- --
--   >>> Stream.toList $ Stream.enumerateFromToFractional 1.1 4.6
--   [1.1,2.1,3.1,4.1,5.1]
--   
-- -- Notice that the last element is equal to the specified to -- value after rounding to the nearest integer. enumerateFromToFractional :: (Monad m, Fractional a, Ord a) => a -> a -> Stream m a -- | Numerically stable enumeration from a Fractional number in -- steps. enumerateFromThenFractional from then generates a -- stream whose first element is from, the second element is -- then and the successive elements are in increments of -- then - from. No overflow or underflow checks are performed. -- -- This is the equivalent of enumFromThen for Fractional -- types. For example: -- --
--   >>> Stream.toList $ Stream.take 4 $ Stream.enumerateFromThenFractional 1.1 2.1
--   [1.1,2.1,3.1,4.1]
--   
-- --
--   >>> Stream.toList $ Stream.take 4 $ Stream.enumerateFromThenFractional 1.1 (-2.1)
--   [1.1,-2.1,-5.300000000000001,-8.500000000000002]
--   
enumerateFromThenFractional :: (Monad m, Fractional a) => a -> a -> Stream m a -- | Numerically stable enumeration from a Fractional number in -- steps up to a given limit. enumerateFromThenToFractional from then -- to generates a finite stream whose first element is -- from, the second element is then and the successive -- elements are in increments of then - from up to to. -- -- This is the equivalent of enumFromThenTo for Fractional -- types. For example: -- --
--   >>> Stream.toList $ Stream.enumerateFromThenToFractional 0.1 2 6
--   [0.1,2.0,3.9,5.799999999999999]
--   
-- --
--   >>> Stream.toList $ Stream.enumerateFromThenToFractional 0.1 (-2) (-6)
--   [0.1,-2.0,-4.1000000000000005,-6.200000000000001]
--   
enumerateFromThenToFractional :: (Monad m, Fractional a, Ord a) => a -> a -> a -> Stream m a -- | Types that can be enumerated as a stream. The operations in this type -- class are equivalent to those in the Enum type class, except -- that these generate a stream instead of a list. Use the functions in -- Streamly.Internal.Data.Stream.Enumeration module to define new -- instances. class Enum a => Enumerable a -- | enumerateFrom from generates a stream starting with the -- element from, enumerating up to maxBound when the type -- is Bounded or generating an infinite stream when the type is -- not Bounded. -- --
--   >>> Stream.toList $ Stream.take 4 $ Stream.enumerateFrom (0 :: Int)
--   [0,1,2,3]
--   
-- -- For Fractional types, enumeration is numerically stable. -- However, no overflow or underflow checks are performed. -- --
--   >>> Stream.toList $ Stream.take 4 $ Stream.enumerateFrom 1.1
--   [1.1,2.1,3.1,4.1]
--   
enumerateFrom :: (Enumerable a, Monad m) => a -> Stream m a -- | Generate a finite stream starting with the element from, -- enumerating the type up to the value to. If to is -- smaller than from then an empty stream is returned. -- --
--   >>> Stream.toList $ Stream.enumerateFromTo 0 4
--   [0,1,2,3,4]
--   
-- -- For Fractional types, the last element is equal to the -- specified to value after rounding to the nearest integral -- value. -- --
--   >>> Stream.toList $ Stream.enumerateFromTo 1.1 4
--   [1.1,2.1,3.1,4.1]
--   
-- --
--   >>> Stream.toList $ Stream.enumerateFromTo 1.1 4.6
--   [1.1,2.1,3.1,4.1,5.1]
--   
enumerateFromTo :: (Enumerable a, Monad m) => a -> a -> Stream m a -- | enumerateFromThen from then generates a stream whose first -- element is from, the second element is then and the -- successive elements are in increments of then - from. -- Enumeration can occur downwards or upwards depending on whether -- then comes before or after from. For Bounded -- types the stream ends when maxBound is reached, for unbounded -- types it keeps enumerating infinitely. -- --
--   >>> Stream.toList $ Stream.take 4 $ Stream.enumerateFromThen 0 2
--   [0,2,4,6]
--   
-- --
--   >>> Stream.toList $ Stream.take 4 $ Stream.enumerateFromThen 0 (-2)
--   [0,-2,-4,-6]
--   
enumerateFromThen :: (Enumerable a, Monad m) => a -> a -> Stream m a -- | enumerateFromThenTo from then to generates a finite stream -- whose first element is from, the second element is -- then and the successive elements are in increments of -- then - from up to to. Enumeration can occur -- downwards or upwards depending on whether then comes before -- or after from. -- --
--   >>> Stream.toList $ Stream.enumerateFromThenTo 0 2 6
--   [0,2,4,6]
--   
-- --
--   >>> Stream.toList $ Stream.enumerateFromThenTo 0 (-2) (-6)
--   [0,-2,-4,-6]
--   
enumerateFromThenTo :: (Enumerable a, Monad m) => a -> a -> a -> Stream m a -- | times returns a stream of time value tuples with clock of 10 -- ms granularity. The first component of the tuple is an absolute time -- reference (epoch) denoting the start of the stream and the second -- component is a time relative to the reference. -- --
--   >>> f = Fold.drainMapM (\x -> print x >> threadDelay 1000000)
--   
--   >>> Stream.fold f $ Stream.take 3 $ Stream.times
--   (AbsTime (TimeSpec {sec = ..., nsec = ...}),RelTime64 (NanoSecond64 ...))
--   (AbsTime (TimeSpec {sec = ..., nsec = ...}),RelTime64 (NanoSecond64 ...))
--   (AbsTime (TimeSpec {sec = ..., nsec = ...}),RelTime64 (NanoSecond64 ...))
--   
-- -- Note: This API is not safe on 32-bit machines. -- -- Pre-release times :: MonadIO m => Stream m (AbsTime, RelTime64) -- | timesWith g returns a stream of time value tuples. The first -- component of the tuple is an absolute time reference (epoch) denoting -- the start of the stream and the second component is a time relative to -- the reference. -- -- The argument g specifies the granularity of the relative time -- in seconds. A lower granularity clock gives higher precision but is -- more expensive in terms of CPU usage. Any granularity lower than 1 ms -- is treated as 1 ms. -- --
--   >>> import Control.Concurrent (threadDelay)
--   
--   >>> f = Fold.drainMapM (\x -> print x >> threadDelay 1000000)
--   
--   >>> Stream.fold f $ Stream.take 3 $ Stream.timesWith 0.01
--   (AbsTime (TimeSpec {sec = ..., nsec = ...}),RelTime64 (NanoSecond64 ...))
--   (AbsTime (TimeSpec {sec = ..., nsec = ...}),RelTime64 (NanoSecond64 ...))
--   (AbsTime (TimeSpec {sec = ..., nsec = ...}),RelTime64 (NanoSecond64 ...))
--   
-- -- Note: This API is not safe on 32-bit machines. -- -- Pre-release timesWith :: MonadIO m => Double -> Stream m (AbsTime, RelTime64) -- | absTimes returns a stream of absolute timestamps using a -- clock of 10 ms granularity. -- --
--   >>> f = Fold.drainMapM print
--   
--   >>> Stream.fold f $ Stream.delayPre 1 $ Stream.take 3 $ Stream.absTimes
--   AbsTime (TimeSpec {sec = ..., nsec = ...})
--   AbsTime (TimeSpec {sec = ..., nsec = ...})
--   AbsTime (TimeSpec {sec = ..., nsec = ...})
--   
-- -- Note: This API is not safe on 32-bit machines. -- -- Pre-release absTimes :: MonadIO m => Stream m AbsTime -- | absTimesWith g returns a stream of absolute timestamps using -- a clock of granularity g specified in seconds. A low -- granularity clock is more expensive in terms of CPU usage. Any -- granularity lower than 1 ms is treated as 1 ms. -- --
--   >>> f = Fold.drainMapM print
--   
--   >>> Stream.fold f $ Stream.delayPre 1 $ Stream.take 3 $ Stream.absTimesWith 0.01
--   AbsTime (TimeSpec {sec = ..., nsec = ...})
--   AbsTime (TimeSpec {sec = ..., nsec = ...})
--   AbsTime (TimeSpec {sec = ..., nsec = ...})
--   
-- -- Note: This API is not safe on 32-bit machines. -- -- Pre-release absTimesWith :: MonadIO m => Double -> Stream m AbsTime -- | relTimes returns a stream of relative time values starting -- from 0, using a clock of granularity 10 ms. -- --
--   >>> f = Fold.drainMapM print
--   
--   >>> Stream.fold f $ Stream.delayPre 1 $ Stream.take 3 $ Stream.relTimes
--   RelTime64 (NanoSecond64 ...)
--   RelTime64 (NanoSecond64 ...)
--   RelTime64 (NanoSecond64 ...)
--   
-- -- Note: This API is not safe on 32-bit machines. -- -- Pre-release relTimes :: MonadIO m => Stream m RelTime64 -- | relTimesWith g returns a stream of relative time values -- starting from 0, using a clock of granularity g specified in -- seconds. A low granularity clock is more expensive in terms of CPU -- usage. Any granularity lower than 1 ms is treated as 1 ms. -- --
--   >>> f = Fold.drainMapM print
--   
--   >>> Stream.fold f $ Stream.delayPre 1 $ Stream.take 3 $ Stream.relTimesWith 0.01
--   RelTime64 (NanoSecond64 ...)
--   RelTime64 (NanoSecond64 ...)
--   RelTime64 (NanoSecond64 ...)
--   
-- -- Note: This API is not safe on 32-bit machines. -- -- Pre-release relTimesWith :: MonadIO m => Double -> Stream m RelTime64 -- | durations g returns a stream of relative time values -- measuring the time elapsed since the immediate predecessor element of -- the stream was generated. The first element of the stream is always 0. -- durations uses a clock of granularity g specified in -- seconds. A low granularity clock is more expensive in terms of CPU -- usage. The minimum granularity is 1 millisecond. Durations lower than -- 1 ms will be 0. -- -- Note: This API is not safe on 32-bit machines. -- -- Unimplemented durations :: Double -> t m RelTime64 -- | Generate a singleton event at or after the specified absolute time. -- Note that this is different from a threadDelay, a threadDelay starts -- from the time when the action is evaluated, whereas if we use AbsTime -- based timeout it will immediately expire if the action is evaluated -- too late. -- -- Unimplemented timeout :: AbsTime -> t m () fromIndices :: Monad m => (Int -> a) -> Stream m a fromIndicesM :: Monad m => (Int -> m a) -> Stream m a generate :: Monad m => Int -> (Int -> a) -> Stream m a generateM :: Monad m => Int -> (Int -> m a) -> Stream m a -- | Generate an infinite stream with x as the first element and -- each successive element derived by applying the function f on -- the previous element. -- --
--   >>> Stream.toList $ Stream.take 5 $ Stream.iterate (+1) 1
--   [1,2,3,4,5]
--   
iterate :: Monad m => (a -> a) -> a -> Stream m a -- | Generate an infinite stream with the first element generated by the -- action m and each successive element derived by applying the -- monadic function f on the previous element. -- --
--   >>> :{
--   Stream.iterateM (\x -> print x >> return (x + 1)) (return 0)
--       & Stream.take 3
--       & Stream.toList
--   :}
--   0
--   1
--   [0,1,2]
--   
iterateM :: Monad m => (a -> m a) -> m a -> Stream m a -- | Construct a stream from a list of pure values. fromList :: Applicative m => [a] -> Stream m a -- | Convert a list of monadic actions to a Stream fromListM :: Monad m => [m a] -> Stream m a -- |
--   >>> fromFoldable = Prelude.foldr Stream.cons Stream.nil
--   
-- -- Construct a stream from a Foldable containing pure values: -- -- /WARNING: O(n^2), suitable only for a small number of elements in the -- stream/ fromFoldable :: (Monad m, Foldable f) => f a -> Stream m a -- |
--   >>> fromFoldableM = Prelude.foldr Stream.consM Stream.nil
--   
-- -- Construct a stream from a Foldable containing pure values: -- -- /WARNING: O(n^2), suitable only for a small number of elements in the -- stream/ fromFoldableM :: (Monad m, Foldable f) => f (m a) -> Stream m a -- | Keep reading Storable elements from an immutable Ptr -- onwards. -- -- Unsafe: The caller is responsible for safe addressing. -- -- Pre-release fromPtr :: forall m a. (Monad m, Storable a) => Ptr a -> Stream m a -- | Take n Storable elements starting from an immutable -- Ptr onwards. -- --
--   >>> fromPtrN n = Stream.take n . Stream.fromPtr
--   
-- -- Unsafe: The caller is responsible for safe addressing. -- -- Pre-release fromPtrN :: (Monad m, Storable a) => Int -> Ptr a -> Stream m a -- | Read bytes from an immutable Addr# until a 0 byte is -- encountered, the 0 byte is not included in the stream. -- --
--   >>> :set -XMagicHash
--   
--   >>> fromByteStr# addr = Stream.takeWhile (/= 0) $ Stream.fromPtr $ Ptr addr
--   
-- -- Unsafe: The caller is responsible for safe addressing. -- -- Note that this is completely safe when reading from Haskell string -- literals because they are guaranteed to be NULL terminated: -- --
--   >>> Stream.toList $ Stream.fromByteStr# "\1\2\3\0"#
--   [1,2,3]
--   
fromByteStr# :: Monad m => Addr# -> Stream m Word8 -- | Convert a CPS encoded StreamK to direct style step encoded StreamD fromStreamK :: Applicative m => StreamK m a -> Stream m a -- | Convert a direct style step encoded StreamD to a CPS encoded StreamK toStreamK :: Monad m => Stream m a -> StreamK m a -- | Fold a stream using the supplied left Fold and reducing the -- resulting expression strictly at each step. The behavior is similar to -- foldl'. A Fold can terminate early without consuming the -- full stream. See the documentation of individual Folds for -- termination behavior. -- -- Definitions: -- --
--   >>> fold f = fmap fst . Stream.foldBreak f
--   
--   >>> fold f = Stream.parse (Parser.fromFold f)
--   
-- -- Example: -- --
--   >>> Stream.fold Fold.sum (Stream.enumerateFromTo 1 100)
--   5050
--   
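-- -- A terminating fold such as Fold.one stops without consuming the rest of -- the input, even on an infinite stream (illustrative, expected output): -- --
--   >>> Stream.fold Fold.one (Stream.fromList [1..])
--   Just 1
--   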
fold :: Monad m => Fold m a b -> Stream m a -> m b -- | Parse a stream using the supplied Parser. -- -- Parsers (See Streamly.Internal.Data.Parser) are more powerful -- folds that add backtracking and error functionality to terminating -- folds. Unlike folds, parsers may not always result in a valid output, -- they may result in an error. For example: -- --
--   >>> Stream.parse (Parser.takeEQ 1 Fold.drain) Stream.nil
--   Left (ParseError "takeEQ: Expecting exactly 1 elements, input terminated on 0")
--   
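-- -- A successful parse returns a Right value (illustrative, expected -- output): -- --
--   >>> Stream.parse (Parser.fromFold Fold.sum) (Stream.fromList [1..5])
--   Right 15
--   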
-- -- Note: parse p is not the same as head . parseMany p -- on an empty stream. parse :: Monad m => Parser a m b -> Stream m a -> m (Either ParseError b) -- | Run a Parse over a stream. parseD :: Monad m => Parser a m b -> Stream m a -> m (Either ParseError b) -- | Parse a stream using the supplied Parser. parseBreak :: Monad m => Parser a m b -> Stream m a -> m (Either ParseError b, Stream m a) -- | Run a Parse over a stream and return rest of the Stream. parseBreakD :: Monad m => Parser a m b -> Stream m a -> m (Either ParseError b, Stream m a) -- | Decompose a stream into its head and tail. If the stream is empty, -- returns Nothing. If the stream is non-empty, returns Just -- (a, ma), where a is the head of the stream and -- ma its tail. -- -- Properties: -- --
--   >>> Nothing <- Stream.uncons Stream.nil
--   
--   >>> Just ("a", t) <- Stream.uncons (Stream.cons "a" Stream.nil)
--   
-- -- This can be used to consume the stream in an imperative manner one -- element at a time, as it just breaks down the stream into individual -- elements and we can loop over them as we deem fit. For example, this -- can be used to convert a streamly stream into other stream types. -- -- All the folds in this module can be expressed in terms of -- uncons, however, this is generally less efficient than specific -- folds because it takes apart the stream one element at a time, -- therefore, does not take adavantage of stream fusion. -- -- foldBreak is a more general way of consuming a stream -- piecemeal. -- --
--   >>> :{
--   uncons xs = do
--       r <- Stream.foldBreak Fold.one xs
--       return $ case r of
--           (Nothing, _) -> Nothing
--           (Just h, t) -> Just (h, t)
--   :}
--   
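-- -- For example, taking the stream apart one element at a time -- (illustrative, expected output): -- --
--   >>> Just (h, t) <- Stream.uncons (Stream.fromList [1,2,3])
--   
--   >>> h
--   1
--   
--   >>> Stream.fold Fold.toList t
--   [2,3]
--   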
uncons :: Monad m => Stream m a -> m (Maybe (a, Stream m a)) -- | Right associative/lazy pull fold. foldrM build final stream -- constructs an output structure using the step function build. -- build is invoked with the next input element and the -- remaining (lazy) tail of the output structure. It builds a lazy output -- expression using the two. When the "tail structure" in the output -- expression is evaluated it calls build again thus lazily -- consuming the input stream until either the output expression -- built by build is free of the "tail" or the input is -- exhausted in which case final is used as the terminating case -- for the output structure. For more details see the description in the -- previous section. -- -- Example, determine if any element is odd in a stream: -- --
--   >>> s = Stream.fromList (2:4:5:undefined)
--   
--   >>> step x xs = if odd x then return True else xs
--   
--   >>> Stream.foldrM step (return False) s
--   True
--   
foldrM :: Monad m => (a -> m b -> m b) -> m b -> Stream m a -> m b -- | Right fold, lazy for lazy monads and pure streams, and strict for -- strict monads. -- -- Please avoid using this routine in strict monads like IO unless you -- need a strict right fold. This is provided only for use in lazy monads -- (e.g. Identity) or pure streams. Note that with this signature it is -- not possible to implement a lazy foldr when the monad m is -- strict. In that case it would be strict in its accumulator and -- therefore would necessarily consume all its input. -- --
--   >>> foldr f z = Stream.foldrM (\a b -> f a <$> b) (return z)
--   
-- -- Note: This is similar to Fold.foldr' (the right fold via left fold), -- but could be more efficient. foldr :: Monad m => (a -> b -> b) -> b -> Stream m a -> m b foldrMx :: Monad m => (a -> m x -> m x) -> m x -> (m x -> m b) -> Stream m a -> m b foldr1 :: Monad m => (a -> a -> a) -> Stream m a -> m (Maybe a) foldlM' :: Monad m => (b -> a -> m b) -> m b -> Stream m a -> m b foldl' :: Monad m => (b -> a -> b) -> b -> Stream m a -> m b foldlMx' :: Monad m => (x -> a -> m x) -> m x -> (x -> m b) -> Stream m a -> m b foldlx' :: Monad m => (x -> a -> x) -> x -> (x -> b) -> Stream m a -> m b -- | Definitions: -- --
--   >>> drain = Stream.fold Fold.drain
--   
--   >>> drain = Stream.foldrM (\_ xs -> xs) (return ())
--   
-- -- Run a stream, discarding the results. drain :: Monad m => Stream m a -> m () -- | Execute a monadic action for each element of the Stream mapM_ :: Monad m => (a -> m b) -> Stream m a -> m () null :: Monad m => Stream m a -> m Bool head :: Monad m => Stream m a -> m (Maybe a) headElse :: Monad m => a -> Stream m a -> m a tail :: Monad m => Stream m a -> m (Maybe (Stream m a)) last :: Monad m => Stream m a -> m (Maybe a) elem :: (Monad m, Eq a) => a -> Stream m a -> m Bool notElem :: (Monad m, Eq a) => a -> Stream m a -> m Bool all :: Monad m => (a -> Bool) -> Stream m a -> m Bool any :: Monad m => (a -> Bool) -> Stream m a -> m Bool maximum :: (Monad m, Ord a) => Stream m a -> m (Maybe a) maximumBy :: Monad m => (a -> a -> Ordering) -> Stream m a -> m (Maybe a) minimum :: (Monad m, Ord a) => Stream m a -> m (Maybe a) minimumBy :: Monad m => (a -> a -> Ordering) -> Stream m a -> m (Maybe a) lookup :: (Monad m, Eq a) => a -> Stream m (a, b) -> m (Maybe b) findM :: Monad m => (a -> m Bool) -> Stream m a -> m (Maybe a) find :: Monad m => (a -> Bool) -> Stream m a -> m (Maybe a) (!!) :: Monad m => Stream m a -> Int -> m (Maybe a) the :: (Eq a, Monad m) => Stream m a -> m (Maybe a) -- | Definitions: -- --
--   >>> toList = Stream.foldr (:) []
--   
--   >>> toList = Stream.fold Fold.toList
--   
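-- -- For example (illustrative, expected output): -- --
--   >>> Stream.toList (Stream.fromList [1,2,3])
--   [1,2,3]
--   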
-- -- Convert a stream into a list in the underlying monad. The list can be -- consumed lazily in a lazy monad (e.g. Identity). In a strict -- monad (e.g. IO) the whole list is generated and buffered before it can -- be consumed. -- -- Warning! Working on large lists accumulated as buffers in -- memory could be very inefficient, consider using -- Streamly.Data.Array instead. -- -- Note that this could be a bit more efficient compared to Stream.fold -- Fold.toList, and it can fuse with pure list consumers. toList :: Monad m => Stream m a -> m [a] toListRev :: Monad m => Stream m a -> m [a] -- | Compare two streams for equality eqBy :: Monad m => (a -> b -> Bool) -> Stream m a -> Stream m b -> m Bool -- | Compare two streams lexicographically. cmpBy :: Monad m => (a -> b -> Ordering) -> Stream m a -> Stream m b -> m Ordering -- | Returns True if the first stream is the same as or a prefix of -- the second. A stream is a prefix of itself. -- --
--   >>> Stream.isPrefixOf (Stream.fromList "hello") (Stream.fromList "hello" :: Stream IO Char)
--   True
--   
isPrefixOf :: (Monad m, Eq a) => Stream m a -> Stream m a -> m Bool -- | Returns True if the first stream is an infix of the second. A -- stream is considered an infix of itself. -- --
--   >>> s = Stream.fromList "hello" :: Stream IO Char
--   
--   >>> Stream.isInfixOf s s
--   True
--   
-- -- Space: O(n) worst case where n is the length of the -- infix. -- -- Pre-release -- -- Requires Storable constraint isInfixOf :: (MonadIO m, Eq a, Enum a, Storable a, Unbox a) => Stream m a -> Stream m a -> m Bool -- | Returns True if the first stream is a suffix of the second. A -- stream is considered a suffix of itself. -- --
--   >>> Stream.isSuffixOf (Stream.fromList "hello") (Stream.fromList "hello" :: Stream IO Char)
--   True
--   
-- -- Space: O(n), buffers entire input stream and the suffix. -- -- Pre-release -- -- Suboptimal - Help wanted. isSuffixOf :: (Monad m, Eq a) => Stream m a -> Stream m a -> m Bool -- | Much faster than isSuffixOf. isSuffixOfUnbox :: (MonadIO m, Eq a, Unbox a) => Stream m a -> Stream m a -> m Bool -- | Returns True if all the elements of the first stream occur, in -- order, in the second stream. The elements do not have to occur -- consecutively. A stream is a subsequence of itself. -- --
--   >>> Stream.isSubsequenceOf (Stream.fromList "hlo") (Stream.fromList "hello" :: Stream IO Char)
--   True
--   
isSubsequenceOf :: (Monad m, Eq a) => Stream m a -> Stream m a -> m Bool -- | stripPrefix prefix input strips the prefix stream -- from the input stream if it is a prefix of input. Returns -- Nothing if the input does not start with the given prefix, -- stripped input otherwise. Returns Just nil when the prefix is -- the same as the input stream. -- -- Space: O(1) stripPrefix :: (Monad m, Eq a) => Stream m a -> Stream m a -> m (Maybe (Stream m a)) -- | Drops the given suffix from a stream. Returns Nothing if the -- stream does not end with the given suffix. Returns Just nil -- when the suffix is the same as the stream. -- -- It may be more efficient to convert the stream to an Array and use -- stripSuffix on that especially if the elements have a Storable or Prim -- instance. -- -- Space: O(n), buffers the entire input stream as well as the -- suffix -- -- Pre-release stripSuffix :: (Monad m, Eq a) => Stream m a -> Stream m a -> m (Maybe (Stream m a)) -- | Much faster than stripSuffix. stripSuffixUnbox :: (MonadIO m, Eq a, Unbox a) => Stream m a -> Stream m a -> m (Maybe (Stream m a)) -- | Like gbracket but with following differences: -- -- -- -- Inhibits stream fusion -- -- Pre-release gbracket_ :: Monad m => m c -> (c -> m d) -> (c -> e -> Stream m b -> m (Stream m b)) -> (forall s. m s -> m (Either e s)) -> (c -> Stream m b) -> Stream m b -- | Run the alloc action m c with async exceptions disabled but -- keeping blocking operations interruptible (see mask). Use the -- output c as input to c -> Stream m b to generate -- an output stream. When generating the stream use the supplied -- try operation forall s. m s -> m (Either e s) to -- catch synchronous exceptions. If an exception occurs run the exception -- handler c -> e -> Stream m b -> m (Stream m b). Note -- that gbracket does not rethrow the exception, it has to be done -- by the exception handler if desired. -- -- The cleanup action c -> m d, runs whenever the stream ends -- normally, due to a sync or async exception or if it gets garbage -- collected after a partial lazy evaluation. See bracket for -- the semantics of the cleanup action. -- -- gbracket can express all other exception handling combinators. -- -- Inhibits stream fusion -- -- Pre-release gbracket :: MonadIO m => IO c -> (c -> IO d1) -> (c -> e -> Stream m b -> IO (Stream m b)) -> (c -> IO d2) -> (forall s. m s -> m (Either e s)) -> (c -> Stream m b) -> Stream m b -- | Run the action m b before the stream yields its first -- element. -- -- Same as the following but more efficient due to fusion: -- --
--   >>> before action xs = Stream.nilM action <> xs
--   
--   >>> before action xs = Stream.concatMap (const xs) (Stream.fromEffect action)
--   
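-- -- For example, the action runs before the first element is produced -- (illustrative, expected output): -- --
--   >>> Stream.fold Fold.toList $ Stream.before (putStrLn "before") (Stream.fromList [1,2])
--   before
--   [1,2]
--   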
before :: Monad m => m b -> Stream m a -> Stream m a -- | Like after, with following differences: -- -- -- -- Same as the following, but with stream fusion: -- --
--   >>> afterUnsafe action xs = xs <> Stream.nilM action
--   
-- -- Pre-release afterUnsafe :: Monad m => m b -> Stream m a -> Stream m a -- | Run the action IO b whenever the stream is evaluated to -- completion, or if it is garbage collected after a partial lazy -- evaluation. -- -- The semantics of the action IO b are similar to the semantics -- of cleanup action in bracketIO. -- -- See also afterUnsafe afterIO :: MonadIO m => IO b -> Stream m a -> Stream m a -- | Like bracket but with following differences: -- -- -- -- Inhibits stream fusion -- -- Pre-release bracketUnsafe :: MonadCatch m => m b -> (b -> m c) -> (b -> Stream m a) -> Stream m a -- | Like bracketIO but can use 3 separate cleanup actions depending -- on the mode of termination: -- --
--   1. When the stream stops normally
--   2. When the stream is garbage collected
--   3. When the stream encounters an exception
-- -- bracketIO3 before onStop onGC onException action runs -- action using the result of before. If the stream -- stops, onStop action is executed, if the stream is abandoned -- onGC is executed, if the stream encounters an exception -- onException is executed. -- -- The exception is not caught, it is rethrown. -- -- Inhibits stream fusion -- -- Pre-release bracketIO3 :: (MonadIO m, MonadCatch m) => IO b -> (b -> IO c) -> (b -> IO d) -> (b -> IO e) -> (b -> Stream m a) -> Stream m a -- | Run the alloc action IO b with async exceptions disabled but -- keeping blocking operations interruptible (see mask). Use the -- output b of the IO action as input to the function b -- -> Stream m a to generate an output stream. -- -- b is usually a resource under the IO monad, e.g. a file -- handle, that requires a cleanup after use. The cleanup action b -- -> IO c, runs whenever (1) the stream ends normally, (2) due -- to a sync or async exception or, (3) if it gets garbage collected -- after a partial lazy evaluation. The exception is not caught, it is -- rethrown. -- -- bracketIO only guarantees that the cleanup action runs, and it -- runs with async exceptions enabled. The action must ensure that it can -- successfully cleanup the resource in the face of sync or async -- exceptions. -- -- When the stream ends normally or on a sync exception, cleanup action -- runs immediately in the current thread context, whereas in other cases -- it runs in the GC context, therefore, cleanup may be delayed until the -- GC gets to run. An example where GC based cleanup happens is when a -- stream is being folded but the fold terminates without draining the -- entire stream or if the consumer of the stream encounters an -- exception. -- -- Observes exceptions only in the stream generation, and not in stream -- consumers. -- -- See also: bracketUnsafe -- -- Inhibits stream fusion bracketIO :: (MonadIO m, MonadCatch m) => IO b -> (b -> IO c) -> (b -> Stream m a) -> Stream m a -- | Run the action m b if the stream evaluation is aborted due to -- an exception. The exception is not caught, simply rethrown. -- -- Observes exceptions only in the stream generation, and not in stream -- consumers. -- -- Inhibits stream fusion onException :: MonadCatch m => m b -> Stream m a -> Stream m a -- | Like finally with following differences: -- -- -- -- Inhibits stream fusion -- -- Pre-release finallyUnsafe :: MonadCatch m => m b -> Stream m a -> Stream m a -- | Run the action IO b whenever the stream stream stops -- normally, aborts due to an exception or if it is garbage collected -- after a partial lazy evaluation. -- -- The semantics of running the action IO b are similar to the -- cleanup action semantics described in bracketIO. -- --
--   >>> finallyIO release = Stream.bracketIO (return ()) (const release)
--   
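-- -- For example, the cleanup action runs when the stream is drained to -- completion (illustrative, expected output): -- --
--   >>> Stream.fold Fold.drain $ Stream.finallyIO (putStrLn "cleanup") (Stream.fromList [1,2,3])
--   cleanup
--   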
-- -- See also finallyUnsafe -- -- Inhibits stream fusion finallyIO :: (MonadIO m, MonadCatch m) => IO b -> Stream m a -> Stream m a -- | Like handle but the exception handler is also provided with the -- stream that generated the exception as input. The exception handler -- can thus re-evaluate the stream to retry the action that failed. The -- exception handler can again call ghandle on it to retry the -- action multiple times. -- -- This is highly experimental. In a stream of actions we can map the -- stream with a retry combinator to retry each action on failure. -- -- Inhibits stream fusion -- -- Pre-release ghandle :: (MonadCatch m, Exception e) => (e -> Stream m a -> m (Stream m a)) -> Stream m a -> Stream m a -- | When evaluating a stream if an exception occurs, stream evaluation -- aborts and the specified exception handler is run with the exception -- as argument. The exception is caught and handled unless the handler -- decides to rethrow it. Note that exception handling is not applied to -- the stream returned by the exception handler. -- -- Observes exceptions only in the stream generation, and not in stream -- consumers. -- -- Inhibits stream fusion handle :: (MonadCatch m, Exception e) => (e -> m (Stream m a)) -> Stream m a -> Stream m a -- | Transform the inner monad of a stream using a natural transformation. -- -- Example, generalize the inner monad from Identity to any other: -- --
--   >>> generalizeInner = Stream.morphInner (return . runIdentity)
--   
-- -- Also known as hoist. morphInner :: Monad n => (forall x. m x -> n x) -> Stream m a -> Stream n a -- | Generalize the inner monad of the stream from Identity to any -- monad. -- -- Definition: -- --
--   >>> generalizeInner = Stream.morphInner (return . runIdentity)
--   
generalizeInner :: Monad m => Stream Identity a -> Stream m a -- | Lift the inner monad m of a stream Stream m a to -- t m using the supplied lift function. liftInnerWith :: Monad (t m) => (forall b. m b -> t m b) -> Stream m a -> Stream (t m) a -- | Evaluate the inner monad of a stream using the supplied runner -- function. runInnerWith :: Monad m => (forall b. t m b -> m b) -> Stream (t m) a -> Stream m a -- | Evaluate the inner monad of a stream using the supplied stateful -- runner function and the initial state. The state returned by an -- invocation of the runner is supplied as input state to the next -- invocation. runInnerWithState :: Monad m => (forall b. s -> t m b -> m (b, s)) -> m s -> Stream (t m) a -> Stream m (s, a) -- | Lazy left fold to a transformer monad. foldlT :: (Monad m, Monad (s m), MonadTrans s) => (s m b -> a -> s m b) -> s m b -> Stream m a -> s m b -- | Right fold to a transformer monad. This is the most general right fold -- function. foldrS is a special case of foldrT, however -- foldrS implementation can be more efficient: -- --
--   >>> foldrS = Stream.foldrT
--   
-- --
--   >>> step f x xs = lift $ f x (runIdentityT xs)
--   
--   >>> foldrM f z s = runIdentityT $ Stream.foldrT (step f) (lift z) s
--   
-- -- foldrT can be used to translate streamly streams to other -- transformer monads e.g. to a different streaming type. -- -- Pre-release foldrT :: (Monad m, Monad (t m), MonadTrans t) => (a -> t m b -> t m b) -> t m b -> Stream m a -> t m b -- | Lift the inner monad m of Stream m a to t m -- where t is a monad transformer. liftInner :: (Monad m, MonadTrans t, Monad (t m)) => Stream m a -> Stream (t m) a -- | Evaluate the inner monad of a stream as ReaderT. runReaderT :: Monad m => m s -> Stream (ReaderT s m) a -> Stream m a -- | Run a stream transformation using a given environment. usingReaderT :: Monad m => m r -> (Stream (ReaderT r m) a -> Stream (ReaderT r m) a) -> Stream m a -> Stream m a -- | Evaluate the inner monad of a stream as StateT. -- --
--   >>> evalStateT s = fmap snd . Stream.runStateT s
--   
evalStateT :: Monad m => m s -> Stream (StateT s m) a -> Stream m a -- | Evaluate the inner monad of a stream as StateT and emit the -- resulting state and value pair after each step. runStateT :: Monad m => m s -> Stream (StateT s m) a -> Stream m (s, a) -- | Run a stateful (StateT) stream transformation using a given state. -- --
--   >>> usingStateT s f = Stream.evalStateT s . f . Stream.liftInner
--   
-- -- See also: scan usingStateT :: Monad m => m s -> (Stream (StateT s m) a -> Stream (StateT s m) a) -> Stream m a -> Stream m a data AppendState s1 s2 AppendFirst :: s1 -> AppendState s1 s2 AppendSecond :: s2 -> AppendState s1 s2 -- | WARNING! O(n^2) time complexity wrt number of streams. Suitable for -- statically fusing a small number of streams. Use the O(n) complexity -- StreamK.append otherwise. -- -- Fuses two streams sequentially, yielding all elements from the first -- stream, and then all elements from the second stream. -- --
--   >>> s1 = Stream.fromList [1,2]
--   
--   >>> s2 = Stream.fromList [3,4]
--   
--   >>> Stream.fold Fold.toList $ s1 `Stream.append` s2
--   [1,2,3,4]
--   
append :: Monad m => Stream m a -> Stream m a -> Stream m a data InterleaveState s1 s2 InterleaveFirst :: s1 -> s2 -> InterleaveState s1 s2 InterleaveSecond :: s1 -> s2 -> InterleaveState s1 s2 InterleaveSecondOnly :: s2 -> InterleaveState s1 s2 InterleaveFirstOnly :: s1 -> InterleaveState s1 s2 -- | WARNING! O(n^2) time complexity wrt number of streams. Suitable for -- statically fusing a small number of streams. Use the O(n) complexity -- StreamK.interleave otherwise. -- -- Interleaves two streams, yielding one element from each stream -- alternately. When one stream stops the rest of the other stream is -- used in the output stream. interleave :: Monad m => Stream m a -> Stream m a -> Stream m a -- | Like interleave but stops interleaving as soon as any of the -- two streams stops. interleaveMin :: Monad m => Stream m a -> Stream m a -> Stream m a -- | Interleaves the outputs of two streams, yielding elements from each -- stream alternately, starting from the first stream and ending at the -- first stream. If the second stream is longer than the first, elements -- from the second stream are infixed with elements from the first -- stream. If the first stream is longer then it continues yielding -- elements even after the second stream has finished. -- --
--   >>> :set -XOverloadedStrings
--   
--   >>> import Data.Functor.Identity (Identity)
--   
--   >>> Stream.interleaveFst "abc" ",,,," :: Stream Identity Char
--   fromList "a,b,c"
--   
--   >>> Stream.interleaveFst "abc" "," :: Stream Identity Char
--   fromList "a,bc"
--   
-- -- interleaveFst is a dual of interleaveFstSuffix. -- -- Do not use dynamically. -- -- Pre-release interleaveFst :: Monad m => Stream m a -> Stream m a -> Stream m a -- | Interleaves the outputs of two streams, yielding elements from each -- stream alternately, starting from the first stream. As soon as the -- first stream finishes, the output stops, discarding the remaining part -- of the second stream. In this case, the last element in the resulting -- stream would be from the second stream. If the second stream finishes -- early then the first stream still continues to yield elements until it -- finishes. -- --
--   >>> :set -XOverloadedStrings
--   
--   >>> import Data.Functor.Identity (Identity)
--   
--   >>> Stream.interleaveFstSuffix "abc" ",,,," :: Stream Identity Char
--   fromList "a,b,c,"
--   
--   >>> Stream.interleaveFstSuffix "abc" "," :: Stream Identity Char
--   fromList "a,bc"
--   
-- -- interleaveFstSuffix is a dual of interleaveFst. -- -- Do not use dynamically. -- -- Pre-release interleaveFstSuffix :: Monad m => Stream m a -> Stream m a -> Stream m a -- | Schedule the execution of two streams in a fair round-robin manner, -- executing each stream once, alternately. Execution of a stream may not -- necessarily result in an output, a stream may choose to Skip -- producing an element until later giving the other stream a chance to -- run. Therefore, this combinator fairly interleaves the execution of -- two streams rather than fairly interleaving the output of the two -- streams. This can be useful in co-operative multitasking without using -- explicit threads. This can be used as an alternative to -- async. -- -- Do not use dynamically. -- -- Pre-release roundRobin :: Monad m => Stream m a -> Stream m a -> Stream m a -- | WARNING! O(n^2) time complexity wrt number of streams. Suitable for -- statically fusing a small number of streams. Use the O(n) complexity -- StreamK.zipWith otherwise. -- -- Stream a is evaluated first, followed by stream b, -- the resulting elements a and b are then zipped using -- the supplied zip function and the result c is yielded to the -- consumer. -- -- If stream a or stream b ends, the zipped stream -- ends. If stream b ends first, the element a from -- previous evaluation of stream a is discarded. -- --
--   >>> s1 = Stream.fromList [1,2,3]
--   
--   >>> s2 = Stream.fromList [4,5,6]
--   
--   >>> Stream.fold Fold.toList $ Stream.zipWith (+) s1 s2
--   [5,7,9]
--   
zipWith :: Monad m => (a -> b -> c) -> Stream m a -> Stream m b -> Stream m c -- | Like zipWith but using a monadic zipping function. zipWithM :: Monad m => (a -> b -> m c) -> Stream m a -> Stream m b -> Stream m c -- | WARNING! O(n^2) time complexity wrt number of streams. Suitable for -- statically fusing a small number of streams. Use the O(n) complexity -- StreamK.mergeBy otherwise. -- -- Merge two streams using a comparison function. The head elements of -- both the streams are compared and the smaller of the two elements is -- emitted, if both elements are equal then the element from the first -- stream is used first. -- -- If the streams are sorted in ascending order, the resulting stream -- would also remain sorted in ascending order. -- --
--   >>> s1 = Stream.fromList [1,3,5]
--   
--   >>> s2 = Stream.fromList [2,4,6,8]
--   
--   >>> Stream.fold Fold.toList $ Stream.mergeBy compare s1 s2
--   [1,2,3,4,5,6,8]
--   
mergeBy :: Monad m => (a -> a -> Ordering) -> Stream m a -> Stream m a -> Stream m a -- | Like mergeBy but with a monadic comparison function. -- -- Example, to merge two streams randomly: -- --
--   > randomly _ _ = randomIO >>= \x -> return $ if x then LT else GT
--   > Stream.toList $ Stream.mergeByM randomly (Stream.fromList [1,1,1,1]) (Stream.fromList [2,2,2,2])
--   [2,1,2,2,2,1,1,1]
--   
-- -- Example, merge two streams in a proportion of 2:1: -- --
--   >>> :{
--   do
--    let s1 = Stream.fromList [1,1,1,1,1,1]
--        s2 = Stream.fromList [2,2,2]
--    let proportionately m n = do
--         ref <- newIORef $ cycle $ Prelude.concat [Prelude.replicate m LT, Prelude.replicate n GT]
--         return $ \_ _ -> do
--            r <- readIORef ref
--            writeIORef ref $ Prelude.tail r
--            return $ Prelude.head r
--    f <- proportionately 2 1
--    xs <- Stream.fold Fold.toList $ Stream.mergeByM f s1 s2
--    print xs
--   :}
--   [1,1,2,1,1,2,1,1,2]
--   
mergeByM :: Monad m => (a -> a -> m Ordering) -> Stream m a -> Stream m a -> Stream m a -- | Like mergeByM but stops merging as soon as any of the two -- streams stops. -- -- Unimplemented mergeMinBy :: (a -> a -> m Ordering) -> Stream m a -> Stream m a -> Stream m a -- | Like mergeByM but stops merging as soon as the first stream -- stops. -- -- Unimplemented mergeFstBy :: (a -> a -> m Ordering) -> Stream m a -> Stream m a -> Stream m a -- | Map a stream producing function on each element of the stream and then -- flatten the results into a single stream. -- --
--   >>> concatMap f = Stream.concatMapM (return . f)
--   
--   >>> concatMap f = Stream.concat . fmap f
--   
--   >>> concatMap f = Stream.unfoldMany (Unfold.lmap f Unfold.fromStream)
--   
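-- -- For example (illustrative, expected output): -- --
--   >>> Stream.fold Fold.toList $ Stream.concatMap (\x -> Stream.fromList [x, x]) (Stream.fromList [1,2,3])
--   [1,1,2,2,3,3]
--   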
-- -- See unfoldMany for a fusible alternative. concatMap :: Monad m => (a -> Stream m b) -> Stream m a -> Stream m b -- | Map a stream producing monadic function on each element of the stream -- and then flatten the results into a single stream. Since the stream -- generation function is monadic, unlike concatMap, it can -- produce an effect at the beginning of each iteration of the inner -- loop. -- -- See unfoldMany for a fusible alternative. concatMapM :: Monad m => (a -> m (Stream m b)) -> Stream m a -> Stream m b -- | unfoldMany unfold stream uses unfold to map the -- input stream elements to streams and then flattens the generated -- streams into a single output stream. -- -- Like concatMap but uses an Unfold for stream generation. -- Unlike concatMap this can fuse the Unfold code with the -- inner loop and therefore provide many times better performance. unfoldMany :: Monad m => Unfold m a b -> Stream m a -> Stream m b data ConcatUnfoldInterleaveState o i ConcatUnfoldInterleaveOuter :: o -> [i] -> ConcatUnfoldInterleaveState o i ConcatUnfoldInterleaveInner :: o -> [i] -> ConcatUnfoldInterleaveState o i ConcatUnfoldInterleaveInnerL :: [i] -> [i] -> ConcatUnfoldInterleaveState o i ConcatUnfoldInterleaveInnerR :: [i] -> [i] -> ConcatUnfoldInterleaveState o i -- | This does not pair streams like mergeMapWith, instead, it goes through -- each stream one by one and yields one element from each stream. After -- it goes to the last stream it reverses the traversal to come back to -- the first stream yielding elements from each stream on its way back to -- the first stream and so on. -- --
--   >>> lists = Stream.fromList [[1,1],[2,2],[3,3],[4,4],[5,5]]
--   
--   >>> interleaved = Stream.unfoldInterleave Unfold.fromList lists
--   
--   >>> Stream.fold Fold.toList interleaved
--   [1,2,3,4,5,5,4,3,2,1]
--   
-- -- Note that this is order of magnitude more efficient than "mergeMapWith -- interleave" because of fusion. unfoldInterleave :: Monad m => Unfold m a b -> Stream m a -> Stream m b -- | unfoldInterleave switches to the next stream whenever a value -- from a stream is yielded, it does not switch on a Skip. So if a -- stream keeps skipping for long time other streams won't get a chance -- to run. unfoldRoundRobin switches on Skip as well. So it -- basically schedules each stream fairly irrespective of whether it -- produces a value or not. unfoldRoundRobin :: Monad m => Unfold m a b -> Stream m a -> Stream m b -- | Unfold the elements of a stream, intersperse the given element between -- the unfolded streams and then concat them into a single stream. -- --
--   >>> unwords = Stream.interpose ' '
--   
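-- -- For example (illustrative, expected output): -- --
--   >>> input = Stream.fromList ["abc", "def", "ghi"]
--   
--   >>> Stream.fold Fold.toList $ Stream.interpose ' ' Unfold.fromList input
--   "abc def ghi"
--   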
-- -- Pre-release interpose :: Monad m => c -> Unfold m b c -> Stream m b -> Stream m c interposeM :: Monad m => m c -> Unfold m b c -> Stream m b -> Stream m c -- | Unfold the elements of a stream, append the given element after each -- unfolded stream and then concat them into a single stream. -- --
--   >>> unlines = Stream.interposeSuffix '\n'
--   
-- -- Pre-release interposeSuffix :: Monad m => c -> Unfold m b c -> Stream m b -> Stream m c interposeSuffixM :: Monad m => m c -> Unfold m b c -> Stream m b -> Stream m c -- | interleaveFst followed by unfold and concat. -- -- Pre-release gintercalate :: Monad m => Unfold m a c -> Stream m a -> Unfold m b c -> Stream m b -> Stream m c -- | interleaveFstSuffix followed by unfold and concat. -- -- Pre-release gintercalateSuffix :: Monad m => Unfold m a c -> Stream m a -> Unfold m b c -> Stream m b -> Stream m c -- | intersperse followed by unfold and concat. -- --
--   >>> intercalate u a = Stream.unfoldMany u . Stream.intersperse a
--   
--   >>> intersperse = Stream.intercalate Unfold.identity
--   
--   >>> unwords = Stream.intercalate Unfold.fromList " "
--   
-- --
--   >>> input = Stream.fromList ["abc", "def", "ghi"]
--   
--   >>> Stream.fold Fold.toList $ Stream.intercalate Unfold.fromList " " input
--   "abc def ghi"
--   
intercalate :: Monad m => Unfold m b c -> b -> Stream m b -> Stream m c -- | intersperseMSuffix followed by unfold and concat. -- --
--   >>> intercalateSuffix u a = Stream.unfoldMany u . Stream.intersperseMSuffix a
--   
--   >>> intersperseMSuffix = Stream.intercalateSuffix Unfold.identity
--   
--   >>> unlines = Stream.intercalateSuffix Unfold.fromList "\n"
--   
-- --
--   >>> input = Stream.fromList ["abc", "def", "ghi"]
--   
--   >>> Stream.fold Fold.toList $ Stream.intercalateSuffix Unfold.fromList "\n" input
--   "abc\ndef\nghi\n"
--   
intercalateSuffix :: Monad m => Unfold m b c -> b -> Stream m b -> Stream m c -- | Apply a Fold repeatedly on a stream and emit the results in the -- output stream. -- -- Definition: -- --
--   >>> foldMany f = Stream.parseMany (Parser.fromFold f)
--   
-- -- Example, empty stream: -- --
--   >>> f = Fold.take 2 Fold.sum
--   
--   >>> fmany = Stream.fold Fold.toList . Stream.foldMany f
--   
--   >>> fmany $ Stream.fromList []
--   []
--   
-- -- Example, last fold empty: -- --
--   >>> fmany $ Stream.fromList [1..4]
--   [3,7]
--   
-- -- Example, last fold non-empty: -- --
--   >>> fmany $ Stream.fromList [1..5]
--   [3,7,5]
--   
-- -- Note that using a closed fold e.g. Fold.take 0, would result -- in an infinite stream on a non-empty input stream. foldMany :: Monad m => Fold m a b -> Stream m a -> Stream m b -- | Like foldMany but for the Refold type. The supplied -- action is used as the initial value for each refold. -- -- Internal refoldMany :: Monad m => Refold m x a b -> m x -> Stream m a -> Stream m b -- | Apply a stream of folds to an input stream and emit the results in the -- output stream. -- -- Unimplemented foldSequence :: Stream m (Fold m a b) -> Stream m a -> Stream m b -- | Iterate a fold generator on a stream. The initial value b is -- used to generate the first fold, the fold is applied on the stream and -- the result of the fold is used to generate the next fold and so on. -- --
--   >>> import Data.Monoid (Sum(..))
--   
--   >>> f x = return (Fold.take 2 (Fold.sconcat x))
--   
--   >>> s = fmap Sum $ Stream.fromList [1..10]
--   
--   >>> Stream.fold Fold.toList $ fmap getSum $ Stream.foldIterateM f (pure 0) s
--   [3,10,21,36,55,55]
--   
-- -- This is the streaming equivalent of monad like sequenced application -- of folds where next fold is dependent on the previous fold. -- -- Pre-release foldIterateM :: Monad m => (b -> m (Fold m a b)) -> m b -> Stream m a -> Stream m b -- | Like foldIterateM but using the Refold type instead. -- This could be much more efficient due to stream fusion. -- -- Internal refoldIterateM :: Monad m => Refold m b a b -> m b -> Stream m a -> Stream m b -- | Apply a Parser repeatedly on a stream and emit the parsed -- values in the output stream. -- -- Example: -- --
--   >>> s = Stream.fromList [1..10]
--   
--   >>> parser = Parser.takeBetween 0 2 Fold.sum
--   
--   >>> Stream.fold Fold.toList $ Stream.parseMany parser s
--   [Right 3,Right 7,Right 11,Right 15,Right 19]
--   
-- -- This is the streaming equivalent of the many parse combinator. -- -- Known Issues: When the parser fails there is no way to get the -- remaining stream. parseMany :: Monad m => Parser a m b -> Stream m a -> Stream m (Either ParseError b) parseManyD :: Monad m => Parser a m b -> Stream m a -> Stream m (Either ParseError b) -- | Apply a stream of parsers to an input stream and emit the results in -- the output stream. -- -- Unimplemented parseSequence :: Stream m (Parser a m b) -> Stream m a -> Stream m b -- | parseManyTill collect test stream tries the parser -- test on the input, if test fails it backtracks and -- tries collect, after collect succeeds test -- is tried again and so on. The parser stops when test -- succeeds. The output of test is discarded and the output of -- collect is emitted in the output stream. The parser fails if -- collect fails. -- -- Unimplemented parseManyTill :: Parser a m b -> Parser a m x -> Stream m a -> Stream m b -- | Iterate a parser generating function on a stream. The initial value -- b is used to generate the first parser, the parser is applied -- on the stream and the result is used to generate the next parser and -- so on. -- --
--   >>> import Data.Monoid (Sum(..))
--   
--   >>> s = Stream.fromList [1..10]
--   
--   >>> Stream.fold Fold.toList $ fmap getSum $ Stream.catRights $ Stream.parseIterate (\b -> Parser.takeBetween 0 2 (Fold.sconcat b)) (Sum 0) $ fmap Sum s
--   [3,10,21,36,55,55]
--   
-- -- This is the streaming equivalent of monad like sequenced application -- of parsers where next parser is dependent on the previous parser. -- -- Pre-release parseIterate :: Monad m => (b -> Parser a m b) -> b -> Stream m a -> Stream m (Either ParseError b) parseIterateD :: Monad m => (b -> Parser a m b) -> b -> Stream m a -> Stream m (Either ParseError b) -- | Group the input stream into groups of n elements each and -- then fold each group using the provided fold function. -- --
--   groupsOf n f = foldMany (FL.take n f)
--   
-- --
--   >>> Stream.toList $ Stream.groupsOf 2 Fold.sum (Stream.enumerateFromTo 1 10)
--   [3,7,11,15,19]
--   
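-- -- Pairing this with Fold.toList gives the usual list-chunking idiom. A sketch (the helper name is illustrative), written with the foldMany form from the definition above so it works even where groupsOf itself is not re-exported: -- --
--   import qualified Streamly.Data.Fold as Fold
--   import qualified Streamly.Data.Stream as Stream
--   
--   -- Break a stream into sublists of at most four elements; this is
--   -- groupsOf 4 Fold.toList spelled via foldMany.
--   chunksOf4 :: String -> IO [String]
--   chunksOf4 str =
--       Stream.fold Fold.toList
--           $ Stream.foldMany (Fold.take 4 Fold.toList)
--           $ Stream.fromList str
--   
--   -- chunksOf4 "streaming" returns ["stre","amin","g"]
--   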
-- -- This can be considered as an n-fold version of take where we -- apply take repeatedly on the leftover stream until the stream -- is exhausted. groupsOf :: Monad m => Int -> Fold m a b -> Stream m a -> Stream m b -- | Deprecated: Please use groupsWhile instead. Please note the change -- in the argument order of the comparison function. groupsBy :: Monad m => (a -> a -> Bool) -> Fold m a b -> Stream m a -> Stream m b -- | The argument order of the comparison function in groupsWhile is -- different from that of groupsBy. -- -- In groupsBy the comparison function takes the next element as -- the first argument and the previous element as the second argument. In -- groupsWhile the first argument is the previous element and -- the second argument is the next element. groupsWhile :: Monad m => (a -> a -> Bool) -> Fold m a b -> Stream m a -> Stream m b groupsRollingBy :: Monad m => (a -> a -> Bool) -> Fold m a b -> Stream m a -> Stream m b -- | Split the stream after stripping leading, trailing, and repeated -- separators as per the fold supplied. Therefore, ".a..b." with -- . as the separator would be parsed as ["a","b"]. In -- other words, it's like parsing words from whitespace-separated text. wordsBy :: Monad m => (a -> Bool) -> Fold m a b -> Stream m a -> Stream m b splitOnSeq :: forall m a b. (MonadIO m, Storable a, Unbox a, Enum a, Eq a) => Array a -> Fold m a b -> Stream m a -> Stream m b splitOnSuffixSeq :: forall m a b. (MonadIO m, Storable a, Unbox a, Enum a, Eq a) => Bool -> Array a -> Fold m a b -> Stream m a -> Stream m b -- | Split post any one of the given patterns. -- -- Unimplemented splitOnSuffixSeqAny :: [Array a] -> Fold m a b -> Stream m a -> Stream m b -- | Split on a prefixed separator element, dropping the separator. The -- supplied Fold is applied on the split segments. -- --
--   > splitOnPrefix' p xs = Stream.toList $ Stream.splitOnPrefix p (Fold.toList) (Stream.fromList xs)
--   > splitOnPrefix' (== '.') ".a.b"
--   ["a","b"]
--   
-- -- An empty stream results in an empty output stream: > -- splitOnPrefix' (== '.') "" [] -- -- An empty segment consisting of only a prefix is folded to the default -- output of the fold: -- --
--   > splitOnPrefix' (== '.') "."
--   [""]
--   
--   > splitOnPrefix' (== '.') ".a.b."
--   ["a","b",""]
--   
--   > splitOnPrefix' (== '.') ".a..b"
--   ["a","","b"]
--   
-- -- A prefix is optional at the beginning of the stream: -- --
--   > splitOnPrefix' (== '.') "a"
--   ["a"]
--   
--   > splitOnPrefix' (== '.') "a.b"
--   ["a","b"]
--   
-- -- splitOnPrefix is an inverse of intercalatePrefix with -- a single element: -- --
--   Stream.intercalatePrefix (Stream.fromPure '.') Unfold.fromList . Stream.splitOnPrefix (== '.') Fold.toList === id
--   
-- -- Assuming the input stream does not contain the separator: -- --
--   Stream.splitOnPrefix (== '.') Fold.toList . Stream.intercalatePrefix (Stream.fromPure '.') Unfold.fromList === id
--   
-- -- Unimplemented splitOnPrefix :: (a -> Bool) -> Fold m a b -> Stream m a -> Stream m b -- | Split on any one of the given patterns. -- -- Unimplemented splitOnAny :: [Array a] -> Fold m a b -> Stream m a -> Stream m b -- | Performs infix separator style splitting. splitInnerBy :: Monad m => (f a -> m (f a, Maybe (f a))) -> (f a -> f a -> m (f a)) -> Stream m (f a) -> Stream m (f a) -- | Performs infix separator style splitting. splitInnerBySuffix :: Monad m => (f a -> Bool) -> (f a -> m (f a, Maybe (f a))) -> (f a -> f a -> m (f a)) -> Stream m (f a) -> Stream m (f a) intersectBySorted :: Monad m => (a -> a -> Ordering) -> Stream m a -> Stream m a -> Stream m a -- | Drop prefix from the input stream if present. -- -- Space: O(1) -- -- Unimplemented dropPrefix :: Stream m a -> Stream m a -> Stream m a -- | Drop all matching infix from the input stream if present. Infix stream -- may be consumed multiple times. -- -- Space: O(n) where n is the length of the infix. -- -- Unimplemented dropInfix :: Stream m a -> Stream m a -> Stream m a -- | Drop suffix from the input stream if present. Suffix stream may be -- consumed multiple times. -- -- Space: O(n) where n is the length of the suffix. -- -- Unimplemented dropSuffix :: Stream m a -> Stream m a -> Stream m a -- | Use a Pipe to transform a stream. -- -- Pre-release transform :: Monad m => Pipe m a b -> Stream m a -> Stream m b map :: Monad m => (a -> b) -> Stream m a -> Stream m b -- |
--   >>> mapM f = Stream.sequence . fmap f
--   
-- -- Apply a monadic function to each element of the stream and replace it -- with the output of the resulting action. -- --
--   >>> s = Stream.fromList ["a", "b", "c"]
--   
--   >>> Stream.fold Fold.drain $ Stream.mapM putStr s
--   abc
--   
mapM :: Monad m => (a -> m b) -> Stream m a -> Stream m b -- |
--   >>> sequence = Stream.mapM id
--   
-- -- Replace the elements of a stream of monadic actions with the outputs -- of those actions. -- --
--   >>> s = Stream.fromList [putStr "a", putStr "b", putStrLn "c"]
--   
--   >>> Stream.fold Fold.drain $ Stream.sequence s
--   abc
--   
sequence :: Monad m => Stream m (m a) -> Stream m a -- | Tap the data flowing through a stream into a Fold. For example, -- you may add a tap to log the contents flowing through the stream. The -- fold is used only for effects, its result is discarded. -- --
--                     Fold m a b
--                         |
--   -----stream m a ---------------stream m a-----
--   
-- --
--   >>> s = Stream.enumerateFromTo 1 2
--   
--   >>> Stream.fold Fold.drain $ Stream.tap (Fold.drainMapM print) s
--   1
--   2
--   
-- -- Compare with trace. tap :: Monad m => Fold m a b -> Stream m a -> Stream m a tapOffsetEvery :: Monad m => Int -> Int -> Fold m a b -> Stream m a -> Stream m a -- | Apply a monadic function to each element flowing through the stream -- and discard the results. -- --
--   >>> s = Stream.enumerateFromTo 1 2
--   
--   >>> Stream.fold Fold.drain $ Stream.trace print s
--   1
--   2
--   
-- -- Compare with tap. trace :: Monad m => (a -> m b) -> Stream m a -> Stream m a -- | Perform a side effect before yielding each element of the stream and -- discard the results. -- --
--   >>> s = Stream.enumerateFromTo 1 2
--   
--   >>> Stream.fold Fold.drain $ Stream.trace_ (print "got here") s
--   "got here"
--   "got here"
--   
-- -- Same as intersperseMPrefix_ but always serial. -- -- See also: trace -- -- Pre-release trace_ :: Monad m => m b -> Stream m a -> Stream m a foldrS :: Monad m => (a -> Stream m b -> Stream m b) -> Stream m b -> Stream m a -> Stream m b foldlS :: Monad m => (Stream m b -> a -> Stream m b) -> Stream m b -> Stream m a -> Stream m b -- | Postscan a stream using the given monadic fold. -- -- The following example extracts the input stream up to a point where -- the running average of elements is no more than 10: -- --
--   >>> import Data.Maybe (fromJust)
--   
--   >>> let avg = Fold.teeWith (/) Fold.sum (fmap fromIntegral Fold.length)
--   
--   >>> s = Stream.enumerateFromTo 1.0 100.0
--   
--   >>> :{
--    Stream.fold Fold.toList
--     $ fmap (fromJust . fst)
--     $ Stream.takeWhile (\(_,x) -> x <= 10)
--     $ Stream.postscan (Fold.tee Fold.latest avg) s
--   :}
--   [1.0,2.0,3.0,4.0,5.0,6.0,7.0,8.0,9.0,10.0,11.0,12.0,13.0,14.0,15.0,16.0,17.0,18.0,19.0]
--   
postscan :: Monad m => Fold m a b -> Stream m a -> Stream m b -- | Strict left scan. Scan a stream using the given monadic fold. -- --
--   >>> s = Stream.fromList [1..10]
--   
--   >>> Stream.fold Fold.toList $ Stream.takeWhile (< 10) $ Stream.scan Fold.sum s
--   [0,1,3,6]
--   
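-- -- The only difference from postscan above is the leading accumulator value emitted by scan. A small sketch (the helper name is illustrative), assuming the usual qualified imports: -- --
--   import qualified Streamly.Data.Fold as Fold
--   import qualified Streamly.Data.Stream as Stream
--   
--   -- scan emits the initial value of the fold accumulator, postscan
--   -- starts with the value after the first input.
--   scanVsPostscan :: IO ([Int], [Int])
--   scanVsPostscan = do
--       let s = Stream.fromList [1, 2, 3]
--       a <- Stream.fold Fold.toList $ Stream.scan Fold.sum s
--       b <- Stream.fold Fold.toList $ Stream.postscan Fold.sum s
--       return (a, b)
--   
--   -- scanVsPostscan returns ([0,1,3,6],[1,3,6])
--   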
-- -- See also: usingStateT scan :: Monad m => Fold m a b -> Stream m a -> Stream m b -- | Like scan but restarts scanning afresh when the scanning fold -- terminates. scanMany :: Monad m => Fold m a b -> Stream m a -> Stream m b -- | Split on an infixed separator element, dropping the separator. The -- supplied Fold is applied on the split segments. Splits the -- stream on separator elements determined by the supplied predicate, -- separator is considered as infixed between two segments: -- --
--   >>> splitOn' p xs = Stream.fold Fold.toList $ Stream.splitOn p Fold.toList (Stream.fromList xs)
--   
--   >>> splitOn' (== '.') "a.b"
--   ["a","b"]
--   
-- -- An empty stream is folded to the default value of the fold: -- --
--   >>> splitOn' (== '.') ""
--   [""]
--   
-- -- If one or both sides of the separator are missing then the empty -- segment on that side is folded to the default output of the fold: -- --
--   >>> splitOn' (== '.') "."
--   ["",""]
--   
-- --
--   >>> splitOn' (== '.') ".a"
--   ["","a"]
--   
-- --
--   >>> splitOn' (== '.') "a."
--   ["a",""]
--   
-- --
--   >>> splitOn' (== '.') "a..b"
--   ["a","","b"]
--   
-- -- splitOn is an inverse of intercalating single element: -- --
--   Stream.intercalate (Stream.fromPure '.') Unfold.fromList . Stream.splitOn (== '.') Fold.toList === id
--   
-- -- Assuming the input stream does not contain the separator: -- --
--   Stream.splitOn (== '.') Fold.toList . Stream.intercalate (Stream.fromPure '.') Unfold.fromList === id
--   
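-- -- For contrast with wordsBy described earlier: splitOn keeps the empty segments around separators while wordsBy drops them, much like lines versus words on text. A sketch (the helper name is illustrative), assuming both combinators are exported by the Stream module in use: -- --
--   import qualified Streamly.Data.Fold as Fold
--   import qualified Streamly.Data.Stream as Stream
--   
--   -- splitOn keeps empty segments, wordsBy strips them.
--   splitVsWords :: IO ([String], [String])
--   splitVsWords = do
--       let s = Stream.fromList "..a..b."
--       xs <- Stream.fold Fold.toList $ Stream.splitOn (== '.') Fold.toList s
--       ys <- Stream.fold Fold.toList $ Stream.wordsBy (== '.') Fold.toList s
--       return (xs, ys)
--   
--   -- splitVsWords returns (["","","a","","b",""],["a","b"])
--   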
splitOn :: Monad m => (a -> Bool) -> Fold m a b -> Stream m a -> Stream m b -- | Like scanl' but with a monadic step function and a monadic -- seed. scanlM' :: Monad m => (b -> a -> m b) -> m b -> Stream m a -> Stream m b -- | scanlMAfter' accumulate initial done stream is like -- scanlM' except that it provides an additional done -- function to be applied on the accumulator when the stream stops. The -- result of done is also emitted in the stream. -- -- This function can be used to allocate a resource in the beginning of -- the scan and release it when the stream ends or to flush the internal -- state of the scan at the end. -- -- Pre-release scanlMAfter' :: Monad m => (b -> a -> m b) -> m b -> (b -> m b) -> Stream m a -> Stream m b -- | Strict left scan. Like map, scanl' too is a one to one -- transformation, however it adds an extra element. -- --
--   >>> Stream.toList $ Stream.scanl' (+) 0 $ Stream.fromList [1,2,3,4]
--   [0,1,3,6,10]
--   
-- --
--   >>> Stream.toList $ Stream.scanl' (flip (:)) [] $ Stream.fromList [1,2,3,4]
--   [[],[1],[2,1],[3,2,1],[4,3,2,1]]
--   
-- -- The output of scanl' is the initial value of the accumulator -- followed by all the intermediate steps and the final result of -- foldl'. -- -- By streaming the accumulated state after each fold step, we can share -- the state across multiple stages of stream composition. Each stage can -- modify or extend the state, do some processing with it and emit it for -- the next stage, thus modularizing the stream processing. This can be -- useful in stateful or event-driven programming. -- -- Consider the following monolithic example, computing the sum and the -- product of the elements in a stream in one go using a foldl': -- --
--   >>> Stream.fold (Fold.foldl' (\(s, p) x -> (s + x, p * x)) (0,1)) $ Stream.fromList [1,2,3,4]
--   (10,24)
--   
-- -- Using scanl' we can make it modular by computing the sum in -- the first stage and passing it down to the next stage for computing -- the product: -- --
--   >>> :{
--     Stream.fold (Fold.foldl' (\(_, p) (s, x) -> (s, p * x)) (0,1))
--     $ Stream.scanl' (\(s, _) x -> (s + x, x)) (0,1)
--     $ Stream.fromList [1,2,3,4]
--   :}
--   (10,24)
--   
-- -- IMPORTANT: scanl' evaluates the accumulator to WHNF. To avoid -- building lazy expressions inside the accumulator, it is recommended -- that a strict data structure is used for the accumulator. -- --
--   >>> scanl' step z = Stream.scan (Fold.foldl' step z)
--   
--   >>> scanl' f z xs = Stream.scanlM' (\a b -> return (f a b)) (return z) xs
--   
-- -- See also: usingStateT scanl' :: Monad m => (b -> a -> b) -> b -> Stream m a -> Stream m b scanlM :: Monad m => (b -> a -> m b) -> m b -> Stream m a -> Stream m b scanl :: Monad m => (b -> a -> b) -> b -> Stream m a -> Stream m b -- | Like scanl1' but with a monadic step function. scanl1M' :: Monad m => (a -> a -> m a) -> Stream m a -> Stream m a -- | Like scanl' but for a non-empty stream. The first element of -- the stream is used as the initial value of the accumulator. Does -- nothing if the stream is empty. -- --
--   >>> Stream.toList $ Stream.scanl1' (+) $ Stream.fromList [1,2,3,4]
--   [1,3,6,10]
--   
scanl1' :: Monad m => (a -> a -> a) -> Stream m a -> Stream m a scanl1M :: Monad m => (a -> a -> m a) -> Stream m a -> Stream m a scanl1 :: Monad m => (a -> a -> a) -> Stream m a -> Stream m a prescanl' :: Monad m => (b -> a -> b) -> b -> Stream m a -> Stream m b prescanlM' :: Monad m => (b -> a -> m b) -> m b -> Stream m a -> Stream m b postscanl :: Monad m => (a -> b -> a) -> a -> Stream m b -> Stream m a postscanlM :: Monad m => (b -> a -> m b) -> m b -> Stream m a -> Stream m b postscanl' :: Monad m => (a -> b -> a) -> a -> Stream m b -> Stream m a postscanlM' :: Monad m => (b -> a -> m b) -> m b -> Stream m a -> Stream m b postscanlMAfter' :: Monad m => (b -> a -> m b) -> m b -> (b -> m b) -> Stream m a -> Stream m b postscanlx' :: Monad m => (x -> a -> x) -> x -> (x -> b) -> Stream m a -> Stream m b postscanlMx' :: Monad m => (x -> a -> m x) -> m x -> (x -> m b) -> Stream m a -> Stream m b scanlMx' :: Monad m => (x -> a -> m x) -> m x -> (x -> m b) -> Stream m a -> Stream m b scanlx' :: Monad m => (x -> a -> x) -> x -> (x -> b) -> Stream m a -> Stream m b -- | Modify a Stream m a -> Stream m a stream transformation -- that accepts a predicate (a -> b) to accept ((s, a) -- -> b) instead, provided a transformation Stream m a -> -- Stream m (s, a). Convenient to filter with index or time. -- --
--   >>> filterWithIndex = Stream.with Stream.indexed Stream.filter
--   
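-- -- Since with itself is pre-release, the same effect is obtained by composing indexed and filter directly, as the definition above suggests. A sketch (the helper name is illustrative), assuming the usual qualified imports: -- --
--   import qualified Streamly.Data.Fold as Fold
--   import qualified Streamly.Data.Stream as Stream
--   
--   -- Keep only the elements at even positions (indices start at 0).
--   evenPositions :: IO String
--   evenPositions =
--       Stream.fold Fold.toList
--           $ fmap snd
--           $ Stream.filter (even . fst)
--           $ Stream.indexed
--           $ Stream.fromList "abcdef"
--   
--   -- evenPositions returns "ace"
--   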
-- -- Pre-release with :: Monad m => (Stream m a -> Stream m (s, a)) -> (((s, a) -> b) -> Stream m (s, a) -> Stream m (s, a)) -> ((s, a) -> b) -> Stream m a -> Stream m a -- | Use a filtering fold on a stream. -- --
--   >>> scanMaybe f = Stream.catMaybes . Stream.postscan f
--   
scanMaybe :: Monad m => Fold m a (Maybe b) -> Stream m a -> Stream m b -- | Include only those elements that pass a predicate. -- --
--   >>> filter p = Stream.filterM (return . p)
--   
--   >>> filter p = Stream.mapMaybe (\x -> if p x then Just x else Nothing)
--   
--   >>> filter p = Stream.scanMaybe (Fold.filtering p)
--   
filter :: Monad m => (a -> Bool) -> Stream m a -> Stream m a -- | Same as filter but with a monadic predicate. -- --
--   >>> f p x = p x >>= \r -> return $ if r then Just x else Nothing
--   
--   >>> filterM p = Stream.mapMaybeM (f p)
--   
filterM :: Monad m => (a -> m Bool) -> Stream m a -> Stream m a -- | Deletes the first occurrence of the element in the stream that -- satisfies the given equality predicate. -- --
--   >>> input = Stream.fromList [1,3,3,5]
--   
--   >>> Stream.fold Fold.toList $ Stream.deleteBy (==) 3 input
--   [1,3,5]
--   
deleteBy :: Monad m => (a -> a -> Bool) -> a -> Stream m a -> Stream m a -- | Drop repeated elements that are adjacent to each other using the -- supplied comparison function. -- --
--   >>> uniq = Stream.uniqBy (==)
--   
-- -- To strip duplicate path separators: -- --
--   >>> input = Stream.fromList "//a//b"
--   
--   >>> f x y = x == '/' && y == '/'
--   
--   >>> Stream.fold Fold.toList $ Stream.uniqBy f input
--   "/a/b"
--   
-- -- Space: O(1) -- -- Pre-release uniqBy :: Monad m => (a -> a -> Bool) -> Stream m a -> Stream m a -- | Drop repeated elements that are adjacent to each other. -- --
--   >>> uniq = Stream.uniqBy (==)
--   
uniq :: (Eq a, Monad m) => Stream m a -> Stream m a -- | Strip all leading and trailing occurrences of an element passing a -- predicate and make all other consecutive occurrences uniq. -- --
--   > prune p = Stream.dropWhileAround p $ Stream.uniqBy (\x y -> p x && p y)
--   
-- --
--   > Stream.prune isSpace (Stream.fromList "  hello      world!   ")
--   "hello world!"
--   
-- -- Space: O(1) -- -- Unimplemented prune :: (a -> Bool) -> Stream m a -> Stream m a -- | Emit only repeated elements, once. -- -- Unimplemented repeated :: Stream m a -> Stream m a -- | Take first n elements from the stream and discard the rest. take :: Applicative m => Int -> Stream m a -> Stream m a -- | End the stream as soon as the predicate fails on an element. takeWhile :: Monad m => (a -> Bool) -> Stream m a -> Stream m a -- | Same as takeWhile but with a monadic predicate. takeWhileM :: Monad m => (a -> m Bool) -> Stream m a -> Stream m a -- | Take all consecutive elements at the end of the stream for which the -- predicate is true. -- -- O(n) space, where n is the number elements taken. -- -- Unimplemented takeWhileLast :: (a -> Bool) -> Stream m a -> Stream m a -- | Like takeWhile and takeWhileLast combined. -- -- O(n) space, where n is the number elements taken from the end. -- -- Unimplemented takeWhileAround :: (a -> Bool) -> Stream m a -> Stream m a -- | Discard first n elements from the stream and take the rest. drop :: Monad m => Int -> Stream m a -> Stream m a -- | Drop elements in the stream as long as the predicate succeeds and then -- take the rest of the stream. dropWhile :: Monad m => (a -> Bool) -> Stream m a -> Stream m a -- | Same as dropWhile but with a monadic predicate. dropWhileM :: Monad m => (a -> m Bool) -> Stream m a -> Stream m a -- | Drop n elements at the end of the stream. -- -- O(n) space, where n is the number elements dropped. -- -- Unimplemented dropLast :: Int -> Stream m a -> Stream m a -- | Drop all consecutive elements at the end of the stream for which the -- predicate is true. -- -- O(n) space, where n is the number elements dropped. -- -- Unimplemented dropWhileLast :: (a -> Bool) -> Stream m a -> Stream m a -- | Like dropWhile and dropWhileLast combined. -- -- O(n) space, where n is the number elements dropped from the end. -- -- Unimplemented dropWhileAround :: (a -> Bool) -> Stream m a -> Stream m a -- | insertBy cmp elem stream inserts elem before the -- first element in stream that is less than elem when -- compared using cmp. -- --
--   >>> insertBy cmp x = Stream.mergeBy cmp (Stream.fromPure x)
--   
-- --
--   >>> input = Stream.fromList [1,3,5]
--   
--   >>> Stream.fold Fold.toList $ Stream.insertBy compare 2 input
--   [1,2,3,5]
--   
insertBy :: Monad m => (a -> a -> Ordering) -> a -> Stream m a -> Stream m a -- | Insert a pure value between successive elements of a stream. -- --
--   >>> input = Stream.fromList "hello"
--   
--   >>> Stream.fold Fold.toList $ Stream.intersperse ',' input
--   "h,e,l,l,o"
--   
intersperse :: Monad m => a -> Stream m a -> Stream m a -- | Insert an effect and its output before consuming an element of a -- stream except the first one. -- --
--   >>> input = Stream.fromList "hello"
--   
--   >>> Stream.fold Fold.toList $ Stream.trace putChar $ Stream.intersperseM (putChar '.' >> return ',') input
--   h.,e.,l.,l.,o"h,e,l,l,o"
--   
-- -- Be careful about the order of effects. In the above example we used -- trace after the intersperse, if we use it before the intersperse the -- output would be he.l.l.o."h,e,l,l,o". -- --
--   >>> Stream.fold Fold.toList $ Stream.intersperseM (putChar '.' >> return ',') $ Stream.trace putChar input
--   he.l.l.o."h,e,l,l,o"
--   
intersperseM :: Monad m => m a -> Stream m a -> Stream m a -- | Intersperse a monadic action into the input stream after every -- n elements. -- --
--   > input = Stream.fromList "hello"
--   > Stream.fold Fold.toList $ Stream.intersperseMWith 2 (return ',') input
--   "he,ll,o"
--   
-- -- Unimplemented intersperseMWith :: Int -> m a -> Stream m a -> Stream m a -- | Insert an effect and its output after consuming an element of a -- stream. -- --
--   >>> input = Stream.fromList "hello"
--   
--   >>> Stream.fold Fold.toList $ Stream.trace putChar $ Stream.intersperseMSuffix (putChar '.' >> return ',') input
--   h.,e.,l.,l.,o.,"h,e,l,l,o,"
--   
-- -- Pre-release intersperseMSuffix :: forall m a. Monad m => m a -> Stream m a -> Stream m a -- | Like intersperseMSuffix but intersperses an effectful action -- into the input stream after every n elements and after the -- last element. -- --
--   >>> input = Stream.fromList "hello"
--   
--   >>> Stream.fold Fold.toList $ Stream.intersperseMSuffixWith 2 (return ',') input
--   "he,ll,o,"
--   
-- -- Pre-release intersperseMSuffixWith :: forall m a. Monad m => Int -> m a -> Stream m a -> Stream m a -- | Insert a side effect before consuming an element of a stream except -- the first one. -- --
--   >>> input = Stream.fromList "hello"
--   
--   >>> Stream.fold Fold.drain $ Stream.trace putChar $ Stream.intersperseM_ (putChar '.') input
--   h.e.l.l.o
--   
-- -- Pre-release intersperseM_ :: Monad m => m b -> Stream m a -> Stream m a -- | Insert a side effect after consuming an element of a stream. -- --
--   >>> input = Stream.fromList "hello"
--   
--   >>> Stream.fold Fold.toList $ Stream.intersperseMSuffix_ (threadDelay 1000000) input
--   "hello"
--   
-- -- Pre-release intersperseMSuffix_ :: Monad m => m b -> Stream m a -> Stream m a -- | Insert a side effect before consuming an element of a stream. -- -- Definition: -- --
--   >>> intersperseMPrefix_ m = Stream.mapM (\x -> void m >> return x)
--   
-- --
--   >>> input = Stream.fromList "hello"
--   
--   >>> Stream.fold Fold.toList $ Stream.trace putChar $ Stream.intersperseMPrefix_ (putChar '.' >> return ',') input
--   .h.e.l.l.o"hello"
--   
-- -- Same as trace_. -- -- Pre-release intersperseMPrefix_ :: Monad m => m b -> Stream m a -> Stream m a -- | Introduce a delay of specified seconds between elements of the stream. -- -- Definition: -- --
--   >>> sleep n = liftIO $ threadDelay $ round $ n * 1000000
--   
--   >>> delay = Stream.intersperseM_ . sleep
--   
-- -- Example: -- --
--   >>> input = Stream.enumerateFromTo 1 3
--   
--   >>> Stream.fold (Fold.drainMapM print) $ Stream.delay 1 input
--   1
--   2
--   3
--   
delay :: MonadIO m => Double -> Stream m a -> Stream m a -- | Introduce a delay of specified seconds before consuming an element of -- a stream. -- -- Definition: -- --
--   >>> sleep n = liftIO $ threadDelay $ round $ n * 1000000
--   
--   >>> delayPre = Stream.intersperseMPrefix_ . sleep
--   
-- -- Example: -- --
--   >>> input = Stream.enumerateFromTo 1 3
--   
--   >>> Stream.fold (Fold.drainMapM print) $ Stream.delayPre 1 input
--   1
--   2
--   3
--   
-- -- Pre-release delayPre :: MonadIO m => Double -> Stream m a -> Stream m a -- | Introduce a delay of specified seconds after consuming an element of a -- stream. -- -- Definition: -- --
--   >>> sleep n = liftIO $ threadDelay $ round $ n * 1000000
--   
--   >>> delayPost = Stream.intersperseMSuffix_ . sleep
--   
-- -- Example: -- --
--   >>> input = Stream.enumerateFromTo 1 3
--   
--   >>> Stream.fold (Fold.drainMapM print) $ Stream.delayPost 1 input
--   1
--   2
--   3
--   
-- -- Pre-release delayPost :: MonadIO m => Double -> Stream m a -> Stream m a -- | Returns the elements of the stream in reverse order. The stream must -- be finite. Note that this necessarily buffers the entire stream in -- memory. -- -- Definition: -- --
--   >>> reverse m = Stream.concatEffect $ Stream.fold Fold.toListRev m >>= return . Stream.fromList
--   
reverse :: Monad m => Stream m a -> Stream m a -- | Like reverse but several times faster, requires an Unbox -- instance. -- -- O(n) space -- -- Pre-release reverseUnbox :: (MonadIO m, Unbox a) => Stream m a -> Stream m a -- | Buffer until the next element in sequence arrives. The function -- argument determines the difference in sequence numbers. This could be -- useful in implementing sequenced streams, for example, TCP reassembly. -- -- Unimplemented reassembleBy :: Fold m a b -> (a -> a -> Int) -> Stream m a -> Stream m b -- |
--   >>> f = Fold.foldl' (\(i, _) x -> (i + 1, x)) (-1,undefined)
--   
--   >>> indexed = Stream.postscan f
--   
--   >>> indexed = Stream.zipWith (,) (Stream.enumerateFrom 0)
--   
--   >>> indexedR n = fmap (\(i, a) -> (n - i, a)) . indexed
--   
-- -- Pair each element in a stream with its index, starting from index 0. -- --
--   >>> Stream.fold Fold.toList $ Stream.indexed $ Stream.fromList "hello"
--   [(0,'h'),(1,'e'),(2,'l'),(3,'l'),(4,'o')]
--   
indexed :: Monad m => Stream m a -> Stream m (Int, a) -- |
--   >>> f n = Fold.foldl' (\(i, _) x -> (i - 1, x)) (n + 1,undefined)
--   
--   >>> indexedR n = Stream.postscan (f n)
--   
-- --
--   >>> s n = Stream.enumerateFromThen n (n - 1)
--   
--   >>> indexedR n = Stream.zipWith (,) (s n)
--   
-- -- Pair each element in a stream with its index, starting from the given -- index n and counting down. -- --
--   >>> Stream.fold Fold.toList $ Stream.indexedR 10 $ Stream.fromList "hello"
--   [(10,'h'),(9,'e'),(8,'l'),(7,'l'),(6,'o')]
--   
indexedR :: Monad m => Int -> Stream m a -> Stream m (Int, a) -- | Pair each element in a stream with an absolute timestamp, using a -- clock of specified granularity. The timestamp is generated just before -- the element is consumed. -- --
--   >>> Stream.fold Fold.toList $ Stream.timestampWith 0.01 $ Stream.delay 1 $ Stream.enumerateFromTo 1 3
--   [(AbsTime (TimeSpec {sec = ..., nsec = ...}),1),(AbsTime (TimeSpec {sec = ..., nsec = ...}),2),(AbsTime (TimeSpec {sec = ..., nsec = ...}),3)]
--   
-- -- Pre-release timestampWith :: MonadIO m => Double -> Stream m a -> Stream m (AbsTime, a) timestamped :: MonadIO m => Stream m a -> Stream m (AbsTime, a) -- | Pair each element in a stream with relative times starting from 0, -- using a clock with the specified granularity. The time is measured -- just before the element is consumed. -- --
--   >>> Stream.fold Fold.toList $ Stream.timeIndexWith 0.01 $ Stream.delay 1 $ Stream.enumerateFromTo 1 3
--   [(RelTime64 (NanoSecond64 ...),1),(RelTime64 (NanoSecond64 ...),2),(RelTime64 (NanoSecond64 ...),3)]
--   
-- -- Pre-release timeIndexWith :: MonadIO m => Double -> Stream m a -> Stream m (RelTime64, a) -- | Pair each element in a stream with relative times starting from 0, -- using a 10 ms granularity clock. The time is measured just before the -- element is consumed. -- --
--   >>> Stream.fold Fold.toList $ Stream.timeIndexed $ Stream.delay 1 $ Stream.enumerateFromTo 1 3
--   [(RelTime64 (NanoSecond64 ...),1),(RelTime64 (NanoSecond64 ...),2),(RelTime64 (NanoSecond64 ...),3)]
--   
-- -- Pre-release timeIndexed :: MonadIO m => Stream m a -> Stream m (RelTime64, a) -- | Find all the indices where the element in the stream satisfies the -- given predicate. -- --
--   >>> findIndices p = Stream.scanMaybe (Fold.findIndices p)
--   
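-- -- A small usage sketch (the helper name is illustrative), assuming findIndices is exported by the Stream module in use: -- --
--   import qualified Streamly.Data.Fold as Fold
--   import qualified Streamly.Data.Stream as Stream
--   
--   -- Indices of all vowels in a string.
--   vowelIndices :: IO [Int]
--   vowelIndices =
--       Stream.fold Fold.toList
--           $ Stream.findIndices (`elem` "aeiou")
--           $ Stream.fromList "streamly"
--   
--   -- vowelIndices returns [3,4]
--   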
findIndices :: Monad m => (a -> Bool) -> Stream m a -> Stream m Int -- | Find all the indices where the value of the element in the stream is -- equal to the given value. -- --
--   >>> elemIndices a = Stream.findIndices (== a)
--   
elemIndices :: (Monad m, Eq a) => a -> Stream m a -> Stream m Int slicesBy :: Monad m => (a -> Bool) -> Stream m a -> Stream m (Int, Int) -- | Apply a function on every two successive elements of a stream. The -- first argument of the map function is the previous element and the -- second argument is the current element. When the current element is -- the first element, the previous element is Nothing. -- -- Pre-release rollingMap :: Monad m => (Maybe a -> a -> b) -> Stream m a -> Stream m b -- | Like rollingMap but with an effectful map function. -- -- Pre-release rollingMapM :: Monad m => (Maybe a -> a -> m b) -> Stream m a -> Stream m b -- | Like rollingMap but requires at least two elements in the -- stream, returns an empty stream otherwise. -- -- This is the stream equivalent of the list idiom zipWith f xs (tail -- xs). -- -- Pre-release rollingMap2 :: Monad m => (a -> a -> b) -> Stream m a -> Stream m b -- | Map a Maybe returning function to a stream, filter out the -- Nothing elements, and return a stream of values extracted from -- Just. -- -- Equivalent to: -- --
--   >>> mapMaybe f = Stream.catMaybes . fmap f
--   
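-- -- A typical use is validating elements while dropping the failures. A sketch (the helper name is illustrative), assuming the usual qualified imports: -- --
--   import Text.Read (readMaybe)
--   
--   import qualified Streamly.Data.Fold as Fold
--   import qualified Streamly.Data.Stream as Stream
--   
--   -- Keep only the strings that parse as Ints.
--   readInts :: [String] -> IO [Int]
--   readInts xs =
--       Stream.fold Fold.toList
--           $ Stream.mapMaybe readMaybe
--           $ Stream.fromList xs
--   
--   -- readInts ["1", "two", "3"] returns [1,3]
--   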
mapMaybe :: Monad m => (a -> Maybe b) -> Stream m a -> Stream m b -- | Like mapMaybe but maps a monadic function. -- -- Equivalent to: -- --
--   >>> mapMaybeM f = Stream.catMaybes . Stream.mapM f
--   
-- --
--   >>> mapM f = Stream.mapMaybeM (\x -> Just <$> f x)
--   
mapMaybeM :: Monad m => (a -> m (Maybe b)) -> Stream m a -> Stream m b -- | In a stream of Maybes, discard Nothings and unwrap -- Justs. -- --
--   >>> catMaybes = Stream.mapMaybe id
--   
--   >>> catMaybes = fmap fromJust . Stream.filter isJust
--   
-- -- Pre-release catMaybes :: Monad m => Stream m (Maybe a) -> Stream m a -- | Discard Rights and unwrap Lefts in an Either -- stream. -- --
--   >>> catLefts = fmap (fromLeft undefined) . Stream.filter isLeft
--   
-- -- Pre-release catLefts :: Monad m => Stream m (Either a b) -> Stream m a -- | Discard Lefts and unwrap Rights in an Either -- stream. -- --
--   >>> catRights = fmap (fromRight undefined) . Stream.filter isRight
--   
-- -- Pre-release catRights :: Monad m => Stream m (Either a b) -> Stream m b -- | Remove the either wrapper and flatten both lefts as well as rights -- in the output stream. -- --
--   >>> catEithers = fmap (either id id)
--   
-- -- Pre-release catEithers :: Monad m => Stream m (Either a a) -> Stream m a -- | strideFromThen offset stride takes the element at -- offset index and then every element at strides of -- stride. -- --
--   >>> Stream.fold Fold.toList $ Stream.strideFromThen 2 3 $ Stream.enumerateFromTo 0 10
--   [2,5,8]
--   
strideFromThen :: Monad m => Int -> Int -> Stream m a -> Stream m a -- | filterInStreamGenericBy retains only those elements in the -- second stream that are present in the first stream. -- --
--   >>> Stream.fold Fold.toList $ Stream.filterInStreamGenericBy (==) (Stream.fromList [1,2,2,4]) (Stream.fromList [2,1,1,3])
--   [2,1,1]
--   
-- --
--   >>> Stream.fold Fold.toList $ Stream.filterInStreamGenericBy (==) (Stream.fromList [2,1,1,3]) (Stream.fromList [1,2,2,4])
--   [1,2,2]
--   
-- -- Similar to the list intersectBy operation but with the stream argument -- order flipped. -- -- The first stream must be finite and must not block. Second stream is -- processed only after the first stream is fully realized. -- -- Space: O(n) where n is the number of elements in the second -- stream. -- -- Time: O(m x n) where m is the number of elements in the first -- stream and n is the number of elements in the second stream. -- -- Pre-release filterInStreamGenericBy :: Monad m => (a -> a -> Bool) -> Stream m a -> Stream m a -> Stream m a -- | Delete all elements of the first stream from the second stream. If an -- element occurs multiple times in the first stream, as many occurrences -- of it are deleted from the second stream. -- --
--   >>> Stream.fold Fold.toList $ Stream.deleteInStreamGenericBy (==) (Stream.fromList [1,2,3]) (Stream.fromList [1,2,2])
--   [2]
--   
-- -- The following laws hold: -- --
--   deleteInStreamGenericBy (==) s1 (s1 `append` s2) === s2
--   deleteInStreamGenericBy (==) s1 (s1 `interleave` s2) === s2
--   
-- -- Same as the list // operation but with argument order flipped. -- -- The first stream must be finite and must not block. Second stream is -- processed only after the first stream is fully realized. -- -- Space: O(m) where m is the number of elements in the first -- stream. -- -- Time: O(m x n) where m is the number of elements in the first -- stream and n is the number of elements in the second stream. -- -- Pre-release deleteInStreamGenericBy :: Monad m => (a -> a -> Bool) -> Stream m a -> Stream m a -> Stream m a -- | This essentially appends to the second stream all the occurrences of -- elements in the first stream that are not already present in the -- second stream. -- -- Equivalent to the following except that s2 is evaluated only -- once: -- --
--   >>> unionWithStreamGenericBy eq s1 s2 = s2 `Stream.append` (Stream.deleteInStreamGenericBy eq s2 s1)
--   
-- -- Example: -- --
--   >>> Stream.fold Fold.toList $ Stream.unionWithStreamGenericBy (==) (Stream.fromList [1,1,2,3]) (Stream.fromList [1,2,2,4])
--   [1,2,2,4,3]
--   
-- -- Space: O(n) -- -- Time: O(m x n) -- -- Pre-release unionWithStreamGenericBy :: MonadIO m => (a -> a -> Bool) -> Stream m a -> Stream m a -> Stream m a -- | Like filterInStreamGenericBy but assumes that the input streams -- are sorted in ascending order. To use it on streams sorted in -- descending order pass an inverted comparison function returning GT for -- less than and LT for greater than. -- -- Space: O(1) -- -- Time: O(m+n) -- -- Pre-release filterInStreamAscBy :: Monad m => (a -> a -> Ordering) -> Stream m a -> Stream m a -> Stream m a -- | A more efficient deleteInStreamGenericBy for streams sorted in -- ascending order. -- -- Space: O(1) -- -- Unimplemented deleteInStreamAscBy :: (a -> a -> Ordering) -> Stream m a -> Stream m a -> Stream m a -- | A more efficient unionWithStreamGenericBy for sorted streams. -- -- Space: O(1) -- -- Unimplemented unionWithStreamAscBy :: (a -> a -> Ordering) -> Stream m a -> Stream m a -> Stream m a -- | Like cross but emits only those tuples where a == b -- using the supplied equality predicate. -- -- Definition: -- --
--   >>> joinInnerGeneric eq s1 s2 = Stream.filter (\(a, b) -> a `eq` b) $ Stream.cross s1 s2
--   
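-- -- For plain equality on keys the Map-based joinInner documented below is usually the better choice. A sketch (the helper name is illustrative); the join combinators are pre-release, so they are assumed here to come from the internal Stream module documented on this page: -- --
--   import qualified Streamly.Data.Fold as Fold
--   import qualified Streamly.Data.Stream as Stream
--   import qualified Streamly.Internal.Data.Stream as Internal
--   
--   -- Key-based inner join of two (key, value) streams.
--   joined :: IO [(Int, String, Char)]
--   joined =
--       Stream.fold Fold.toList
--           $ Internal.joinInner
--               (Stream.fromList [(1, "one"), (2, "two")])
--               (Stream.fromList [(2, 'b'), (3, 'c')])
--   
--   -- joined returns [(2,"two",'b')]
--   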
-- -- You should almost always prefer joinInnerOrd over -- joinInnerGeneric if possible. joinInnerOrd is an order -- of magnitude faster but may take more space for caching the second -- stream. -- -- See joinInnerGeneric for a much faster fused alternative. -- -- Time: O(m x n) -- -- Pre-release joinInnerGeneric :: Monad m => (a -> b -> Bool) -> Stream m a -> Stream m b -> Stream m (a, b) -- | A more efficient joinInner for sorted streams. -- -- Space: O(1) -- -- Time: O(m + n) -- -- Unimplemented joinInnerAscBy :: (a -> b -> Ordering) -> Stream m a -> Stream m b -> Stream m (a, b) -- | A more efficient joinLeft for sorted streams. -- -- Space: O(1) -- -- Time: O(m + n) -- -- Unimplemented joinLeftAscBy :: (a -> b -> Ordering) -> Stream m a -> Stream m b -> Stream m (a, Maybe b) -- | A more efficient joinOuter for sorted streams. -- -- Space: O(1) -- -- Time: O(m + n) -- -- Unimplemented joinOuterAscBy :: (a -> b -> Ordering) -> Stream m a -> Stream m b -> Stream m (Maybe a, Maybe b) -- | The memory used is proportional to the number of unique elements in -- the stream. If we want to limit the memory we can just use "take" to -- limit the uniq elements in the stream. nub :: (Monad m, Ord a) => Stream m a -> Stream m a -- | Like joinInner but emit (a, Just b), and additionally, -- for those a's that are not equal to any b emit -- (a, Nothing). -- -- The second stream is evaluated multiple times. If the stream is a -- consume-once stream then the caller should cache it in an Array -- before calling this function. Caching may also improve performance if -- the stream is expensive to evaluate. -- --
--   >>> joinRightGeneric eq = flip (Stream.joinLeftGeneric eq)
--   
-- -- Space: O(n) assuming the second stream is cached in memory. -- -- Time: O(m x n) -- -- Unimplemented joinLeftGeneric :: Monad m => (a -> b -> Bool) -> Stream m a -> Stream m b -> Stream m (a, Maybe b) -- | Like joinLeft but emits a (Just a, Just b). Like -- joinLeft, for those a's that are not equal to any -- b emit (Just a, Nothing), but additionally, for -- those b's that are not equal to any a emit -- (Nothing, Just b). -- -- For space efficiency use the smaller stream as the second stream. -- -- Space: O(n) -- -- Time: O(m x n) -- -- Pre-release joinOuterGeneric :: MonadIO m => (a -> b -> Bool) -> Stream m a -> Stream m b -> Stream m (Maybe a, Maybe b) -- | Like joinInner but uses a Map for efficiency. -- -- If the input streams have duplicate keys, the behavior is undefined. -- -- For space efficiency use the smaller stream as the second stream. -- -- Space: O(n) -- -- Time: O(m + n) -- -- Pre-release joinInner :: (Monad m, Ord k) => Stream m (k, a) -> Stream m (k, b) -> Stream m (k, a, b) -- | A more efficient joinLeft using a hashmap for efficiency. -- -- Space: O(n) -- -- Time: O(m + n) -- -- Pre-release joinLeft :: (Ord k, Monad m) => Stream m (k, a) -> Stream m (k, b) -> Stream m (k, a, Maybe b) -- | Like joinOuter but uses a Map for efficiency. -- -- Space: O(m + n) -- -- Time: O(m + n) -- -- Pre-release joinOuter :: (Ord k, MonadIO m) => Stream m (k, a) -> Stream m (k, b) -> Stream m (k, Maybe a, Maybe b) -- | Deprecated: Please use Streamly.Internal.Data.Stream -- instead. module Streamly.Internal.Data.Stream.StreamD -- | Combinators to efficiently manipulate streams of mutable arrays. -- -- We can either push these in the MutArray module with a "chunks" prefix -- or keep this as a separate module and release it. -- | Deprecated: Please use Streamly.Internal.Data.MutArray -- instead. module Streamly.Internal.Data.MutArray.Stream -- | chunksOf n stream groups the elements in the input stream -- into arrays of n elements each. -- -- Same as the following but may be more efficient: -- --
--   >>> chunksOf n = Stream.foldMany (MutArray.createOf n)
--   
-- -- Pre-release chunksOf :: forall m a. (MonadIO m, Unbox a) => Int -> Stream m a -> Stream m (MutArray a) -- | Like chunksOf but creates pinned arrays. pinnedChunksOf :: forall m a. (MonadIO m, Unbox a) => Int -> Stream m a -> Stream m (MutArray a) -- | Deprecated: Please use buildChunks instead. writeChunks :: (MonadIO m, Unbox a) => Int -> Fold m a (StreamK n (MutArray a)) -- | Generate a stream of array slices using a predicate. The array element -- matching the predicate is dropped. -- -- Pre-release splitOn :: (MonadIO m, Unbox a) => (a -> Bool) -> MutArray a -> Stream m (MutArray a) -- | This mutates the first array (if it has space) to append values from -- the second one. This would work for immutable arrays as well because -- an immutable array never has space so a new array is allocated instead -- of mutating it. -- -- | Coalesce adjacent arrays in incoming stream to form bigger arrays of -- a maximum specified size. Note that if a single array is bigger than -- the specified size we do not split it to fit. When we coalesce -- multiple arrays if the size would exceed the specified size we do not -- coalesce therefore the actual array size may be less than the -- specified chunk size. packArraysChunksOf :: (MonadIO m, Unbox a) => Int -> Stream m (MutArray a) -> Stream m (MutArray a) data SpliceState s arr SpliceInitial :: s -> SpliceState s arr SpliceBuffering :: s -> arr -> SpliceState s arr SpliceYielding :: arr -> SpliceState s arr -> SpliceState s arr SpliceFinish :: SpliceState s arr lpackArraysChunksOf :: (MonadIO m, Unbox a) => Int -> Fold m (MutArray a) () -> Fold m (MutArray a) () -- | Coalesce adjacent arrays in incoming stream to form bigger arrays of a -- maximum specified size in bytes. -- -- Internal compact :: (MonadIO m, Unbox a) => Int -> Stream m (MutArray a) -> Stream m (MutArray a) -- | Coalesce adjacent arrays in incoming stream to form bigger arrays of a -- maximum specified size in bytes. -- -- Internal compactLE :: (MonadIO m, Unbox a) => Int -> Stream m (MutArray a) -> Stream m (Either ParseError (MutArray a)) -- | Like compactLE but generates arrays of exactly equal to the -- size specified except for the last array in the stream which could be -- shorter. -- -- Unimplemented compactEQ :: Int -> Stream m (MutArray a) -> Stream m (MutArray a) -- | Like compactLE but generates arrays of size greater than or -- equal to the specified except for the last array in the stream which -- could be shorter. -- -- Internal compactGE :: (MonadIO m, Unbox a) => Int -> Stream m (MutArray a) -> Stream m (MutArray a) -- | Deprecated: Please use "unfoldMany reader" instead. flattenArrays :: forall m a. (MonadIO m, Unbox a) => Stream m (MutArray a) -> Stream m a -- | Deprecated: Please use "unfoldMany readerRev" instead. flattenArraysRev :: forall m a. (MonadIO m, Unbox a) => Stream m (MutArray a) -> Stream m a -- | Deprecated: Please use fromChunksK instead. fromArrayStreamK :: (Unbox a, MonadIO m) => StreamK m (MutArray a) -> m (MutArray a) -- | Streams represented as state machines, that fuse together when -- composed statically, eliminating function calls or intermediate -- constructor allocations - generating tight, efficient loops. Suitable -- for high performance looping operations. -- -- If you need to call these operations recursively in a loop (i.e. -- composed dynamically) then it is recommended to use the continuation -- passing style (CPS) stream operations from the -- Streamly.Data.StreamK module. Stream and -- StreamK types are interconvertible. 
See more details in the -- documentation below regarding Stream vs StreamK. -- -- Please refer to Streamly.Internal.Data.Stream for more -- functions that have not yet been released. -- -- Check out the https://github.com/composewell/streamly-examples -- repository for many more real-world examples of stream programming. module Streamly.Data.Stream -- | A stream consists of a step function that generates the next step -- given a current state, and the current state. data Stream m a -- | A stream that terminates without producing any output or side effect. -- --
--   >>> Stream.toList Stream.nil
--   []
--   
nil :: Applicative m => Stream m a -- | A stream that terminates without producing any output, but produces a -- side effect. -- --
--   >>> Stream.fold Fold.toList (Stream.nilM (print "nil"))
--   "nil"
--   []
--   
-- -- Pre-release nilM :: Applicative m => m b -> Stream m a -- | WARNING! O(n^2) time complexity wrt number of elements. Use the O(n) -- complexity StreamK.cons unless you want to statically fuse just -- a few elements. -- -- Fuse a pure value at the head of an existing stream: -- --
--   >>> s = 1 `Stream.cons` Stream.fromList [2,3]
--   
--   >>> Stream.toList s
--   [1,2,3]
--   
-- -- Definition: -- --
--   >>> cons x xs = return x `Stream.consM` xs
--   
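-- -- When a stream has to be built element by element dynamically, the CPS variant avoids the quadratic cost warned about above. A sketch (the helper name is illustrative), assuming the Streamly.Data.StreamK module of this package: -- --
--   import qualified Streamly.Data.Fold as Fold
--   import qualified Streamly.Data.Stream as Stream
--   import qualified Streamly.Data.StreamK as StreamK
--   
--   -- Build a stream by consing each element, then convert back to the
--   -- fused Stream type for consumption.
--   fromElems :: [Int] -> IO [Int]
--   fromElems xs =
--       Stream.fold Fold.toList
--           $ StreamK.toStream
--           $ foldr StreamK.cons StreamK.nil xs
--   
--   -- fromElems [1,2,3] returns [1,2,3]
--   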
cons :: Applicative m => a -> Stream m a -> Stream m a infixr 5 `cons` -- | Like cons but fuses an effect instead of a pure value. consM :: Applicative m => m a -> Stream m a -> Stream m a infixr 5 `consM` -- | Build a stream by unfolding a pure step function step -- starting from a seed s. The step function returns the next -- element in the stream and the next seed value. When it is done it -- returns Nothing and the stream ends. For example, -- --
--   >>> :{
--   let f b =
--           if b > 2
--           then Nothing
--           else Just (b, b + 1)
--   in Stream.toList $ Stream.unfoldr f 0
--   :}
--   [0,1,2]
--   
unfoldr :: Monad m => (s -> Maybe (a, s)) -> s -> Stream m a -- | Build a stream by unfolding a monadic step function starting -- from a seed. The step function returns the next element in the stream -- and the next seed value. When it is done it returns Nothing and -- the stream ends. For example, -- --
--   >>> :{
--   let f b =
--           if b > 2
--           then return Nothing
--           else return (Just (b, b + 1))
--   in Stream.toList $ Stream.unfoldrM f 0
--   :}
--   [0,1,2]
--   
unfoldrM :: Monad m => (s -> m (Maybe (a, s))) -> s -> Stream m a -- | Create a singleton stream from a pure value. -- --
--   >>> fromPure a = a `Stream.cons` Stream.nil
--   
--   >>> fromPure = pure
--   
--   >>> fromPure = Stream.fromEffect . pure
--   
fromPure :: Applicative m => a -> Stream m a -- | Create a singleton stream from a monadic action. -- --
--   >>> fromEffect m = m `Stream.consM` Stream.nil
--   
--   >>> fromEffect = Stream.sequence . Stream.fromPure
--   
-- --
--   >>> Stream.fold Fold.drain $ Stream.fromEffect (putStrLn "hello")
--   hello
--   
fromEffect :: Applicative m => m a -> Stream m a -- | Generate an infinite stream by repeating a pure value. -- --
--   >>> repeat x = Stream.repeatM (pure x)
--   
repeat :: Monad m => a -> Stream m a -- |
--   >>> repeatM = Stream.sequence . Stream.repeat
--   
-- -- Generate a stream by repeatedly executing a monadic action forever. -- --
--   >>> :{
--   repeatAction =
--          Stream.repeatM (threadDelay 1000000 >> print 1)
--        & Stream.take 10
--        & Stream.fold Fold.drain
--   :}
--   
repeatM :: Monad m => m a -> Stream m a -- |
--   >>> replicate n = Stream.take n . Stream.repeat
--   
--   >>> replicate n x = Stream.replicateM n (pure x)
--   
-- -- Generate a stream of length n by repeating a value n -- times. replicate :: Monad m => Int -> a -> Stream m a -- |
--   >>> replicateM n = Stream.sequence . Stream.replicate n
--   
-- -- Generate a stream by performing a monadic action n times. replicateM :: Monad m => Int -> m a -> Stream m a -- | Types that can be enumerated as a stream. The operations in this type -- class are equivalent to those in the Enum type class, except -- that these generate a stream instead of a list. Use the functions in -- Streamly.Internal.Data.Stream.Enumeration module to define new -- instances. class Enum a => Enumerable a -- | enumerateFrom from generates a stream starting with the -- element from, enumerating up to maxBound when the type -- is Bounded or generating an infinite stream when the type is -- not Bounded. -- --
--   >>> Stream.toList $ Stream.take 4 $ Stream.enumerateFrom (0 :: Int)
--   [0,1,2,3]
--   
-- -- For Fractional types, enumeration is numerically stable. -- However, no overflow or underflow checks are performed. -- --
--   >>> Stream.toList $ Stream.take 4 $ Stream.enumerateFrom 1.1
--   [1.1,2.1,3.1,4.1]
--   
enumerateFrom :: (Enumerable a, Monad m) => a -> Stream m a -- | Generate a finite stream starting with the element from, -- enumerating the type up to the value to. If to is -- smaller than from then an empty stream is returned. -- --
--   >>> Stream.toList $ Stream.enumerateFromTo 0 4
--   [0,1,2,3,4]
--   
-- -- For Fractional types, the last element is equal to the -- specified to value after rounding to the nearest integral -- value. -- --
--   >>> Stream.toList $ Stream.enumerateFromTo 1.1 4
--   [1.1,2.1,3.1,4.1]
--   
-- --
--   >>> Stream.toList $ Stream.enumerateFromTo 1.1 4.6
--   [1.1,2.1,3.1,4.1,5.1]
--   
enumerateFromTo :: (Enumerable a, Monad m) => a -> a -> Stream m a -- | enumerateFromThen from then generates a stream whose first -- element is from, the second element is then and the -- successive elements are in increments of then - from. -- Enumeration can occur downwards or upwards depending on whether -- then comes before or after from. For Bounded -- types the stream ends when maxBound is reached, for unbounded -- types it keeps enumerating infinitely. -- --
--   >>> Stream.toList $ Stream.take 4 $ Stream.enumerateFromThen 0 2
--   [0,2,4,6]
--   
-- --
--   >>> Stream.toList $ Stream.take 4 $ Stream.enumerateFromThen 0 (-2)
--   [0,-2,-4,-6]
--   
enumerateFromThen :: (Enumerable a, Monad m) => a -> a -> Stream m a -- | enumerateFromThenTo from then to generates a finite stream -- whose first element is from, the second element is -- then and the successive elements are in increments of -- then - from up to to. Enumeration can occur -- downwards or upwards depending on whether then comes before -- or after from. -- --
--   >>> Stream.toList $ Stream.enumerateFromThenTo 0 2 6
--   [0,2,4,6]
--   
-- --
--   >>> Stream.toList $ Stream.enumerateFromThenTo 0 (-2) (-6)
--   [0,-2,-4,-6]
--   
enumerateFromThenTo :: (Enumerable a, Monad m) => a -> a -> a -> Stream m a -- |
--   enumerate = enumerateFrom minBound
--   
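-- -- For example, a Bounded type can be enumerated in full. A sketch (the helper name is illustrative), assuming an Enumerable instance for Bool as provided for the standard base types: -- --
--   import qualified Streamly.Data.Fold as Fold
--   import qualified Streamly.Data.Stream as Stream
--   
--   -- All values of Bool, from minBound to maxBound.
--   allBools :: IO [Bool]
--   allBools = Stream.fold Fold.toList Stream.enumerate
--   
--   -- allBools returns [False,True]
--   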
-- -- Enumerate a Bounded type from its minBound to -- maxBound enumerate :: (Monad m, Bounded a, Enumerable a) => Stream m a -- |
--   >>> enumerateTo = Stream.enumerateFromTo minBound
--   
-- -- Enumerate a Bounded type from its minBound to specified -- value. enumerateTo :: (Monad m, Bounded a, Enumerable a) => a -> Stream m a -- | Generate an infinite stream with x as the first element and -- each successive element derived by applying the function f on -- the previous element. -- --
--   >>> Stream.toList $ Stream.take 5 $ Stream.iterate (+1) 1
--   [1,2,3,4,5]
--   
iterate :: Monad m => (a -> a) -> a -> Stream m a -- | Generate an infinite stream with the first element generated by the -- action m and each successive element derived by applying the -- monadic function f on the previous element. -- --
--   >>> :{
--   Stream.iterateM (\x -> print x >> return (x + 1)) (return 0)
--       & Stream.take 3
--       & Stream.toList
--   :}
--   0
--   1
--   [0,1,2]
--   
iterateM :: Monad m => (a -> m a) -> m a -> Stream m a -- | Construct a stream from a list of pure values. fromList :: Applicative m => [a] -> Stream m a -- | Convert an Unfold into a stream by supplying it an input seed. -- --
--   >>> s = Stream.unfold Unfold.replicateM (3, putStrLn "hello")
--   
--   >>> Stream.fold Fold.drain s
--   hello
--   hello
--   hello
--   
unfold :: Applicative m => Unfold m a b -> a -> Stream m b -- | Decompose a stream into its head and tail. If the stream is empty, -- returns Nothing. If the stream is non-empty, returns Just -- (a, ma), where a is the head of the stream and -- ma its tail. -- -- Properties: -- --
--   >>> Nothing <- Stream.uncons Stream.nil
--   
--   >>> Just ("a", t) <- Stream.uncons (Stream.cons "a" Stream.nil)
--   
-- -- This can be used to consume the stream in an imperative manner one -- element at a time, as it just breaks down the stream into individual -- elements and we can loop over them as we deem fit. For example, this -- can be used to convert a streamly stream into other stream types. -- -- All the folds in this module can be expressed in terms of -- uncons, however, this is generally less efficient than specific -- folds because it takes apart the stream one element at a time, -- therefore, does not take advantage of stream fusion. -- -- foldBreak is a more general way of consuming a stream -- piecemeal. -- --
--   >>> :{
--   uncons xs = do
--       r <- Stream.foldBreak Fold.one xs
--       return $ case r of
--           (Nothing, _) -> Nothing
--           (Just h, t) -> Just (h, t)
--   :}
--   
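-- -- A direct example of the element-at-a-time consumption described above. A sketch (the helper name is illustrative), assuming the usual qualified imports: -- --
--   import qualified Streamly.Data.Stream as Stream
--   
--   -- Print every element by repeatedly splitting off the head with
--   -- uncons; an imperative-style loop over the stream.
--   printAll :: Show a => Stream.Stream IO a -> IO ()
--   printAll s = do
--       r <- Stream.uncons s
--       case r of
--           Nothing -> return ()
--           Just (x, rest) -> print x >> printAll rest
--   
--   -- printAll (Stream.fromList [1, 2, 3 :: Int]) prints 1, 2 and 3
--   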
uncons :: Monad m => Stream m a -> m (Maybe (a, Stream m a)) -- | Fold a stream using the supplied left Fold and reducing the -- resulting expression strictly at each step. The behavior is similar to -- foldl'. A Fold can terminate early without consuming the -- full stream. See the documentation of individual Folds for -- termination behavior. -- -- Definitions: -- --
--   >>> fold f = fmap fst . Stream.foldBreak f
--   
--   >>> fold f = Stream.parse (Parser.fromFold f)
--   
-- -- Example: -- --
--   >>> Stream.fold Fold.sum (Stream.enumerateFromTo 1 100)
--   5050
--   
fold :: Monad m => Fold m a b -> Stream m a -> m b -- | Like fold but also returns the remaining stream. The resulting -- stream would be nil if the stream finished before the fold. foldBreak :: Monad m => Fold m a b -> Stream m a -> m (b, Stream m a) -- | Parse a stream using the supplied Parser. -- -- Parsers (See Streamly.Internal.Data.Parser) are more powerful -- folds that add backtracking and error functionality to terminating -- folds. Unlike folds, parsers may not always result in a valid output, -- they may result in an error. For example: -- --
--   >>> Stream.parse (Parser.takeEQ 1 Fold.drain) Stream.nil
--   Left (ParseError "takeEQ: Expecting exactly 1 elements, input terminated on 0")
--   
-- -- Note: parse p is not the same as head . parseMany p -- on an empty stream. parse :: Monad m => Parser a m b -> Stream m a -> m (Either ParseError b) -- | Right associative/lazy pull fold. foldrM build final stream -- constructs an output structure using the step function build. -- build is invoked with the next input element and the -- remaining (lazy) tail of the output structure. It builds a lazy output -- expression using the two. When the "tail structure" in the output -- expression is evaluated it calls build again thus lazily -- consuming the input stream until either the output expression -- built by build is free of the "tail" or the input is -- exhausted in which case final is used as the terminating case -- for the output structure. For more details see the description in the -- previous section. -- -- Example, determine if any element is odd in a stream: -- --
--   >>> s = Stream.fromList (2:4:5:undefined)
--   
--   >>> step x xs = if odd x then return True else xs
--   
--   >>> Stream.foldrM step (return False) s
--   True
--   
foldrM :: Monad m => (a -> m b -> m b) -> m b -> Stream m a -> m b -- | Right fold, lazy for lazy monads and pure streams, and strict for -- strict monads. -- -- Please avoid using this routine in strict monads like IO unless you -- need a strict right fold. This is provided only for use in lazy monads -- (e.g. Identity) or pure streams. Note that with this signature it is -- not possible to implement a lazy foldr when the monad m is -- strict. In that case it would be strict in its accumulator and -- therefore would necessarily consume all its input. -- --
--   >>> foldr f z = Stream.foldrM (\a b -> f a <$> b) (return z)
--   
-- -- Note: This is similar to Fold.foldr' (the right fold via left fold), -- but could be more efficient. foldr :: Monad m => (a -> b -> b) -> b -> Stream m a -> m b -- | Definitions: -- --
--   >>> toList = Stream.foldr (:) []
--   
--   >>> toList = Stream.fold Fold.toList
--   
-- -- Convert a stream into a list in the underlying monad. The list can be -- consumed lazily in a lazy monad (e.g. Identity). In a strict -- monad (e.g. IO) the whole list is generated and buffered before it can -- be consumed. -- -- Warning! Working on large lists accumulated as buffers in -- memory could be very inefficient, consider using -- Streamly.Data.Array instead. -- -- Note that this could be a bit more efficient compared to Stream.fold -- Fold.toList, and it can fuse with pure list consumers. toList :: Monad m => Stream m a -> m [a] -- |
--   >>> sequence = Stream.mapM id
--   
-- -- Replace the elements of a stream of monadic actions with the outputs -- of those actions. -- --
--   >>> s = Stream.fromList [putStr "a", putStr "b", putStrLn "c"]
--   
--   >>> Stream.fold Fold.drain $ Stream.sequence s
--   abc
--   
sequence :: Monad m => Stream m (m a) -> Stream m a -- |
--   >>> mapM f = Stream.sequence . fmap f
--   
-- -- Apply a monadic function to each element of the stream and replace it -- with the output of the resulting action. -- --
--   >>> s = Stream.fromList ["a", "b", "c"]
--   
--   >>> Stream.fold Fold.drain $ Stream.mapM putStr s
--   abc
--   
mapM :: Monad m => (a -> m b) -> Stream m a -> Stream m b -- | Apply a monadic function to each element flowing through the stream -- and discard the results. -- --
--   >>> s = Stream.enumerateFromTo 1 2
--   
--   >>> Stream.fold Fold.drain $ Stream.trace print s
--   1
--   2
--   
-- -- Compare with tap. trace :: Monad m => (a -> m b) -> Stream m a -> Stream m a -- | Tap the data flowing through a stream into a Fold. For example, -- you may add a tap to log the contents flowing through the stream. The -- fold is used only for effects, its result is discarded. -- --
--                     Fold m a b
--                         |
--   -----stream m a ---------------stream m a-----
--   
-- --
--   >>> s = Stream.enumerateFromTo 1 2
--   
--   >>> Stream.fold Fold.drain $ Stream.tap (Fold.drainMapM print) s
--   1
--   2
--   
-- -- Compare with trace. tap :: Monad m => Fold m a b -> Stream m a -> Stream m a -- | Introduce a delay of specified seconds between elements of the stream. -- -- Definition: -- --
--   >>> sleep n = liftIO $ threadDelay $ round $ n * 1000000
--   
--   >>> delay = Stream.intersperseM_ . sleep
--   
-- -- Example: -- --
--   >>> input = Stream.enumerateFromTo 1 3
--   
--   >>> Stream.fold (Fold.drainMapM print) $ Stream.delay 1 input
--   1
--   2
--   3
--   
delay :: MonadIO m => Double -> Stream m a -> Stream m a -- | Strict left scan. Scan a stream using the given monadic fold. -- --
--   >>> s = Stream.fromList [1..10]
--   
--   >>> Stream.fold Fold.toList $ Stream.takeWhile (< 10) $ Stream.scan Fold.sum s
--   [0,1,3,6]
--   
-- -- See also: usingStateT scan :: Monad m => Fold m a b -> Stream m a -> Stream m b -- | Postscan a stream using the given monadic fold. -- -- The following example extracts the input stream up to a point where -- the running average of elements is no more than 10: -- --
--   >>> import Data.Maybe (fromJust)
--   
--   >>> let avg = Fold.teeWith (/) Fold.sum (fmap fromIntegral Fold.length)
--   
--   >>> s = Stream.enumerateFromTo 1.0 100.0
--   
--   >>> :{
--    Stream.fold Fold.toList
--     $ fmap (fromJust . fst)
--     $ Stream.takeWhile (\(_,x) -> x <= 10)
--     $ Stream.postscan (Fold.tee Fold.latest avg) s
--   :}
--   [1.0,2.0,3.0,4.0,5.0,6.0,7.0,8.0,9.0,10.0,11.0,12.0,13.0,14.0,15.0,16.0,17.0,18.0,19.0]
--   
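-- -- Unlike scan, postscan does not emit the initial value of the -- accumulator. A quick sketch (with the qualified imports assumed in the -- other examples): -- --
--   >>> Stream.fold Fold.toList $ Stream.scan Fold.sum (Stream.fromList [1,2,3::Int])
--   [0,1,3,6]
--   
--   >>> Stream.fold Fold.toList $ Stream.postscan Fold.sum (Stream.fromList [1,2,3::Int])
--   [1,3,6]
--   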
postscan :: Monad m => Fold m a b -> Stream m a -> Stream m b -- |
--   >>> f = Fold.foldl' (\(i, _) x -> (i + 1, x)) (-1,undefined)
--   
--   >>> indexed = Stream.postscan f
--   
--   >>> indexed = Stream.zipWith (,) (Stream.enumerateFrom 0)
--   
--   >>> indexedR n = fmap (\(i, a) -> (n - i, a)) . indexed
--   
-- -- Pair each element in a stream with its index, starting from index 0. -- --
--   >>> Stream.fold Fold.toList $ Stream.indexed $ Stream.fromList "hello"
--   [(0,'h'),(1,'e'),(2,'l'),(3,'l'),(4,'o')]
--   
indexed :: Monad m => Stream m a -> Stream m (Int, a) -- | insertBy cmp elem stream inserts elem before the -- first element in stream that is less than elem when -- compared using cmp. -- --
--   >>> insertBy cmp x = Stream.mergeBy cmp (Stream.fromPure x)
--   
-- --
--   >>> input = Stream.fromList [1,3,5]
--   
--   >>> Stream.fold Fold.toList $ Stream.insertBy compare 2 input
--   [1,2,3,5]
--   
insertBy :: Monad m => (a -> a -> Ordering) -> a -> Stream m a -> Stream m a -- | Insert an effect and its output before consuming an element of a -- stream except the first one. -- --
--   >>> input = Stream.fromList "hello"
--   
--   >>> Stream.fold Fold.toList $ Stream.trace putChar $ Stream.intersperseM (putChar '.' >> return ',') input
--   h.,e.,l.,l.,o"h,e,l,l,o"
--   
-- -- Be careful about the order of effects. In the above example we used -- trace after the intersperse, if we use it before the intersperse the -- output would be he.l.l.o."h,e,l,l,o". -- --
--   >>> Stream.fold Fold.toList $ Stream.intersperseM (putChar '.' >> return ',') $ Stream.trace putChar input
--   he.l.l.o."h,e,l,l,o"
--   
intersperseM :: Monad m => m a -> Stream m a -> Stream m a -- | Insert a side effect before consuming an element of a stream except -- the first one. -- --
--   >>> input = Stream.fromList "hello"
--   
--   >>> Stream.fold Fold.drain $ Stream.trace putChar $ Stream.intersperseM_ (putChar '.') input
--   h.e.l.l.o
--   
-- -- Pre-release intersperseM_ :: Monad m => m b -> Stream m a -> Stream m a -- | Insert a pure value between successive elements of a stream. -- --
--   >>> input = Stream.fromList "hello"
--   
--   >>> Stream.fold Fold.toList $ Stream.intersperse ',' input
--   "h,e,l,l,o"
--   
intersperse :: Monad m => a -> Stream m a -> Stream m a -- | Map a Maybe returning function to a stream, filter out the -- Nothing elements, and return a stream of values extracted from -- Just. -- -- Equivalent to: -- --
--   >>> mapMaybe f = Stream.catMaybes . fmap f
--   
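-- -- For example, a small sketch (the helper f is just illustrative) that -- keeps only the even elements and doubles them: -- --
--   >>> f x = if even x then Just (x * 2) else Nothing
--   
--   >>> Stream.fold Fold.toList $ Stream.mapMaybe f (Stream.fromList [1..5::Int])
--   [4,8]
--   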
mapMaybe :: Monad m => (a -> Maybe b) -> Stream m a -> Stream m b -- | Like mapMaybe but maps a monadic function. -- -- Equivalent to: -- --
--   >>> mapMaybeM f = Stream.catMaybes . Stream.mapM f
--   
-- --
--   >>> mapM f = Stream.mapMaybeM (\x -> Just <$> f x)
--   
mapMaybeM :: Monad m => (a -> m (Maybe b)) -> Stream m a -> Stream m b -- | Include only those elements that pass a predicate. -- --
--   >>> filter p = Stream.filterM (return . p)
--   
--   >>> filter p = Stream.mapMaybe (\x -> if p x then Just x else Nothing)
--   
--   >>> filter p = Stream.scanMaybe (Fold.filtering p)
--   
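-- -- For example (with the qualified imports assumed in the other -- examples): -- --
--   >>> Stream.fold Fold.toList $ Stream.filter even (Stream.fromList [1..10::Int])
--   [2,4,6,8,10]
--   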
filter :: Monad m => (a -> Bool) -> Stream m a -> Stream m a -- | Same as filter but with a monadic predicate. -- --
--   >>> f p x = p x >>= \r -> return $ if r then Just x else Nothing
--   
--   >>> filterM p = Stream.mapMaybeM (f p)
--   
filterM :: Monad m => (a -> m Bool) -> Stream m a -> Stream m a -- | In a stream of Maybes, discard Nothings and unwrap -- Justs. -- --
--   >>> catMaybes = Stream.mapMaybe id
--   
--   >>> catMaybes = fmap fromJust . Stream.filter isJust
--   
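-- -- For example, a small sketch: -- --
--   >>> s = Stream.fromList [Just 1, Nothing, Just 3 :: Maybe Int]
--   
--   >>> Stream.fold Fold.toList $ Stream.catMaybes s
--   [1,3]
--   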
-- -- Pre-release catMaybes :: Monad m => Stream m (Maybe a) -> Stream m a -- | Discard Rights and unwrap Lefts in an Either -- stream. -- --
--   >>> catLefts = fmap (fromLeft undefined) . Stream.filter isLeft
--   
-- -- Pre-release catLefts :: Monad m => Stream m (Either a b) -> Stream m a -- | Discard Lefts and unwrap Rights in an Either -- stream. -- --
--   >>> catRights = fmap (fromRight undefined) . Stream.filter isRight
--   
-- -- Pre-release catRights :: Monad m => Stream m (Either a b) -> Stream m b -- | Remove the either wrapper and flatten both lefts as well as rights -- in the output stream. -- --
--   >>> catEithers = fmap (either id id)
--   
-- -- Pre-release catEithers :: Monad m => Stream m (Either a a) -> Stream m a -- | Use a filtering fold on a stream. -- --
--   >>> scanMaybe f = Stream.catMaybes . Stream.postscan f
--   
scanMaybe :: Monad m => Fold m a (Maybe b) -> Stream m a -> Stream m b -- | Take first n elements from the stream and discard the rest. take :: Applicative m => Int -> Stream m a -> Stream m a -- | End the stream as soon as the predicate fails on an element. takeWhile :: Monad m => (a -> Bool) -> Stream m a -> Stream m a -- | Same as takeWhile but with a monadic predicate. takeWhileM :: Monad m => (a -> m Bool) -> Stream m a -> Stream m a -- | Discard first n elements from the stream and take the rest. drop :: Monad m => Int -> Stream m a -> Stream m a -- | Drop elements in the stream as long as the predicate succeeds and then -- take the rest of the stream. dropWhile :: Monad m => (a -> Bool) -> Stream m a -> Stream m a -- | Same as dropWhile but with a monadic predicate. dropWhileM :: Monad m => (a -> m Bool) -> Stream m a -> Stream m a -- | WARNING! O(n^2) time complexity wrt number of streams. Suitable for -- statically fusing a small number of streams. Use the O(n) complexity -- StreamK.append otherwise. -- -- Fuses two streams sequentially, yielding all elements from the first -- stream, and then all elements from the second stream. -- --
--   >>> s1 = Stream.fromList [1,2]
--   
--   >>> s2 = Stream.fromList [3,4]
--   
--   >>> Stream.fold Fold.toList $ s1 `Stream.append` s2
--   [1,2,3,4]
--   
append :: Monad m => Stream m a -> Stream m a -> Stream m a -- | WARNING! O(n^2) time complexity wrt number of streams. Suitable for -- statically fusing a small number of streams. Use the O(n) complexity -- StreamK.interleave otherwise. -- -- Interleaves two streams, yielding one element from each stream -- alternately. When one stream stops the rest of the other stream is -- used in the output stream. interleave :: Monad m => Stream m a -> Stream m a -> Stream m a -- | WARNING! O(n^2) time complexity wrt number of streams. Suitable for -- statically fusing a small number of streams. Use the O(n) complexity -- StreamK.mergeBy otherwise. -- -- Merge two streams using a comparison function. The head elements of -- both the streams are compared and the smaller of the two elements is -- emitted, if both elements are equal then the element from the first -- stream is used first. -- -- If the streams are sorted in ascending order, the resulting stream -- would also remain sorted in ascending order. -- --
--   >>> s1 = Stream.fromList [1,3,5]
--   
--   >>> s2 = Stream.fromList [2,4,6,8]
--   
--   >>> Stream.fold Fold.toList $ Stream.mergeBy compare s1 s2
--   [1,2,3,4,5,6,8]
--   
mergeBy :: Monad m => (a -> a -> Ordering) -> Stream m a -> Stream m a -> Stream m a -- | Like mergeBy but with a monadic comparison function. -- -- Example, to merge two streams randomly: -- --
--   > randomly _ _ = randomIO >>= \x -> return $ if x then LT else GT
--   > Stream.toList $ Stream.mergeByM randomly (Stream.fromList [1,1,1,1]) (Stream.fromList [2,2,2,2])
--   [2,1,2,2,2,1,1,1]
--   
-- -- Example, merge two streams in a proportion of 2:1: -- --
--   >>> :{
--   do
--    let s1 = Stream.fromList [1,1,1,1,1,1]
--        s2 = Stream.fromList [2,2,2]
--    let proportionately m n = do
--         ref <- newIORef $ cycle $ Prelude.concat [Prelude.replicate m LT, Prelude.replicate n GT]
--         return $ \_ _ -> do
--            r <- readIORef ref
--            writeIORef ref $ Prelude.tail r
--            return $ Prelude.head r
--    f <- proportionately 2 1
--    xs <- Stream.fold Fold.toList $ Stream.mergeByM f s1 s2
--    print xs
--   :}
--   [1,1,2,1,1,2,1,1,2]
--   
mergeByM :: Monad m => (a -> a -> m Ordering) -> Stream m a -> Stream m a -> Stream m a -- | WARNING! O(n^2) time complexity wrt number of streams. Suitable for -- statically fusing a small number of streams. Use the O(n) complexity -- StreamK.zipWith otherwise. -- -- Stream a is evaluated first, followed by stream b, -- the resulting elements a and b are then zipped using -- the supplied zip function and the result c is yielded to the -- consumer. -- -- If stream a or stream b ends, the zipped stream -- ends. If stream b ends first, the element a from -- previous evaluation of stream a is discarded. -- --
--   >>> s1 = Stream.fromList [1,2,3]
--   
--   >>> s2 = Stream.fromList [4,5,6]
--   
--   >>> Stream.fold Fold.toList $ Stream.zipWith (+) s1 s2
--   [5,7,9]
--   
zipWith :: Monad m => (a -> b -> c) -> Stream m a -> Stream m b -> Stream m c -- | Like zipWith but using a monadic zipping function. zipWithM :: Monad m => (a -> b -> m c) -> Stream m a -> Stream m b -> Stream m c -- | Definition: -- --
--   >>> crossWith f m1 m2 = fmap f m1 `Stream.crossApply` m2
--   
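-- -- For example, a small sketch pairing every element of the first stream -- with every element of the second (the first stream drives the outer -- loop): -- --
--   >>> Stream.fold Fold.toList $ Stream.crossWith (,) (Stream.fromList [1,2::Int]) (Stream.fromList "ab")
--   [(1,'a'),(1,'b'),(2,'a'),(2,'b')]
--   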
-- -- Note that the second stream is evaluated multiple times. crossWith :: Monad m => (a -> b -> c) -> Stream m a -> Stream m b -> Stream m c -- | unfoldMany unfold stream uses unfold to map the -- input stream elements to streams and then flattens the generated -- streams into a single output stream. -- -- Like concatMap but uses an Unfold for stream generation. -- Unlike concatMap this can fuse the Unfold code with the -- inner loop and therefore provide many times better performance. unfoldMany :: Monad m => Unfold m a b -> Stream m a -> Stream m b -- | intersperse followed by unfold and concat. -- --
--   >>> intercalate u a = Stream.unfoldMany u . Stream.intersperse a
--   
--   >>> intersperse = Stream.intercalate Unfold.identity
--   
--   >>> unwords = Stream.intercalate Unfold.fromList " "
--   
-- --
--   >>> input = Stream.fromList ["abc", "def", "ghi"]
--   
--   >>> Stream.fold Fold.toList $ Stream.intercalate Unfold.fromList " " input
--   "abc def ghi"
--   
intercalate :: Monad m => Unfold m b c -> b -> Stream m b -> Stream m c -- | intersperseMSuffix followed by unfold and concat. -- --
--   >>> intercalateSuffix u a = Stream.unfoldMany u . Stream.intersperseMSuffix a
--   
--   >>> intersperseMSuffix = Stream.intercalateSuffix Unfold.identity
--   
--   >>> unlines = Stream.intercalateSuffix Unfold.fromList "\n"
--   
-- --
--   >>> input = Stream.fromList ["abc", "def", "ghi"]
--   
--   >>> Stream.fold Fold.toList $ Stream.intercalateSuffix Unfold.fromList "\n" input
--   "abc\ndef\nghi\n"
--   
intercalateSuffix :: Monad m => Unfold m b c -> b -> Stream m b -> Stream m c -- | Given a stream value in the underlying monad, lift and join the -- underlying monad with the stream monad. -- --
--   >>> concatEffect = Stream.concat . Stream.fromEffect
--   
--   >>> concatEffect eff = Stream.concatMapM (\() -> eff) (Stream.fromPure ())
--   
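-- -- For example, a small sketch where the action that produces the stream -- also prints a message (the action name is just illustrative): -- --
--   >>> action = putStrLn "generating" >> return (Stream.fromList [1,2,3::Int])
--   
--   >>> Stream.fold Fold.toList $ Stream.concatEffect action
--   generating
--   [1,2,3]
--   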
-- -- See also: concat, sequence concatEffect :: Monad m => m (Stream m a) -> Stream m a -- | Map a stream producing function on each element of the stream and then -- flatten the results into a single stream. -- --
--   >>> concatMap f = Stream.concatMapM (return . f)
--   
--   >>> concatMap f = Stream.concat . fmap f
--   
--   >>> concatMap f = Stream.unfoldMany (Unfold.lmap f Unfold.fromStream)
--   
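-- -- For example, a small sketch that duplicates every element: -- --
--   >>> Stream.fold Fold.toList $ Stream.concatMap (\x -> Stream.fromList [x, x]) (Stream.fromList [1,2,3::Int])
--   [1,1,2,2,3,3]
--   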
-- -- See unfoldMany for a fusible alternative. concatMap :: Monad m => (a -> Stream m b) -> Stream m a -> Stream m b -- | Map a stream producing monadic function on each element of the stream -- and then flatten the results into a single stream. Since the stream -- generation function is monadic, unlike concatMap, it can -- produce an effect at the beginning of each iteration of the inner -- loop. -- -- See unfoldMany for a fusible alternative. concatMapM :: Monad m => (a -> m (Stream m b)) -> Stream m a -> Stream m b -- | Apply a Fold repeatedly on a stream and emit the results in the -- output stream. -- -- Definition: -- --
--   >>> foldMany f = Stream.parseMany (Parser.fromFold f)
--   
-- -- Example, empty stream: -- --
--   >>> f = Fold.take 2 Fold.sum
--   
--   >>> fmany = Stream.fold Fold.toList . Stream.foldMany f
--   
--   >>> fmany $ Stream.fromList []
--   []
--   
-- -- Example, last fold empty: -- --
--   >>> fmany $ Stream.fromList [1..4]
--   [3,7]
--   
-- -- Example, last fold non-empty: -- --
--   >>> fmany $ Stream.fromList [1..5]
--   [3,7,5]
--   
-- -- Note that using a closed fold e.g. Fold.take 0, would result -- in an infinite stream on a non-empty input stream. foldMany :: Monad m => Fold m a b -> Stream m a -> Stream m b -- | Group the input stream into groups of n elements each and -- then fold each group using the provided fold function. -- --
--   groupsOf n f = foldMany (Fold.take n f)
--   
-- --
--   >>> Stream.toList $ Stream.groupsOf 2 Fold.sum (Stream.enumerateFromTo 1 10)
--   [3,7,11,15,19]
--   
-- -- This can be considered as an n-fold version of take where we -- apply take repeatedly on the leftover stream until the stream -- is exhausted. groupsOf :: Monad m => Int -> Fold m a b -> Stream m a -> Stream m b -- | Apply a Parser repeatedly on a stream and emit the parsed -- values in the output stream. -- -- Example: -- --
--   >>> s = Stream.fromList [1..10]
--   
--   >>> parser = Parser.takeBetween 0 2 Fold.sum
--   
--   >>> Stream.fold Fold.toList $ Stream.parseMany parser s
--   [Right 3,Right 7,Right 11,Right 15,Right 19]
--   
-- -- This is the streaming equivalent of the many parse combinator. -- -- Known Issues: When the parser fails there is no way to get the -- remaining stream. parseMany :: Monad m => Parser a m b -> Stream m a -> Stream m (Either ParseError b) -- | Split on an infixed separator element, dropping the separator. The -- supplied Fold is applied on the split segments. Splits the -- stream on separator elements determined by the supplied predicate, -- separator is considered as infixed between two segments: -- --
--   >>> splitOn' p xs = Stream.fold Fold.toList $ Stream.splitOn p Fold.toList (Stream.fromList xs)
--   
--   >>> splitOn' (== '.') "a.b"
--   ["a","b"]
--   
-- -- An empty stream is folded to the default value of the fold: -- --
--   >>> splitOn' (== '.') ""
--   [""]
--   
-- -- If one or both sides of the separator are missing then the empty -- segment on that side is folded to the default output of the fold: -- --
--   >>> splitOn' (== '.') "."
--   ["",""]
--   
-- --
--   >>> splitOn' (== '.') ".a"
--   ["","a"]
--   
-- --
--   >>> splitOn' (== '.') "a."
--   ["a",""]
--   
-- --
--   >>> splitOn' (== '.') "a..b"
--   ["a","","b"]
--   
-- -- splitOn is an inverse of intercalating a single element: -- --
--   Stream.intercalate (Stream.fromPure '.') Unfold.fromList . Stream.splitOn (== '.') Fold.toList === id
--   
-- -- Assuming the input stream does not contain the separator: -- --
--   Stream.splitOn (== '.') Fold.toList . Stream.intercalate (Stream.fromPure '.') Unfold.fromList === id
--   
splitOn :: Monad m => (a -> Bool) -> Fold m a b -> Stream m a -> Stream m b -- | Split the stream after stripping leading, trailing, and repeated -- separators as per the fold supplied. Therefore, ".a..b." with -- . as the separator would be parsed as ["a","b"]. In -- other words, it's like parsing words from whitespace-separated text. wordsBy :: Monad m => (a -> Bool) -> Fold m a b -> Stream m a -> Stream m b -- | Returns the elements of the stream in reverse order. The stream must -- be finite. Note that this necessarily buffers the entire stream in -- memory. -- -- Definition: -- --
--   >>> reverse m = Stream.concatEffect $ Stream.fold Fold.toListRev m >>= return . Stream.fromList
--   
reverse :: Monad m => Stream m a -> Stream m a -- | Compare two streams for equality eqBy :: Monad m => (a -> b -> Bool) -> Stream m a -> Stream m b -> m Bool -- | Compare two streams lexicographically. cmpBy :: Monad m => (a -> b -> Ordering) -> Stream m a -> Stream m b -> m Ordering -- | Returns True if the first stream is the same as or a prefix of -- the second. A stream is a prefix of itself. -- --
--   >>> Stream.isPrefixOf (Stream.fromList "hello") (Stream.fromList "hello" :: Stream IO Char)
--   True
--   
isPrefixOf :: (Monad m, Eq a) => Stream m a -> Stream m a -> m Bool -- | Returns True if all the elements of the first stream occur, in -- order, in the second stream. The elements do not have to occur -- consecutively. A stream is a subsequence of itself. -- --
--   >>> Stream.isSubsequenceOf (Stream.fromList "hlo") (Stream.fromList "hello" :: Stream IO Char)
--   True
--   
isSubsequenceOf :: (Monad m, Eq a) => Stream m a -> Stream m a -> m Bool -- | stripPrefix prefix input strips the prefix stream -- from the input stream if it is a prefix of input. Returns -- Nothing if the input does not start with the given prefix, -- stripped input otherwise. Returns Just nil when the prefix is -- the same as the input stream. -- -- Space: O(1) stripPrefix :: (Monad m, Eq a) => Stream m a -> Stream m a -> m (Maybe (Stream m a)) -- | Run the action m b if the stream evaluation is aborted due to -- an exception. The exception is not caught, simply rethrown. -- -- Observes exceptions only in the stream generation, and not in stream -- consumers. -- -- Inhibits stream fusion onException :: MonadCatch m => m b -> Stream m a -> Stream m a -- | When evaluating a stream if an exception occurs, stream evaluation -- aborts and the specified exception handler is run with the exception -- as argument. The exception is caught and handled unless the handler -- decides to rethrow it. Note that exception handling is not applied to -- the stream returned by the exception handler. -- -- Observes exceptions only in the stream generation, and not in stream -- consumers. -- -- Inhibits stream fusion handle :: (MonadCatch m, Exception e) => (e -> m (Stream m a)) -> Stream m a -> Stream m a -- | Run the action m b before the stream yields its first -- element. -- -- Same as the following but more efficient due to fusion: -- --
--   >>> before action xs = Stream.nilM action <> xs
--   
--   >>> before action xs = Stream.concatMap (const xs) (Stream.fromEffect action)
--   
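-- -- For example, a small sketch that prints a message before the first -- element is produced: -- --
--   >>> Stream.fold Fold.toList $ Stream.before (putStrLn "starting") (Stream.fromList [1,2::Int])
--   starting
--   [1,2]
--   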
before :: Monad m => m b -> Stream m a -> Stream m a -- | Run the action IO b whenever the stream is evaluated to -- completion, or if it is garbage collected after a partial lazy -- evaluation. -- -- The semantics of the action IO b are similar to the semantics -- of cleanup action in bracketIO. -- -- See also afterUnsafe afterIO :: MonadIO m => IO b -> Stream m a -> Stream m a -- | Run the action IO b whenever the stream stream stops -- normally, aborts due to an exception or if it is garbage collected -- after a partial lazy evaluation. -- -- The semantics of running the action IO b are similar to the -- cleanup action semantics described in bracketIO. -- --
--   >>> finallyIO release = Stream.bracketIO (return ()) (const release)
--   
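-- -- For example, a small sketch that prints a message when the stream -- finishes: -- --
--   >>> Stream.fold Fold.toList $ Stream.finallyIO (putStrLn "cleanup") (Stream.fromList [1,2::Int])
--   cleanup
--   [1,2]
--   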
-- -- See also finallyUnsafe -- -- Inhibits stream fusion finallyIO :: (MonadIO m, MonadCatch m) => IO b -> Stream m a -> Stream m a -- | Run the alloc action IO b with async exceptions disabled but -- keeping blocking operations interruptible (see mask). Use the -- output b of the IO action as input to the function b -- -> Stream m a to generate an output stream. -- -- b is usually a resource under the IO monad, e.g. a file -- handle, that requires a cleanup after use. The cleanup action b -- -> IO c, runs whenever (1) the stream ends normally, (2) due -- to a sync or async exception or, (3) if it gets garbage collected -- after a partial lazy evaluation. The exception is not caught, it is -- rethrown. -- -- bracketIO only guarantees that the cleanup action runs, and it -- runs with async exceptions enabled. The action must ensure that it can -- successfully cleanup the resource in the face of sync or async -- exceptions. -- -- When the stream ends normally or on a sync exception, cleanup action -- runs immediately in the current thread context, whereas in other cases -- it runs in the GC context, therefore, cleanup may be delayed until the -- GC gets to run. An example where GC based cleanup happens is when a -- stream is being folded but the fold terminates without draining the -- entire stream or if the consumer of the stream encounters an -- exception. -- -- Observes exceptions only in the stream generation, and not in stream -- consumers. -- -- See also: bracketUnsafe -- -- Inhibits stream fusion bracketIO :: (MonadIO m, MonadCatch m) => IO b -> (b -> IO c) -> (b -> Stream m a) -> Stream m a -- | Like bracketIO but can use 3 separate cleanup actions depending -- on the mode of termination: -- --
--   1. When the stream stops normally
--   2. When the stream is garbage collected
--   3. When the stream encounters an exception
-- -- bracketIO3 before onStop onGC onException action runs -- action using the result of before. If the stream -- stops, the onStop action is executed; if the stream is abandoned, -- onGC is executed; if the stream encounters an exception, -- onException is executed. -- -- The exception is not caught; it is rethrown. -- -- Inhibits stream fusion -- -- Pre-release bracketIO3 :: (MonadIO m, MonadCatch m) => IO b -> (b -> IO c) -> (b -> IO d) -> (b -> IO e) -> (b -> Stream m a) -> Stream m a -- | Transform the inner monad of a stream using a natural transformation. -- -- Example, generalize the inner monad from Identity to any other: -- --
--   >>> generalizeInner = Stream.morphInner (return . runIdentity)
--   
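-- -- For example, a small sketch that turns a pure stream over Identity -- into a stream over IO: -- --
--   >>> import Data.Functor.Identity (Identity, runIdentity)
--   
--   >>> pureStream = Stream.fromList [1,2,3::Int] :: Stream Identity Int
--   
--   >>> Stream.fold Fold.toList (Stream.morphInner (return . runIdentity) pureStream) :: IO [Int]
--   [1,2,3]
--   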
-- -- Also known as hoist. morphInner :: Monad n => (forall x. m x -> n x) -> Stream m a -> Stream n a -- | Lift the inner monad m of Stream m a to t m -- where t is a monad transformer. liftInner :: (Monad m, MonadTrans t, Monad (t m)) => Stream m a -> Stream (t m) a -- | Evaluate the inner monad of a stream as ReaderT. runReaderT :: Monad m => m s -> Stream (ReaderT s m) a -> Stream m a -- | Evaluate the inner monad of a stream as StateT and emit the -- resulting state and value pair after each step. runStateT :: Monad m => m s -> Stream (StateT s m) a -> Stream m (s, a) -- | chunksOf n stream groups the elements in the input stream -- into arrays of n elements each. -- -- Same as the following but may be more efficient: -- --
--   >>> chunksOf n = Stream.foldMany (Array.writeN n)
--   
-- -- Pre-release chunksOf :: forall m a. (MonadIO m, Unbox a) => Int -> Stream m a -> Stream m (Array a) module Streamly.Internal.FileSystem.Dir -- | Raw read of a directory. -- -- Pre-release read :: (MonadIO m, MonadCatch m) => FilePath -> Stream m FilePath -- | Read files only. -- -- Internal readFiles :: (MonadIO m, MonadCatch m) => FilePath -> Stream m FilePath -- | Read directories only. -- -- Internal readDirs :: (MonadIO m, MonadCatch m) => FilePath -> Stream m FilePath -- | Read directories as Left and files as Right. Filter out "." and ".." -- entries. The output contains the names of the directories and files. -- -- Pre-release readEither :: (MonadIO m, MonadCatch m) => FilePath -> Stream m (Either FilePath FilePath) -- | Like readEither but prefix the names of the files and -- directories with the supplied directory path. readEitherPaths :: (MonadIO m, MonadCatch m) => FilePath -> Stream m (Either FilePath FilePath) reader :: (MonadIO m, MonadCatch m) => Unfold m FilePath FilePath -- | Read files only. -- -- Internal fileReader :: (MonadIO m, MonadCatch m) => Unfold m FilePath FilePath -- | Read directories only. Filter out "." and ".." entries. -- -- Internal dirReader :: (MonadIO m, MonadCatch m) => Unfold m FilePath FilePath -- | Read directories as Left and files as Right. Filter out "." and ".." -- entries. -- -- Internal eitherReader :: (MonadIO m, MonadCatch m) => Unfold m FilePath (Either FilePath FilePath) eitherReaderPaths :: (MonadIO m, MonadCatch m) => Unfold m FilePath (Either FilePath FilePath) -- | Deprecated: Please use read instead toStream :: (MonadIO m, MonadCatch m) => String -> Stream m String -- | Deprecated: Please use readEither instead toEither :: (MonadIO m, MonadCatch m) => FilePath -> Stream m (Either FilePath FilePath) -- | Deprecated: Please use readFiles instead toFiles :: (MonadIO m, MonadCatch m) => FilePath -> Stream m FilePath -- | Deprecated: Please use readDirs instead toDirs :: (MonadIO m, MonadCatch m) => String -> Stream m String -- | Warning: The API of this module is subject to change in future -- releases. Especially the type for representing paths may change from -- FilePath to something else. module Streamly.FileSystem.Dir -- | Raw read of a directory. -- -- Pre-release read :: (MonadIO m, MonadCatch m) => FilePath -> Stream m FilePath -- | Read directories as Left and files as Right. Filter out "." and ".." -- entries. The output contains the names of the directories and files. -- -- Pre-release readEither :: (MonadIO m, MonadCatch m) => FilePath -> Stream m (Either FilePath FilePath) module Streamly.Internal.Data.StreamK -- | Continuation Passing Style (CPS) version of -- Streamly.Data.Stream.Stream. Unlike -- Streamly.Data.Stream.Stream, StreamK can be composed -- recursively without affecting performance. -- -- Semigroup instance appends two streams: -- --
--   >>> (<>) = Stream.append
--   
-- | Deprecated: Please use StreamK instead. type Stream = StreamK newtype StreamK m a MkStream :: (forall r. State StreamK m a -> (a -> StreamK m a -> m r) -> (a -> m r) -> m r -> m r) -> StreamK m a -- | A newtype wrapper for the StreamK type adding a cross product -- style monad instance. -- -- A Monad bind behaves like a for loop: -- --
--   >>> :{
--   Stream.fold Fold.toList $ StreamK.toStream $ StreamK.unCross $ do
--       x <- StreamK.mkCross $ StreamK.fromStream $ Stream.fromList [1,2]
--       -- Perform the following actions for each x in the stream
--       return x
--   :}
--   [1,2]
--   
-- -- Nested monad binds behave like nested for loops: -- --
--   >>> :{
--   Stream.fold Fold.toList $ StreamK.toStream $ StreamK.unCross $ do
--       x <- StreamK.mkCross $ StreamK.fromStream $ Stream.fromList [1,2]
--       y <- StreamK.mkCross $ StreamK.fromStream $ Stream.fromList [3,4]
--       -- Perform the following actions for each x, for each y
--       return (x, y)
--   :}
--   [(1,3),(1,4),(2,3),(2,4)]
--   
data CrossStreamK m a -- | Unwrap the StreamK type from CrossStreamK newtype. -- -- This is a type level operation with no runtime overhead. unCross :: CrossStreamK m a -> StreamK m a -- | Wrap the StreamK type in a CrossStreamK newtype to -- enable cross product style applicative and monad instances. -- -- This is a type level operation with no runtime overhead. mkCross :: StreamK m a -> CrossStreamK m a mkStream :: (forall r. State StreamK m a -> (a -> StreamK m a -> m r) -> (a -> m r) -> m r -> m r) -> StreamK m a -- | Fold a stream by providing a State, stop continuation, a singleton -- continuation and a yield continuation. The stream will not use the -- SVar passed via State. foldStream :: State StreamK m a -> (a -> StreamK m a -> m r) -> (a -> m r) -> m r -> StreamK m a -> m r -- | Fold a stream by providing an SVar, a stop continuation, a singleton -- continuation and a yield continuation. The stream would share the -- current SVar passed via the State. foldStreamShared :: State StreamK m a -> (a -> StreamK m a -> m r) -> (a -> m r) -> m r -> StreamK m a -> m r -- | Lazy right fold with a monadic step function. foldrM :: (a -> m b -> m b) -> m b -> StreamK m a -> m b -- | Right fold to a streaming monad. -- --
--   foldrS StreamK.cons StreamK.nil === id
--   
-- -- foldrS can be used to perform stateless stream to stream -- transformations like map and filter in general. It can be coupled with -- a scan to perform stateful transformations. However, note that the -- custom map and filter routines can be much more efficient than this -- due to better stream fusion. -- --
--   >>> input = StreamK.fromStream $ Stream.fromList [1..5]
--   
--   >>> Stream.fold Fold.toList $ StreamK.toStream $ StreamK.foldrS StreamK.cons StreamK.nil input
--   [1,2,3,4,5]
--   
-- -- Find if any element in the stream is True: -- --
--   >>> step x xs = if odd x then StreamK.fromPure True else xs
--   
--   >>> input = StreamK.fromStream (Stream.fromList (2:4:5:undefined)) :: StreamK IO Int
--   
--   >>> Stream.fold Fold.toList $ StreamK.toStream $ StreamK.foldrS step (StreamK.fromPure False) input
--   [True]
--   
-- -- Map (+2) on odd elements and filter out the even elements: -- --
--   >>> step x xs = if odd x then (x + 2) `StreamK.cons` xs else xs
--   
--   >>> input = StreamK.fromStream (Stream.fromList [1..5]) :: StreamK IO Int
--   
--   >>> Stream.fold Fold.toList $ StreamK.toStream $ StreamK.foldrS step StreamK.nil input
--   [3,5,7]
--   
-- -- Pre-release foldrS :: (a -> StreamK m b -> StreamK m b) -> StreamK m b -> StreamK m a -> StreamK m b -- | Fold sharing the SVar state within the reconstructed stream foldrSShared :: (a -> StreamK m b -> StreamK m b) -> StreamK m b -> StreamK m a -> StreamK m b foldrSM :: Monad m => (m a -> StreamK m b -> StreamK m b) -> StreamK m b -> StreamK m a -> StreamK m b build :: forall m a. (forall b. (a -> b -> b) -> b -> b) -> StreamK m a buildS :: ((a -> StreamK m a -> StreamK m a) -> StreamK m a -> StreamK m a) -> StreamK m a buildM :: Monad m => (forall r. (a -> StreamK m a -> m r) -> (a -> m r) -> m r -> m r) -> StreamK m a buildSM :: Monad m => ((m a -> StreamK m a -> StreamK m a) -> StreamK m a -> StreamK m a) -> StreamK m a augmentS :: ((a -> StreamK m a -> StreamK m a) -> StreamK m a -> StreamK m a) -> StreamK m a -> StreamK m a augmentSM :: Monad m => ((m a -> StreamK m a -> StreamK m a) -> StreamK m a -> StreamK m a) -> StreamK m a -> StreamK m a -- | Detach a stream from an SVar unShare :: StreamK m a -> StreamK m a -- | Make an empty stream from a stop function. fromStopK :: StopK m -> StreamK m a -- | Make a singleton stream from a callback function. The callback -- function calls the one-shot yield continuation to yield an element. fromYieldK :: YieldK m a -> StreamK m a -- | Add a yield function at the head of the stream. consK :: YieldK m a -> StreamK m a -> StreamK m a -- | A right associative prepend operation to add a pure value at the head -- of an existing stream:: -- --
--   >>> s = 1 `StreamK.cons` 2 `StreamK.cons` 3 `StreamK.cons` StreamK.nil
--   
--   >>> Stream.fold Fold.toList (StreamK.toStream s)
--   [1,2,3]
--   
-- -- It can be used efficiently with foldr: -- --
--   >>> fromFoldable = Prelude.foldr StreamK.cons StreamK.nil
--   
-- -- Same as the following but more efficient: -- --
--   >>> cons x xs = return x `StreamK.consM` xs
--   
cons :: a -> StreamK m a -> StreamK m a infixr 5 `cons` -- | Operator equivalent of cons. -- --
--   > toList $ 1 .: 2 .: 3 .: nil
--   [1,2,3]
--   
(.:) :: a -> StreamK m a -> StreamK m a infixr 5 .: -- | A right associative prepend operation to add an effectful value at the -- head of an existing stream: -- --
--   >>> s = putStrLn "hello" `StreamK.consM` putStrLn "world" `StreamK.consM` StreamK.nil
--   
--   >>> Stream.fold Fold.drain (StreamK.toStream s)
--   hello
--   world
--   
-- -- It can be used efficiently with foldr: -- --
--   >>> fromFoldableM = Prelude.foldr StreamK.consM StreamK.nil
--   
-- -- Same as the following but more efficient: -- --
--   >>> consM x xs = StreamK.fromEffect x `StreamK.append` xs
--   
consM :: Monad m => m a -> StreamK m a -> StreamK m a infixr 5 `consM` consMBy :: Monad m => (StreamK m a -> StreamK m a -> StreamK m a) -> m a -> StreamK m a -> StreamK m a -- | A stream that terminates without producing any output or side effect. -- --
--   >>> Stream.fold Fold.toList (StreamK.toStream StreamK.nil)
--   []
--   
nil :: StreamK m a -- | A stream that terminates without producing any output, but produces a -- side effect. -- --
--   >>> Stream.fold Fold.toList (StreamK.toStream (StreamK.nilM (print "nil")))
--   "nil"
--   []
--   
-- -- Pre-release nilM :: Applicative m => m b -> StreamK m a -- |
--   >>> :{
--   unfoldr step s =
--       case step s of
--           Nothing -> StreamK.nil
--           Just (a, b) -> a `StreamK.cons` unfoldr step b
--   :}
--   
-- -- Build a stream by unfolding a pure step function step -- starting from a seed s. The step function returns the next -- element in the stream and the next seed value. When it is done it -- returns Nothing and the stream ends. For example, -- --
--   >>> :{
--   let f b =
--           if b > 2
--           then Nothing
--           else Just (b, b + 1)
--   in StreamK.toList $ StreamK.unfoldr f 0
--   :}
--   [0,1,2]
--   
unfoldr :: (b -> Maybe (a, b)) -> b -> StreamK m a unfoldrMWith :: Monad m => (m a -> StreamK m a -> StreamK m a) -> (b -> m (Maybe (a, b))) -> b -> StreamK m a -- | Build a stream by unfolding a monadic step function starting -- from a seed. The step function returns the next element in the stream -- and the next seed value. When it is done it returns Nothing and -- the stream ends. For example, -- --
--   >>> :{
--   let f b =
--           if b > 2
--           then return Nothing
--           else return (Just (b, b + 1))
--   in StreamK.toList $ StreamK.unfoldrM f 0
--   :}
--   [0,1,2]
--   
unfoldrM :: Monad m => (b -> m (Maybe (a, b))) -> b -> StreamK m a fromEffect :: Monad m => m a -> StreamK m a fromPure :: a -> StreamK m a -- | Generate an infinite stream by repeating a pure value. -- -- Pre-release repeat :: a -> StreamK m a -- | Like repeatM but takes a stream cons operation to -- combine the actions in a stream specific manner. A serial cons would -- repeat the values serially while an async cons would repeat -- concurrently. -- -- Pre-release repeatMWith :: (m a -> t m a -> t m a) -> m a -> t m a replicateMWith :: (m a -> StreamK m a -> StreamK m a) -> Int -> m a -> StreamK m a fromIndicesMWith :: (m a -> StreamK m a -> StreamK m a) -> (Int -> m a) -> StreamK m a iterateMWith :: Monad m => (m a -> StreamK m a -> StreamK m a) -> (a -> m a) -> m a -> StreamK m a -- |
--   >>> fromFoldable = Prelude.foldr StreamK.cons StreamK.nil
--   
-- -- Construct a stream from a Foldable containing pure values: fromFoldable :: Foldable f => f a -> StreamK m a fromFoldableM :: (Foldable f, Monad m) => f (m a) -> StreamK m a -- | We can define cyclic structures using let: -- --
--   >>> let (a, b) = ([1, b], head a) in (a, b)
--   ([1,1],1)
--   
-- -- The function fix defined as: -- --
--   >>> fix f = let x = f x in x
--   
-- -- ensures that the argument of a function and its output refer to the -- same lazy value x i.e. the same location in memory. Thus -- x can be defined in terms of itself, creating structures with -- cyclic references. -- --
--   >>> f ~(a, b) = ([1, b], head a)
--   
--   >>> fix f
--   ([1,1],1)
--   
-- -- mfix is essentially the same as fix but for monadic -- values. -- -- Using mfix for streams we can construct a stream in which each -- element of the stream is defined in a cyclic fashion. The argument of -- the function being fixed represents the current element of the stream -- which is being returned by the stream monad. Thus, we can use the -- argument to construct itself. -- -- In the following example, the argument action of the function -- f represents the tuple (x,y) returned by it in a -- given iteration. We define the first element of the tuple in terms of -- the second. -- --
--   >>> import System.IO.Unsafe (unsafeInterleaveIO)
--   
-- --
--   >>> :{
--   main = Stream.fold (Fold.drainMapM print) $ StreamK.toStream $ StreamK.mfix f
--       where
--       f action = StreamK.unCross $ do
--           let incr n act = fmap ((+n) . snd) $ unsafeInterleaveIO act
--           x <- StreamK.mkCross $ StreamK.fromStream $ Stream.sequence $ Stream.fromList [incr 1 action, incr 2 action]
--           y <- StreamK.mkCross $ StreamK.fromStream $ Stream.fromList [4,5]
--           return (x, y)
--   :}
--   
-- -- Note: you cannot achieve this by just changing the order of the monad -- statements because that would change the order in which the stream -- elements are generated. -- -- Note that the function f must be lazy in its argument, that's -- why we use unsafeInterleaveIO on action because IO -- monad is strict. -- -- Pre-release mfix :: Monad m => (m a -> StreamK m a) -> StreamK m a uncons :: Applicative m => StreamK m a -> m (Maybe (a, StreamK m a)) -- | Strict left associative fold. foldl' :: Monad m => (b -> a -> b) -> b -> StreamK m a -> m b -- | Strict left fold with an extraction function. Like the standard strict -- left fold, but applies a user supplied extraction function (the third -- argument) to the folded value at the end. This is designed to work -- with the foldl library. The suffix x is a mnemonic -- for extraction. -- -- Note that the accumulator is always evaluated including the initial -- value. foldlx' :: forall m a b x. Monad m => (x -> a -> x) -> x -> (x -> b) -> StreamK m a -> m b -- | Like foldx, but with a monadic step function. foldlMx' :: Monad m => (x -> a -> m x) -> m x -> (x -> m b) -> StreamK m a -> m b -- | Like foldl' but with a monadic step function. foldlM' :: Monad m => (b -> a -> m b) -> m b -> StreamK m a -> m b -- | Lazy right associative fold. foldr :: Monad m => (a -> b -> b) -> b -> StreamK m a -> m b drain :: Monad m => StreamK m a -> m () null :: Monad m => StreamK m a -> m Bool tail :: Applicative m => StreamK m a -> m (Maybe (StreamK m a)) -- | Extract all but the last element of the stream, if any. -- -- Note: This will end up buffering the entire stream. -- -- Pre-release init :: Applicative m => StreamK m a -> m (Maybe (StreamK m a)) map :: (a -> b) -> StreamK m a -> StreamK m b mapMWith :: (m b -> StreamK m b -> StreamK m b) -> (a -> m b) -> StreamK m a -> StreamK m b mapMSerial :: Monad m => (a -> m b) -> StreamK m a -> StreamK m b conjoin :: Monad m => StreamK m a -> StreamK m a -> StreamK m a append :: StreamK m a -> StreamK m a -> StreamK m a infixr 6 `append` -- | Note: When joining many streams in a left associative manner earlier -- streams will get exponential priority than the ones joining later. -- Because of exponentially high weighting of left streams it can be used -- with concatMapWith even on a large number of streams. interleave :: StreamK m a -> StreamK m a -> StreamK m a infixr 6 `interleave` -- | Like interleave but stops interleaving as soon as the first -- stream stops. interleaveFst :: StreamK m a -> StreamK m a -> StreamK m a infixr 6 `interleaveFst` -- | Like interleave but stops interleaving as soon as any of the -- two streams stops. interleaveMin :: StreamK m a -> StreamK m a -> StreamK m a infixr 6 `interleaveMin` crossApplyWith :: (StreamK m b -> StreamK m b -> StreamK m b) -> StreamK m (a -> b) -> StreamK m a -> StreamK m b -- | Apply a stream of functions to a stream of values and flatten the -- results. -- -- Note that the second stream is evaluated multiple times. -- -- Definition: -- --
--   >>> crossApply = StreamK.crossApplyWith StreamK.append
--   
--   >>> crossApply = Stream.crossWith id
--   
crossApply :: StreamK m (a -> b) -> StreamK m a -> StreamK m b crossApplySnd :: StreamK m a -> StreamK m b -> StreamK m b crossApplyFst :: StreamK m a -> StreamK m b -> StreamK m a -- | Definition: -- --
--   >>> crossWith f m1 m2 = fmap f m1 `StreamK.crossApply` m2
--   
-- -- Note that the second stream is evaluated multiple times. crossWith :: Monad m => (a -> b -> c) -> StreamK m a -> StreamK m b -> StreamK m c -- | Given a StreamK m a and StreamK m b generate a -- stream with all possible combinations of the tuple (a, b). -- -- Definition: -- --
--   >>> cross = StreamK.crossWith (,)
--   
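-- -- For example, a small sketch: -- --
--   >>> s1 = StreamK.fromStream $ Stream.fromList [1,2::Int]
--   
--   >>> s2 = StreamK.fromStream $ Stream.fromList "ab"
--   
--   >>> Stream.fold Fold.toList $ StreamK.toStream $ StreamK.cross s1 s2
--   [(1,'a'),(1,'b'),(2,'a'),(2,'b')]
--   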
-- -- The second stream is evaluated multiple times. If that is not desired -- it can be cached in an Array and then generated from the array -- before calling this function. Caching may also improve performance if -- the stream is expensive to evaluate. -- -- See cross for a much faster fused alternative. -- -- Time: O(m x n) -- -- Pre-release cross :: Monad m => StreamK m a -> StreamK m b -> StreamK m (a, b) -- | Run an action before evaluating the stream. before :: Monad m => m b -> StreamK m a -> StreamK m a concatEffect :: Monad m => m (StreamK m a) -> StreamK m a concatMapEffect :: Monad m => (b -> StreamK m a) -> m b -> StreamK m a -- | Perform a concatMap using a specified concat strategy. The -- first argument specifies a merge or concat function that is used to -- merge the streams generated by the map function. concatMapWith :: (StreamK m b -> StreamK m b -> StreamK m b) -> (a -> StreamK m b) -> StreamK m a -> StreamK m b concatMap :: (a -> StreamK m b) -> StreamK m a -> StreamK m b bindWith :: (StreamK m b -> StreamK m b -> StreamK m b) -> StreamK m a -> (a -> StreamK m b) -> StreamK m b -- | Yield an input element in the output stream, map a stream generator on -- it and repeat the process on the resulting stream. Resulting streams -- are flattened using the concatMapWith combinator. This can be -- used for a depth first style (DFS) traversal of a tree like structure. -- -- Example, list a directory tree using DFS: -- --
--   >>> f = StreamK.fromStream . either Dir.readEitherPaths (const Stream.nil)
--   
--   >>> input = StreamK.fromPure (Left ".")
--   
--   >>> ls = StreamK.concatIterateWith StreamK.append f input
--   
-- -- Note that iterateM is a special case of -- concatIterateWith: -- --
--   >>> iterateM f = StreamK.concatIterateWith StreamK.append (StreamK.fromEffect . f) . StreamK.fromEffect
--   
-- -- Pre-release concatIterateWith :: (StreamK m a -> StreamK m a -> StreamK m a) -> (a -> StreamK m a) -> StreamK m a -> StreamK m a -- | In an Either stream iterate on Lefts. This is a special -- case of concatIterateWith: -- --
--   >>> concatIterateLeftsWith combine f = StreamK.concatIterateWith combine (either f (const StreamK.nil))
--   
-- -- To traverse a directory tree: -- --
--   >>> input = StreamK.fromPure (Left ".")
--   
--   >>> ls = StreamK.concatIterateLeftsWith StreamK.append (StreamK.fromStream . Dir.readEither) input
--   
-- -- Pre-release concatIterateLeftsWith :: b ~ Either a c => (StreamK m b -> StreamK m b -> StreamK m b) -> (a -> StreamK m b) -> StreamK m b -> StreamK m b -- | Like iterateMap but carries a state in the stream generation -- function. This can be used to traverse graph like structures, we can -- remember the visited nodes in the state to avoid cycles. -- -- Note that a combination of iterateMap and usingState -- can also be used to traverse graphs. However, this function provides a -- more localized state instead of using a global state. -- -- See also: mfix -- -- Pre-release concatIterateScanWith :: Monad m => (StreamK m a -> StreamK m a -> StreamK m a) -> (b -> a -> m (b, StreamK m a)) -> m b -> StreamK m a -> StreamK m a -- | Combine streams in pairs using a binary combinator, the resulting -- streams are then combined again in pairs recursively until we get to a -- single combined stream. The composition would thus form a binary tree. -- -- For example, you can sort a stream using merge sort like this: -- --
--   >>> s = StreamK.fromStream $ Stream.fromList [5,1,7,9,2]
--   
--   >>> generate = StreamK.fromPure
--   
--   >>> combine = StreamK.mergeBy compare
--   
--   >>> Stream.fold Fold.toList $ StreamK.toStream $ StreamK.mergeMapWith combine generate s
--   [1,2,5,7,9]
--   
-- -- Note that if the stream length is not a power of 2, the binary tree -- composed by mergeMapWith would not be balanced, which may or may not -- be important depending on what you are trying to achieve. -- -- Caution: the stream of streams must be finite -- -- Pre-release mergeMapWith :: (StreamK m b -> StreamK m b -> StreamK m b) -> (a -> StreamK m b) -> StreamK m a -> StreamK m b -- | Like concatIterateWith but uses the pairwise flattening -- combinator mergeMapWith for flattening the resulting streams. -- This can be used for a balanced traversal of a tree like structure. -- -- Example, list a directory tree using balanced traversal: -- --
--   >>> f = StreamK.fromStream . either Dir.readEitherPaths (const Stream.nil)
--   
--   >>> input = StreamK.fromPure (Left ".")
--   
--   >>> ls = StreamK.mergeIterateWith StreamK.interleave f input
--   
-- -- Pre-release mergeIterateWith :: (StreamK m a -> StreamK m a -> StreamK m a) -> (a -> StreamK m a) -> StreamK m a -> StreamK m a -- | Lazy left fold to a stream. foldlS :: (StreamK m b -> a -> StreamK m b) -> StreamK m b -> StreamK m a -> StreamK m b reverse :: StreamK m a -> StreamK m a -- | Lazy left fold to an arbitrary transformer monad. foldlT :: (Monad m, Monad (s m), MonadTrans s) => (s m b -> a -> s m b) -> s m b -> StreamK m a -> s m b -- | Right associative fold to an arbitrary transformer monad. foldrT :: (Monad m, Monad (s m), MonadTrans s) => (a -> s m b -> s m b) -> s m b -> StreamK m a -> s m b liftInner :: (Monad m, MonadTrans t, Monad (t m)) => StreamK m a -> StreamK (t m) a evalStateT :: Monad m => m s -> StreamK (StateT s m) a -> StreamK m a newtype StreamK m a MkStream :: (forall r. State StreamK m a -> (a -> StreamK m a -> m r) -> (a -> m r) -> m r -> m r) -> StreamK m a fromList :: [a] -> StreamK m a -- | Convert a fused Stream to StreamK. -- -- For example: -- --
--   >>> s1 = StreamK.fromStream $ Stream.fromList [1,2]
--   
--   >>> s2 = StreamK.fromStream $ Stream.fromList [3,4]
--   
--   >>> Stream.fold Fold.toList $ StreamK.toStream $ s1 `StreamK.append` s2
--   [1,2,3,4]
--   
fromStream :: Monad m => Stream m a -> StreamK m a -- | Convert a StreamK to a fused Stream. toStream :: Applicative m => StreamK m a -> Stream m a -- |
--   >>> repeatM = StreamK.sequence . StreamK.repeat
--   
--   >>> repeatM = fix . StreamK.consM
--   
--   >>> repeatM = cycle1 . StreamK.fromEffect
--   
-- -- Generate a stream by repeatedly executing a monadic action forever. -- --
--   >>> :{
--   repeatAction =
--          StreamK.repeatM (threadDelay 1000000 >> print 1)
--        & StreamK.take 10
--        & StreamK.fold Fold.drain
--   :}
--   
repeatM :: Monad m => m a -> StreamK m a replicate :: Int -> a -> StreamK m a replicateM :: Monad m => Int -> m a -> StreamK m a fromIndices :: (Int -> a) -> StreamK m a fromIndicesM :: Monad m => (Int -> m a) -> StreamK m a -- |
--   >>> iterate f x = x `StreamK.cons` iterate f x
--   
-- -- Generate an infinite stream with x as the first element and -- each successive element derived by applying the function f on -- the previous element. -- --
--   >>> StreamK.toList $ StreamK.take 5 $ StreamK.iterate (+1) 1
--   [1,2,3,4,5]
--   
iterate :: (a -> a) -> a -> StreamK m a -- |
--   >>> iterateM f m = m >>= \a -> return a `StreamK.consM` iterateM f (f a)
--   
-- -- Generate an infinite stream with the first element generated by the -- action m and each successive element derived by applying the -- monadic function f on the previous element. -- --
--   >>> :{
--   StreamK.iterateM (\x -> print x >> return (x + 1)) (return 0)
--       & StreamK.take 3
--       & StreamK.toList
--   :}
--   0
--   1
--   [0,1,2]
--   
iterateM :: Monad m => (a -> m a) -> m a -> StreamK m a foldr1 :: Monad m => (a -> a -> a) -> StreamK m a -> m (Maybe a) -- | Fold a stream using the supplied left Fold and reducing the -- resulting expression strictly at each step. The behavior is similar to -- foldl'. A Fold can terminate early without consuming the -- full stream. See the documentation of individual Folds for -- termination behavior. -- -- Definitions: -- --
--   >>> fold f = fmap fst . StreamK.foldBreak f
--   
--   >>> fold f = StreamK.parseD (Parser.fromFold f)
--   
-- -- Example: -- --
--   >>> StreamK.fold Fold.sum $ StreamK.fromStream $ Stream.enumerateFromTo 1 100
--   5050
--   
fold :: Monad m => Fold m a b -> StreamK m a -> m b -- | Like fold but also returns the remaining stream. The resulting -- stream would be nil if the stream finished before the fold. foldBreak :: Monad m => Fold m a b -> StreamK m a -> m (b, StreamK m a) -- | Fold resulting in either breaking the stream or continuation of the -- fold. Instead of supplying the input stream in one go we can run the -- fold multiple times, each time supplying the next segment of the input -- stream. If the fold has not yet finished it returns a fold that can be -- run again otherwise it returns the fold result and the residual -- stream. -- -- Internal foldEither :: Monad m => Fold m a b -> StreamK m a -> m (Either (Fold m a b) (b, StreamK m a)) -- | Generate streams from individual elements of a stream and fold the -- concatenation of those streams using the supplied fold. Return the -- result of the fold and residual stream. -- -- For example, this can be used to efficiently fold an Array Word8 -- stream using Word8 folds. -- -- Internal foldConcat :: Monad m => Producer m a b -> Fold m b c -> StreamK m a -> m (c, StreamK m a) -- | Run a Parser over a stream and return rest of the Stream. parseDBreak :: Monad m => Parser a m b -> StreamK m a -> m (Either ParseError b, StreamK m a) parseD :: Monad m => Parser a m b -> StreamK m a -> m (Either ParseError b) -- | Run a ParserK over a chunked StreamK and return the -- parse result and the remaining Stream. parseBreakChunks :: (Monad m, Unbox a) => ParserK (Array a) m b -> StreamK m (Array a) -> m (Either ParseError b, StreamK m (Array a)) parseChunks :: (Monad m, Unbox a) => ParserK (Array a) m b -> StreamK m (Array a) -> m (Either ParseError b) -- | Similar to parseBreak but works on singular elements. parseBreak :: forall m a b. Monad m => ParserK a m b -> StreamK m a -> m (Either ParseError b, StreamK m a) -- | Run a ParserK over a StreamK. Please use -- parseChunks where possible, for better performance. parse :: Monad m => ParserK a m b -> StreamK m a -> m (Either ParseError b) -- | Similar to parseBreak but works on generic arrays parseBreakChunksGeneric :: forall m a b. Monad m => ParserK (Array a) m b -> StreamK m (Array a) -> m (Either ParseError b, StreamK m (Array a)) parseChunksGeneric :: Monad m => ParserK (Array a) m b -> StreamK m (Array a) -> m (Either ParseError b) head :: Monad m => StreamK m a -> m (Maybe a) elem :: (Monad m, Eq a) => a -> StreamK m a -> m Bool notElem :: (Monad m, Eq a) => a -> StreamK m a -> m Bool all :: Monad m => (a -> Bool) -> StreamK m a -> m Bool any :: Monad m => (a -> Bool) -> StreamK m a -> m Bool -- | Extract the last element of the stream, if any. last :: Monad m => StreamK m a -> m (Maybe a) minimum :: (Monad m, Ord a) => StreamK m a -> m (Maybe a) minimumBy :: Monad m => (a -> a -> Ordering) -> StreamK m a -> m (Maybe a) maximum :: (Monad m, Ord a) => StreamK m a -> m (Maybe a) maximumBy :: Monad m => (a -> a -> Ordering) -> StreamK m a -> m (Maybe a) findIndices :: (a -> Bool) -> StreamK m a -> StreamK m Int lookup :: (Monad m, Eq a) => a -> StreamK m (a, b) -> m (Maybe b) findM :: Monad m => (a -> m Bool) -> StreamK m a -> m (Maybe a) find :: Monad m => (a -> Bool) -> StreamK m a -> m (Maybe a) (!!) :: Monad m => StreamK m a -> Int -> m (Maybe a) -- | Apply a monadic action to each element of the stream and discard the -- output of the action. mapM_ :: Monad m => (a -> m b) -> StreamK m a -> m () toList :: Monad m => StreamK m a -> m [a] hoist :: (Monad m, Monad n) => (forall x. 
m x -> n x) -> StreamK m a -> StreamK n a scanl' :: (b -> a -> b) -> b -> StreamK m a -> StreamK m b scanlx' :: (x -> a -> x) -> x -> (x -> b) -> StreamK m a -> StreamK m b filter :: (a -> Bool) -> StreamK m a -> StreamK m a take :: Int -> StreamK m a -> StreamK m a takeWhile :: (a -> Bool) -> StreamK m a -> StreamK m a drop :: Int -> StreamK m a -> StreamK m a dropWhile :: (a -> Bool) -> StreamK m a -> StreamK m a mapM :: Monad m => (a -> m b) -> StreamK m a -> StreamK m b sequence :: Monad m => StreamK m (m a) -> StreamK m a intersperseM :: Monad m => m a -> StreamK m a -> StreamK m a intersperse :: Monad m => a -> StreamK m a -> StreamK m a insertBy :: (a -> a -> Ordering) -> a -> StreamK m a -> StreamK m a deleteBy :: (a -> a -> Bool) -> a -> StreamK m a -> StreamK m a -- | Sort the input stream using a supplied comparison function. -- -- Sorting can be achieved by simply: -- --
--   >>> sortBy cmp = StreamK.mergeMapWith (StreamK.mergeBy cmp) StreamK.fromPure
--   
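-- -- For example, a small sketch: -- --
--   >>> input = StreamK.fromStream $ Stream.fromList [3,1,2,5,4::Int]
--   
--   >>> Stream.fold Fold.toList $ StreamK.toStream $ StreamK.sortBy compare input
--   [1,2,3,4,5]
--   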
-- -- However, this combinator uses a parser to first split the input stream -- into down and up sorted segments and then merges them to optimize -- sorting when pre-sorted sequences exist in the input stream. -- -- O(n) space sortBy :: Monad m => (a -> a -> Ordering) -> StreamK m a -> StreamK m a mapMaybe :: (a -> Maybe b) -> StreamK m a -> StreamK m b -- | Zipping of n streams can be performed by combining the -- streams pair wise using mergeMapWith with O(n * log n) time -- complexity. If used with concatMapWith it will have O(n^2) -- performance. zipWith :: Monad m => (a -> b -> c) -> StreamK m a -> StreamK m b -> StreamK m c zipWithM :: Monad m => (a -> b -> m c) -> StreamK m a -> StreamK m b -> StreamK m c -- | Merging of n streams can be performed by combining the -- streams pair wise using mergeMapWith to give O(n * log n) time -- complexity. If used with concatMapWith it will have O(n^2) -- performance. mergeBy :: (a -> a -> Ordering) -> StreamK m a -> StreamK m a -> StreamK m a mergeByM :: Monad m => (a -> a -> m Ordering) -> StreamK m a -> StreamK m a -> StreamK m a the :: (Eq a, Monad m) => StreamK m a -> m (Maybe a) -- | Like Streamly.Data.Stream.handle but with one significant -- difference, this function observes exceptions from the consumer of the -- stream as well. -- -- You can also convert StreamK to Stream and use exception -- handling from Stream module: -- --
--   >>> handle f s = StreamK.fromStream $ Stream.handle (\e -> StreamK.toStream (f e)) (StreamK.toStream s)
--   
handle :: (MonadCatch m, Exception e) => (e -> m (StreamK m a)) -> StreamK m a -> StreamK m a -- | Like Streamly.Data.Stream.bracketIO but with one significant -- difference, this function observes exceptions from the consumer of the -- stream as well. Therefore, it cleans up the resource promptly when the -- consumer encounters an exception. -- -- You can also convert StreamK to Stream and use resource -- handling from Stream module: -- --
--   >>> bracketIO bef aft bet = StreamK.fromStream $ Stream.bracketIO bef aft (StreamK.toStream . bet)
--   
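-- -- For example, a small sketch (the alloc and release helpers here are -- just illustrative) where the resource is acquired when the stream -- starts and released when it ends: -- --
--   >>> alloc = putStrLn "acquire" >> return [1,2,3::Int]
--   
--   >>> release _ = putStrLn "release"
--   
--   >>> s = StreamK.bracketIO alloc release (StreamK.fromStream . Stream.fromList)
--   
--   >>> Stream.fold Fold.toList $ StreamK.toStream s
--   acquire
--   release
--   [1,2,3]
--   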
bracketIO :: (MonadIO m, MonadCatch m) => IO b -> (b -> IO c) -> (b -> StreamK m a) -> StreamK m a -- | Streams represented as chains of function calls using Continuation -- Passing Style (CPS), suitable for dynamically composing a potentially -- large number of streams. -- -- Unlike the statically fused operations in Streamly.Data.Stream, -- StreamK operations are less efficient, involving a function call -- overhead for each element, but they exhibit linear O(n) time -- complexity wrt the number of stream compositions. Therefore, they -- are suitable for dynamically composing streams, e.g. appending -- potentially infinite streams in recursive loops. While fused streams -- can be used to efficiently process elements as small as a single byte, -- CPS streams are typically used on bigger chunks of data to avoid the -- larger overhead per element. For more details see the Stream vs -- StreamK section in the Streamly.Data.Stream module. -- -- In addition to the combinators in this module, you can use operations -- from Streamly.Data.Stream for StreamK as well by converting -- StreamK to Stream (toStream), and vice-versa -- (fromStream). Please refer to -- Streamly.Internal.Data.StreamK for more functions that have not -- yet been released. -- -- For documentation see the corresponding combinators in -- Streamly.Data.Stream. Documentation has been omitted in this -- module unless there is a difference worth mentioning or if the -- combinator does not exist in Streamly.Data.Stream. module Streamly.Data.StreamK data StreamK m a -- | A stream that terminates without producing any output or side effect. -- --
--   >>> Stream.fold Fold.toList (StreamK.toStream StreamK.nil)
--   []
--   
nil :: StreamK m a -- | A stream that terminates without producing any output, but produces a -- side effect. -- --
--   >>> Stream.fold Fold.toList (StreamK.toStream (StreamK.nilM (print "nil")))
--   "nil"
--   []
--   
-- -- Pre-release nilM :: Applicative m => m b -> StreamK m a -- | A right associative prepend operation to add a pure value at the head -- of an existing stream: -- --
--   >>> s = 1 `StreamK.cons` 2 `StreamK.cons` 3 `StreamK.cons` StreamK.nil
--   
--   >>> Stream.fold Fold.toList (StreamK.toStream s)
--   [1,2,3]
--   
-- -- It can be used efficiently with foldr: -- --
--   >>> fromFoldable = Prelude.foldr StreamK.cons StreamK.nil
--   
-- -- Same as the following but more efficient: -- --
--   >>> cons x xs = return x `StreamK.consM` xs
--   
cons :: a -> StreamK m a -> StreamK m a infixr 5 `cons` -- | A right associative prepend operation to add an effectful value at the -- head of an existing stream: -- --
--   >>> s = putStrLn "hello" `StreamK.consM` putStrLn "world" `StreamK.consM` StreamK.nil
--   
--   >>> Stream.fold Fold.drain (StreamK.toStream s)
--   hello
--   world
--   
-- -- It can be used efficiently with foldr: -- --
--   >>> fromFoldableM = Prelude.foldr StreamK.consM StreamK.nil
--   
-- -- Same as the following but more efficient: -- --
--   >>> consM x xs = StreamK.fromEffect x `StreamK.append` xs
--   
consM :: Monad m => m a -> StreamK m a -> StreamK m a infixr 5 `consM` fromPure :: a -> StreamK m a fromEffect :: Monad m => m a -> StreamK m a -- | Convert a fused Stream to StreamK. -- -- For example: -- --
--   >>> s1 = StreamK.fromStream $ Stream.fromList [1,2]
--   
--   >>> s2 = StreamK.fromStream $ Stream.fromList [3,4]
--   
--   >>> Stream.fold Fold.toList $ StreamK.toStream $ s1 `StreamK.append` s2
--   [1,2,3,4]
--   
fromStream :: Monad m => Stream m a -> StreamK m a -- | Convert a StreamK to a fused Stream. toStream :: Applicative m => StreamK m a -> Stream m a -- |
--   >>> fromFoldable = Prelude.foldr StreamK.cons StreamK.nil
--   
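-- For example (assuming the same doctest setup as the examples above):
--
--   >>> Stream.fold Fold.toList $ StreamK.toStream $ StreamK.fromFoldable [1,2,3]
--   [1,2,3]
--   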
-- -- Construct a stream from a Foldable containing pure values: fromFoldable :: Foldable f => f a -> StreamK m a uncons :: Applicative m => StreamK m a -> m (Maybe (a, StreamK m a)) drain :: Monad m => StreamK m a -> m () -- | Run a ParserK over a StreamK. Please use -- parseChunks where possible, for better performance. parse :: Monad m => ParserK a m b -> StreamK m a -> m (Either ParseError b) -- | Similar to parseBreak but works on singular elements. parseBreak :: forall m a b. Monad m => ParserK a m b -> StreamK m a -> m (Either ParseError b, StreamK m a) -- | Run a ParserK over a chunked StreamK and return the -- parse result and the remaining Stream. parseBreakChunks :: (Monad m, Unbox a) => ParserK (Array a) m b -> StreamK m (Array a) -> m (Either ParseError b, StreamK m (Array a)) parseChunks :: (Monad m, Unbox a) => ParserK (Array a) m b -> StreamK m (Array a) -> m (Either ParseError b) mapM :: Monad m => (a -> m b) -> StreamK m a -> StreamK m b dropWhile :: (a -> Bool) -> StreamK m a -> StreamK m a take :: Int -> StreamK m a -> StreamK m a append :: StreamK m a -> StreamK m a -> StreamK m a infixr 6 `append` -- | Note: When joining many streams in a left associative manner earlier -- streams will get exponential priority than the ones joining later. -- Because of exponentially high weighting of left streams it can be used -- with concatMapWith even on a large number of streams. interleave :: StreamK m a -> StreamK m a -> StreamK m a infixr 6 `interleave` -- | Merging of n streams can be performed by combining the -- streams pair wise using mergeMapWith to give O(n * log n) time -- complexity. If used with concatMapWith it will have O(n^2) -- performance. mergeBy :: (a -> a -> Ordering) -> StreamK m a -> StreamK m a -> StreamK m a mergeByM :: Monad m => (a -> a -> m Ordering) -> StreamK m a -> StreamK m a -> StreamK m a -- | Zipping of n streams can be performed by combining the -- streams pair wise using mergeMapWith with O(n * log n) time -- complexity. If used with concatMapWith it will have O(n^2) -- performance. zipWith :: Monad m => (a -> b -> c) -> StreamK m a -> StreamK m b -> StreamK m c zipWithM :: Monad m => (a -> b -> m c) -> StreamK m a -> StreamK m b -> StreamK m c -- | Definition: -- --
--   >>> crossWith f m1 m2 = fmap f m1 `StreamK.crossApply` m2
--   
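-- For example, a sketch of the cross product; the output shown assumes the
-- usual ordering in which the second stream varies fastest:
--
--   >>> s1 = StreamK.fromStream $ Stream.fromList [1,2]
--   >>> s2 = StreamK.fromStream $ Stream.fromList [3,4]
--   >>> Stream.fold Fold.toList $ StreamK.toStream $ StreamK.crossWith (,) s1 s2
--   [(1,3),(1,4),(2,3),(2,4)]
--   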
-- -- Note that the second stream is evaluated multiple times. crossWith :: Monad m => (a -> b -> c) -> StreamK m a -> StreamK m b -> StreamK m c concatEffect :: Monad m => m (StreamK m a) -> StreamK m a -- | Perform a concatMap using a specified concat strategy. The -- first argument specifies a merge or concat function that is used to -- merge the streams generated by the map function. concatMapWith :: (StreamK m b -> StreamK m b -> StreamK m b) -> (a -> StreamK m b) -> StreamK m a -> StreamK m b -- | Combine streams in pairs using a binary combinator, the resulting -- streams are then combined again in pairs recursively until we get to a -- single combined stream. The composition would thus form a binary tree. -- -- For example, you can sort a stream using merge sort like this: -- --
--   >>> s = StreamK.fromStream $ Stream.fromList [5,1,7,9,2]
--   
--   >>> generate = StreamK.fromPure
--   
--   >>> combine = StreamK.mergeBy compare
--   
--   >>> Stream.fold Fold.toList $ StreamK.toStream $ StreamK.mergeMapWith combine generate s
--   [1,2,5,7,9]
--   
-- -- Note that if the stream length is not a power of 2, the binary tree -- composed by mergeMapWith would not be balanced, which may or may not -- be important depending on what you are trying to achieve. -- -- Caution: the stream of streams must be finite -- -- Pre-release mergeMapWith :: (StreamK m b -> StreamK m b -> StreamK m b) -> (a -> StreamK m b) -> StreamK m a -> StreamK m b reverse :: StreamK m a -> StreamK m a -- | Sort the input stream using a supplied comparison function. -- -- Sorting can be achieved by simply: -- --
--   >>> sortBy cmp = StreamK.mergeMapWith (StreamK.mergeBy cmp) StreamK.fromPure
--   
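-- For example (assuming the same doctest setup as the examples above):
--
--   >>> input = StreamK.fromStream $ Stream.fromList [3,1,2,4]
--   >>> Stream.fold Fold.toList $ StreamK.toStream $ StreamK.sortBy compare input
--   [1,2,3,4]
--   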
-- -- However, this combinator uses a parser to first split the input stream -- into down and up sorted segments and then merges them to optimize -- sorting when pre-sorted sequences exist in the input stream. -- -- O(n) space sortBy :: Monad m => (a -> a -> Ordering) -> StreamK m a -> StreamK m a -- | Like Streamly.Data.Stream.handle but with one significant -- difference, this function observes exceptions from the consumer of the -- stream as well. -- -- You can also convert StreamK to Stream and use exception -- handling from Stream module: -- --
--   >>> handle f s = StreamK.fromStream $ Stream.handle (\e -> StreamK.toStream (f e)) (StreamK.toStream s)
--   
handle :: (MonadCatch m, Exception e) => (e -> m (StreamK m a)) -> StreamK m a -> StreamK m a -- | Like Streamly.Data.Stream.bracketIO but with one significant -- difference, this function observes exceptions from the consumer of the -- stream as well. Therefore, it cleans up the resource promptly when the -- consumer encounters an exception. -- -- You can also convert StreamK to Stream and use resource -- handling from Stream module: -- --
--   >>> bracketIO bef aft bet = StreamK.fromStream $ Stream.bracketIO bef aft (StreamK.toStream . bet)
--   
bracketIO :: (MonadIO m, MonadCatch m) => IO b -> (b -> IO c) -> (b -> StreamK m a) -> StreamK m a module Streamly.Internal.Data.ParserK -- | The intermediate result of running a parser step. The parser driver -- may stop with a final result, pause with a continuation to resume, or -- fail with an error. -- -- See ParserD docs. This is the same as the ParserD Step except that it -- uses a continuation in Partial and Continue constructors instead of a -- state in case of ParserD. -- -- Pre-release data Step a m r Done :: !Int -> r -> Step a m r Partial :: !Int -> (Input a -> m (Step a m r)) -> Step a m r Continue :: !Int -> (Input a -> m (Step a m r)) -> Step a m r Error :: !Int -> String -> Step a m r data Input a None :: Input a Chunk :: a -> Input a -- | The parser's result. -- -- Int is the position index into the current input array. Could be -- negative. Cannot be beyond the input array max bound. -- -- Pre-release data ParseResult b Success :: !Int -> !b -> ParseResult b Failure :: !Int -> !String -> ParseResult b -- | A continuation passing style parser representation. A continuation of -- Steps, each step passes a state and a parse result to the next -- Step. The resulting Step may carry a continuation that -- consumes input a and results in another Step. -- Essentially, the continuation may either consume input without a -- result or return a result with no further input to be consumed. newtype ParserK a m b MkParser :: (forall r. (ParseResult b -> Int -> Input a -> m (Step a m r)) -> Int -> Int -> Input a -> m (Step a m r)) -> ParserK a m b [runParser] :: ParserK a m b -> forall r. (ParseResult b -> Int -> Input a -> m (Step a m r)) -> Int -> Int -> Input a -> m (Step a m r) -- | Convert an element Parser to a chunked ParserK. A -- chunked parser is more efficient than an element parser. -- -- Pre-release adaptC :: (Monad m, Unbox a) => Parser a m b -> ParserK (Array a) m b -- | Convert a Parser to ParserK. -- -- Pre-release adapt :: Monad m => Parser a m b -> ParserK a m b -- | A generic adaptC. Similar to adaptC but is not -- constrained to Unbox types. -- -- Pre-release adaptCG :: Monad m => Parser a m b -> ParserK (Array a) m b -- | A parser that always yields a pure value without consuming any input. -- -- Pre-release fromPure :: b -> ParserK a m b -- | See fromEffect. -- -- Pre-release fromEffect :: Monad m => m b -> ParserK a m b -- | A parser that always fails with an error message without consuming any -- input. -- -- Pre-release die :: String -> ParserK a m b -- | Unconstrained version of Streamly.Data.Array module. -- -- See the Streamly.Data.Array module for documentation. module Streamly.Data.Array.Generic data Array a fromListN :: Int -> [a] -> Array a fromList :: [a] -> Array a createOf :: MonadIO m => Int -> Fold m a (Array a) -- | Fold the whole input to a single array. -- -- Caution! Do not use this on infinite streams. create :: MonadIO m => Fold m a (Array a) toList :: Array a -> [a] read :: Monad m => Array a -> Stream m a readRev :: Monad m => Array a -> Stream m a reader :: Monad m => Unfold m (Array a) a length :: Array a -> Int -- | Lookup the element at the given index. Index starts from 0. 
getIndex :: Int -> Array a -> Maybe a writeN :: MonadIO m => Int -> Fold m a (Array a) write :: MonadIO m => Fold m a (Array a) module Streamly.Internal.Data.Array data Array a Array :: {-# UNPACK #-} !MutByteArray -> {-# UNPACK #-} !Int -> {-# UNPACK #-} !Int -> Array a [arrContents] :: Array a -> {-# UNPACK #-} !MutByteArray [arrStart] :: Array a -> {-# UNPACK #-} !Int [arrEnd] :: Array a -> {-# UNPACK #-} !Int -- | Makes an immutable array using the underlying memory of the mutable -- array. -- -- Please make sure that there are no other references to the mutable -- array lying around, so that it is never used after freezing it using -- unsafeFreeze. If the underlying array is mutated, the immutable -- promise is lost. -- -- Pre-release unsafeFreeze :: MutArray a -> Array a -- | Similar to unsafeFreeze but uses rightSize on the -- mutable array first. unsafeFreezeWithShrink :: Unbox a => MutArray a -> Array a -- | Makes a mutable array using the underlying memory of the immutable -- array. -- -- Please make sure that there are no other references to the immutable -- array lying around, so that it is never used after thawing it using -- unsafeThaw. If the resulting array is mutated, any references -- to the older immutable array are mutated as well. -- -- Pre-release unsafeThaw :: Array a -> MutArray a -- | Return a copy of the Array in pinned memory if unpinned, else -- return the original array. pin :: Array a -> IO (Array a) -- | Return a copy of the Array in unpinned memory if pinned, else -- return the original array. unpin :: Array a -> IO (Array a) -- | Return True if the array is allocated in pinned memory. isPinned :: Array a -> Bool -- | Use an Array a as Ptr a. -- -- See unsafePinnedAsPtr in the Mutable array module for more -- details. -- -- Unsafe -- -- Pre-release unsafePinnedAsPtr :: MonadIO m => Array a -> (Ptr a -> m b) -> m b empty :: Array a clone :: MonadIO m => Array a -> m (Array a) pinnedClone :: MonadIO m => Array a -> m (Array a) -- | Create two slices of an array without copying the original array. The -- specified index i is the first index of the second slice. splitAt :: Unbox a => Int -> Array a -> (Array a, Array a) breakOn :: MonadIO m => Word8 -> Array Word8 -> m (Array Word8, Maybe (Array Word8)) -- | Fold "step" has a dependency on "initial", and each step is dependent -- on the previous invocation of step due to state passing, finally -- extract depends on the result of step, therefore, as long as the fold -- is driven in the correct order the operations would be correctly -- ordered. We need to ensure that we strictly evaluate the previous step -- completely before the next step. -- -- To not share the same array we need to make sure that the result of -- "initial" is not shared. Existential type ensures that it does not get -- shared across different folds. However, if we invoke "initial" -- multiple times for the same fold, there is a possiblity of sharing the -- two because the compiler would consider it as a pure value. One such -- example is the chunksOf combinator, or using an array creation fold -- with foldMany combinator. Is there a proper way in GHC to tell it to -- not share a pure expression in a particular case? -- -- For this reason array creation folds have a MonadIO constraint. Pure -- folds could be unsafe and dangerous. This is dangerous especially when -- used with foldMany like operations. -- --
--   >>> unsafePureWrite = Array.unsafeMakePure Array.write
--   
unsafeMakePure :: Monad m => Fold IO a b -> Fold m a b -- | createOf n folds a maximum of n elements from the -- input stream to an Array. createOf :: forall m a. (MonadIO m, Unbox a) => Int -> Fold m a (Array a) -- | Like createOf but creates a pinned array. pinnedCreateOf :: forall m a. (MonadIO m, Unbox a) => Int -> Fold m a (Array a) -- | Like createOf but does not check the array bounds when writing. -- The fold driver must not call the step function more than n -- times otherwise it will corrupt the memory and crash. This function -- exists mainly because any conditional in the step function blocks -- fusion causing 10x performance slowdown. unsafeCreateOf :: forall m a. (MonadIO m, Unbox a) => Int -> Fold m a (Array a) unsafePinnedCreateOf :: forall m a. (MonadIO m, Unbox a) => Int -> Fold m a (Array a) -- | Fold the whole input to a single array. -- -- Caution! Do not use this on infinite streams. create :: forall m a. (MonadIO m, Unbox a) => Fold m a (Array a) -- | Like create but creates a pinned array. pinnedCreate :: forall m a. (MonadIO m, Unbox a) => Fold m a (Array a) createWith :: forall m a. (MonadIO m, Unbox a) => Int -> Fold m a (Array a) -- | Create an Array from the first N elements of a list. The array -- is allocated to size N, if the list terminates before N elements then -- the array may hold less than N elements. fromListN :: Unbox a => Int -> [a] -> Array a -- | Like fromListN but creates a pinned array. pinnedFromListN :: Unbox a => Int -> [a] -> Array a -- | Create an Array from a list. The list must be of finite size. fromList :: Unbox a => [a] -> Array a -- | Like fromList but creates a pinned array. pinnedFromList :: Unbox a => [a] -> Array a -- | Create an Array from the first N elements of a list in reverse -- order. The array is allocated to size N, if the list terminates before -- N elements then the array may hold less than N elements. -- -- Pre-release fromListRevN :: Unbox a => Int -> [a] -> Array a -- | Create an Array from a list in reverse order. The list must be -- of finite size. -- -- Pre-release fromListRev :: Unbox a => [a] -> Array a -- | Create an Array from the first N elements of a stream. The -- array is allocated to size N, if the stream terminates before N -- elements then the array may hold less than N elements. -- --
--   >>> fromStreamN n = Stream.fold (Array.writeN n)
--   
-- -- Pre-release fromStreamN :: (MonadIO m, Unbox a) => Int -> Stream m a -> m (Array a) -- | Create an Array from a stream. This is useful when we want to -- create a single array from a stream of unknown size. writeN is -- at least twice as efficient when the size is already known. -- --
--   >>> fromStream = Stream.fold Array.write
--   
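-- For example (assuming the usual doctest imports for this module):
--
--   >>> Array.toList <$> Array.fromStream (Stream.fromList [1,2,3::Int])
--   [1,2,3]
--   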
-- -- Note that if the input stream is too large, memory allocation for the -- array may fail. When the stream size is not known, chunksOf -- followed by processing of individual arrays in the resulting stream -- should be preferred. -- -- Pre-release fromStream :: (MonadIO m, Unbox a) => Stream m a -> m (Array a) fromPureStreamN :: Unbox a => Int -> Stream Identity a -> Array a -- | Convert a pure stream in Identity monad to an immutable array. -- -- Same as the following but with better performance: -- --
--   >>> fromPureStream = Array.fromList . runIdentity . Stream.toList
--   
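-- For example (assuming the usual doctest imports for this module):
--
--   >>> Array.toList $ Array.fromPureStream (Stream.fromList [1,2,3::Int])
--   [1,2,3]
--   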
fromPureStream :: Unbox a => Stream Identity a -> Array a -- | Copy a null terminated immutable Addr# Word8 sequence into an -- array. -- -- Unsafe: The caller is responsible for safe addressing. -- -- Note that this is completely safe when reading from Haskell string -- literals because they are guaranteed to be NULL terminated: -- --
--   >>> Array.toList $ Array.fromByteStr# "\1\2\3\0"#
--   [1,2,3]
--   
-- -- Note that this should be evaluated strictly to ensure that we do not -- hold the reference to the pointer in a lazy thunk. fromByteStr# :: Addr# -> Array Word8 -- | Note that this should be evaluated strictly to ensure that we do not -- hold the reference to the pointer in a lazy thunk. fromByteStr :: Ptr Word8 -> Array Word8 -- | Copy an immutable 'Ptr Word8' sequence into an array. -- -- Unsafe: The caller is responsible for safe addressing. -- -- Note that this should be evaluated strictly to ensure that we do not -- hold the reference to the pointer in a lazy thunk. fromPtrN :: Int -> Ptr Word8 -> Array Word8 -- | Given a stream of arrays, splice them all together to generate a -- single array. The stream must be finite. fromChunks :: (MonadIO m, Unbox a) => Stream m (Array a) -> m (Array a) -- | Convert an array stream to an array. Note that this requires peak -- memory that is double the size of the array stream. fromChunksK :: (MonadIO m, Unbox a) => StreamK m (Array a) -> m (Array a) -- | Return element at the specified index without checking the bounds. -- -- Unsafe because it does not check the bounds of the array. unsafeIndexIO :: forall a. Unbox a => Int -> Array a -> IO a -- | Return element at the specified index without checking the bounds. getIndexUnsafe :: forall a. Unbox a => Int -> Array a -> a -- | Convert an Array into a stream. -- -- Pre-release read :: (Monad m, Unbox a) => Array a -> Stream m a -- | Convert an Array into a stream in reverse order. -- -- Pre-release readRev :: (Monad m, Unbox a) => Array a -> Stream m a toStreamK :: forall m a. (Monad m, Unbox a) => Array a -> StreamK m a toStreamKRev :: forall m a. (Monad m, Unbox a) => Array a -> StreamK m a -- | Convert an Array into a list. toList :: Unbox a => Array a -> [a] producer :: forall m a. (Monad m, Unbox a) => Producer m (Array a) a -- | Unfold an array into a stream, does not check the end of the array, -- the user is responsible for terminating the stream within the array -- bounds. For high performance application where the end condition can -- be determined by a terminating fold. -- -- Written in the hope that it may be faster than "read", however, in the -- case for which this was written, "read" proves to be faster even -- though the core generated with unsafeRead looks simpler. -- -- Pre-release readerUnsafe :: forall m a. (Monad m, Unbox a) => Unfold m (Array a) a -- | Unfold an array into a stream. reader :: forall m a. (Monad m, Unbox a) => Unfold m (Array a) a -- | Unfold an array into a stream in reverse order. readerRev :: forall m a. (Monad m, Unbox a) => Unfold m (Array a) a -- | O(1) Get the length of the array i.e. the number of elements in -- the array. length :: Unbox a => Array a -> Int -- | O(1) Get the byte length of the array. byteLength :: Array a -> Int foldl' :: forall a b. Unbox a => (b -> a -> b) -> b -> Array a -> b foldr :: Unbox a => (a -> b -> b) -> b -> Array a -> b -- | Byte compare two arrays. Compare the length of the arrays. If the -- length is equal, compare the lexicographical ordering of two -- underlying byte arrays otherwise return the result of length -- comparison. -- -- Unsafe: Note that the Unbox instance of sum types with -- constructors of different sizes may leave some memory uninitialized -- which can make byte comparison unreliable. -- -- Pre-release byteCmp :: Array a -> Array a -> Ordering -- | Byte equality of two arrays. -- --
--   >>> byteEq arr1 arr2 = (==) EQ $ Array.byteCmp arr1 arr2
--   
-- -- Unsafe: See byteCmp. byteEq :: Array a -> Array a -> Bool -- | Copy two immutable arrays into a new array. If you want to splice more -- than two arrays then this operation would be highly inefficient -- because it would make a copy on every splice operation, instead use -- the fromChunksK operation to combine n immutable arrays. splice :: MonadIO m => Array a -> Array a -> m (Array a) -- | chunksOf n stream groups the elements in the input stream -- into arrays of n elements each. -- -- Same as the following but may be more efficient: -- --
--   >>> chunksOf n = Stream.foldMany (Array.writeN n)
--   
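-- For example, a sketch (assumes the usual doctest imports for this module;
-- the trailing short chunk is emitted as is):
--
--   >>> input = Stream.fromList ([1..5] :: [Int])
--   >>> Stream.fold Fold.toList $ fmap Array.toList $ Array.chunksOf 2 input
--   [[1,2],[3,4],[5]]
--   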
-- -- Pre-release chunksOf :: forall m a. (MonadIO m, Unbox a) => Int -> Stream m a -> Stream m (Array a) -- | Like chunksOf but creates pinned arrays. pinnedChunksOf :: forall m a. (MonadIO m, Unbox a) => Int -> Stream m a -> Stream m (Array a) buildChunks :: (MonadIO m, Unbox a) => Stream m a -> m (StreamK m (Array a)) -- | Convert a stream of arrays into a stream of their elements. -- --
--   >>> concat = Stream.unfoldMany Array.reader
--   
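-- For example:
--
--   >>> arrs = fmap Array.fromList $ Stream.fromList [[1,2],[3::Int]]
--   >>> Stream.fold Fold.toList $ Array.concat arrs
--   [1,2,3]
--   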
concat :: (Monad m, Unbox a) => Stream m (Array a) -> Stream m a -- | Convert a stream of arrays into a stream of their elements reversing -- the contents of each array before flattening. -- --
--   >>> concatRev = Stream.unfoldMany Array.readerRev
--   
concatRev :: forall m a. (Monad m, Unbox a) => Stream m (Array a) -> Stream m a -- | Fold fCompactGE n coalesces adjacent arrays in the input -- stream until the size becomes greater than or equal to n. -- -- Generates unpinned arrays irrespective of the pinning status of input -- arrays. fCompactGE :: (MonadIO m, Unbox a) => Int -> Fold m (Array a) (Array a) -- | PInned version of fCompactGE. fPinnedCompactGE :: (MonadIO m, Unbox a) => Int -> Fold m (Array a) (Array a) -- | Like compactGE but for transforming folds instead of stream. -- --
--   >>> lCompactGE n = Fold.many (Array.fCompactGE n)
--   
-- -- Generates unpinned arrays irrespective of the pinning status of input -- arrays. lCompactGE :: (MonadIO m, Unbox a) => Int -> Fold m (Array a) () -> Fold m (Array a) () -- | Pinned version of lCompactGE. lPinnedCompactGE :: (MonadIO m, Unbox a) => Int -> Fold m (Array a) () -> Fold m (Array a) () -- | compactGE n stream coalesces adjacent arrays in the -- stream until the size becomes greater than or equal to -- n. -- --
--   >>> compactGE n = Stream.foldMany (Array.fCompactGE n)
--   
-- -- Generates unpinned arrays irrespective of the pinning status of input -- arrays. compactGE :: (MonadIO m, Unbox a) => Int -> Stream m (Array a) -> Stream m (Array a) -- | Deprecated: Please use unsafePinnedAsPtr instead. asPtrUnsafe :: MonadIO m => Array a -> (Ptr a -> m b) -> m b -- | Deprecated: Please use getIndexUnsafe instead unsafeIndex :: forall a. Unbox a => Int -> Array a -> a -- | Deprecated: Please use buildChunks instead. bufferChunks :: (MonadIO m, Unbox a) => Stream m a -> m (StreamK m (Array a)) -- | Deprecated: Please use "unfoldMany reader" instead. flattenArrays :: forall m a. (MonadIO m, Unbox a) => Stream m (Array a) -> Stream m a -- | Deprecated: Please use "unfoldMany readerRev" instead. flattenArraysRev :: forall m a. (MonadIO m, Unbox a) => Stream m (Array a) -> Stream m a -- | Deprecated: Please use fromChunksK instead. fromArrayStreamK :: (Unbox a, MonadIO m) => StreamK m (Array a) -> m (Array a) -- | Deprecated: Please use fromStreamN instead. fromStreamDN :: forall m a. (MonadIO m, Unbox a) => Int -> Stream m a -> m (Array a) -- | Deprecated: Please use fromStream instead. fromStreamD :: forall m a. (MonadIO m, Unbox a) => Stream m a -> m (Array a) -- | Deprecated: Please use read instead. toStreamD :: forall m a. (Monad m, Unbox a) => Array a -> Stream m a -- | Deprecated: Please use readRev instead. toStreamDRev :: forall m a. (Monad m, Unbox a) => Array a -> Stream m a -- | Same as read -- | Deprecated: Please use read instead. toStream :: (Monad m, Unbox a) => Array a -> Stream m a -- | Same as readRev -- | Deprecated: Please use readRev instead. toStreamRev :: (Monad m, Unbox a) => Array a -> Stream m a -- | Deprecated: Please use empty instead. nil :: Array a -- | Deprecated: Please use createWith instead. writeWith :: forall m a. (MonadIO m, Unbox a) => Int -> Fold m a (Array a) writeN :: forall m a. (MonadIO m, Unbox a) => Int -> Fold m a (Array a) -- | Deprecated: Please use pinnedCreateOf instead. pinnedWriteN :: forall m a. (MonadIO m, Unbox a) => Int -> Fold m a (Array a) -- | Deprecated: Please use unsafeCreateOf instead. writeNUnsafe :: forall m a. (MonadIO m, Unbox a) => Int -> Fold m a (Array a) -- | Deprecated: Please use unsafePinnedCreateOf instead. pinnedWriteNUnsafe :: forall m a. (MonadIO m, Unbox a) => Int -> Fold m a (Array a) -- | pinnedWriteNAligned alignment n folds a maximum of n -- elements from the input stream to an Array aligned to the given -- size. -- -- Pre-release -- | Deprecated: To be removed. pinnedWriteNAligned :: forall m a. (MonadIO m, Unbox a) => Int -> Int -> Fold m a (Array a) write :: forall m a. (MonadIO m, Unbox a) => Fold m a (Array a) -- | Deprecated: Please use pinnedCreate instead. pinnedWrite :: forall m a. (MonadIO m, Unbox a) => Fold m a (Array a) -- | writeLastN n folds a maximum of n elements from the -- end of the input stream to an Array. writeLastN :: (Storable a, Unbox a, MonadIO m) => Int -> Fold m a (Array a) -- | O(1) Lookup the element at the given index. Index starts from -- 0. getIndex :: forall a. Unbox a => Int -> Array a -> Maybe a -- | Like getIndex but indexes the array in reverse from the end. -- -- Pre-release getIndexRev :: forall a. Unbox a => Int -> Array a -> Maybe a -- |
--   >>> last arr = Array.getIndexRev 0 arr
--   
-- -- Pre-release last :: Unbox a => Array a -> Maybe a -- | Given a stream of array indices, read the elements on those indices -- from the supplied Array. An exception is thrown if an index is out of -- bounds. -- -- This is the most general operation. We can implement other operations -- in terms of this: -- --
  read arr =
       let indices = Stream.enumerateFromTo 0 (length arr - 1)
        in indexReader indices
  
  readRev arr =
       let i = length arr - 1
        in indexReader (Stream.enumerateFromThenTo i (i - 1) 0)
--   
-- -- Pre-release indexReader :: (Monad m, Unbox a) => Stream m Int -> Unfold m (Array a) a -- | Unfolds (from, then, to, array) generating a finite stream -- whose first element is the array value from the index from -- and the successive elements are from the indices in increments of -- then up to to. Index enumeration can occur downwards -- or upwards depending on whether then comes before or after -- from. -- --
  indexReaderFromThenTo =
      let f (from, next, to, arr) =
              (Stream.enumerateFromThenTo from next to, arr)
       in Unfold.lmap f indexReader
--   
-- -- Unimplemented indexReaderFromThenTo :: Unfold m (Int, Int, Int, Array a) a -- |
--   >>> null arr = Array.byteLength arr == 0
--   
-- -- Pre-release null :: Array a -> Bool -- | Given a sorted array, perform a binary search to find the given -- element. Returns the index of the element if found. -- -- Unimplemented binarySearch :: a -> Array a -> Maybe Int -- | Unimplemented findIndicesOf :: (a -> Bool) -> Array a -> Stream Identity Int -- | Perform a linear search to find all the indices where a given element -- is present in an array. -- -- Unimplemented indexFinder :: (a -> Bool) -> Unfold Identity (Array a) Int -- | Cast an array having elements of type a into an array having -- elements of type b. The length of the array should be a -- multiple of the size of the target element otherwise Nothing is -- returned. cast :: forall a b. Unbox b => Array a -> Maybe (Array b) -- | Cast an Array a into an Array Word8. asBytes :: Array a -> Array Word8 -- | Cast an array having elements of type a into an array having -- elements of type b. The array size must be a multiple of the -- size of type b otherwise accessing the last element of the -- array may result into a crash or a random value. -- -- Pre-release castUnsafe :: Array a -> Array b -- | Convert an array of any type into a null terminated CString Ptr. If -- the array is unpinned it is first converted to a pinned array which -- requires a copy. -- -- Unsafe -- -- O(n) Time: (creates a copy of the array) -- -- Pre-release asCStringUnsafe :: Array a -> (CString -> IO b) -> IO b -- | O(1) Slice an array in constant time. -- -- Caution: The bounds of the slice are not checked. -- -- Unsafe -- -- Pre-release getSliceUnsafe :: forall a. Unbox a => Int -> Int -> Array a -> Array a sliceIndexerFromLen :: forall m a. (Monad m, Unbox a) => Int -> Int -> Unfold m (Array a) (Int, Int) -- | Generate a stream of slices of specified length from an array, -- starting from the supplied array index. The last slice may be shorter -- than the requested length. -- -- Pre-release/ slicerFromLen :: forall m a. (Monad m, Unbox a) => Int -> Int -> Unfold m (Array a) (Array a) -- | Split the array into a stream of slices using a predicate. The element -- matching the predicate is dropped. -- -- Pre-release splitOn :: (Monad m, Unbox a) => (a -> Bool) -> Array a -> Stream m (Array a) -- | Transform an array into another array using a stream transformation -- operation. -- -- Pre-release streamTransform :: forall m a b. (MonadIO m, Unbox a, Unbox b) => (Stream m a -> Stream m b) -> Array a -> m (Array b) -- | Fold an array using a stream fold operation. -- -- Pre-release streamFold :: (Monad m, Unbox a) => (Stream m a -> m b) -> Array a -> m b -- | Fold an array using a Fold. -- -- Pre-release fold :: forall m a b. (Monad m, Unbox a) => Fold m a b -> Array a -> m b -- | Insert the given element between arrays and flatten. -- --
--   >>> interpose x = Stream.interpose x Array.reader
--   
interpose :: (Monad m, Unbox a) => a -> Stream m (Array a) -> Stream m a -- | Insert the given element after each array and flatten. This is similar -- to unlines. -- --
--   >>> interposeSuffix x = Stream.interposeSuffix x Array.reader
--   
interposeSuffix :: forall m a. (Monad m, Unbox a) => a -> Stream m (Array a) -> Stream m a -- | Insert the given array after each array and flatten. -- --
--   >>> intercalateSuffix = Stream.intercalateSuffix Array.reader
--   
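-- For example:
--
--   >>> arrs = fmap Array.fromList $ Stream.fromList [[1,2],[3,4::Int]]
--   >>> Stream.fold Fold.toList $ Array.intercalateSuffix (Array.fromList [0]) arrs
--   [1,2,0,3,4,0]
--   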
intercalateSuffix :: (Monad m, Unbox a) => Array a -> Stream m (Array a) -> Stream m a -- | compactLE n coalesces adjacent arrays in the input stream -- only if the combined size would be less than or equal to n. -- -- Generates unpinned arrays irrespective of the pinning status of input -- arrays. compactLE :: (MonadIO m, Unbox a) => Int -> Stream m (Array a) -> Stream m (Array a) -- | Pinned version of compactLE. pinnedCompactLE :: (MonadIO m, Unbox a) => Int -> Stream m (Array a) -> Stream m (Array a) -- | Split a stream of arrays on a given separator byte, dropping the -- separator and coalescing all the arrays between two separators into a -- single array. compactOnByte :: MonadIO m => Word8 -> Stream m (Array Word8) -> Stream m (Array Word8) -- | Like compactOnByte considers the separator in suffix position -- instead of infix position. compactOnByteSuffix :: MonadIO m => Word8 -> Stream m (Array Word8) -> Stream m (Array Word8) foldBreakChunks :: forall m a b. (MonadIO m, Unbox a) => Fold m a b -> Stream m (Array a) -> m (b, Stream m (Array a)) -- | Fold a stream of arrays using a Fold. This is equivalent to the -- following: -- --
--   >>> foldChunks f = Stream.fold f . Stream.unfoldMany Array.reader
--   
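-- For example:
--
--   >>> arrs = fmap Array.fromList $ Stream.fromList [[1,2],[3,4,5::Int]]
--   >>> Array.foldChunks Fold.sum arrs
--   15
--   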
foldChunks :: (MonadIO m, Unbox a) => Fold m a b -> Stream m (Array a) -> m b -- | Fold a stream of arrays using a Fold and return the remaining -- stream. -- -- The following alternative to this function allows composing the fold -- using the parser Monad: -- --
--   foldBreakStreamK f s =
--         fmap (first (fromRight undefined))
--       $ StreamK.parseBreakChunks (ParserK.adaptC (Parser.fromFold f)) s
--   
-- -- We can compare perf and remove this one or define it in terms of that. foldBreakChunksK :: forall m a b. (MonadIO m, Unbox a) => Fold m a b -> StreamK m (Array a) -> m (b, StreamK m (Array a)) -- | Parse an array stream using the supplied Parser. Returns the -- parse result and the unconsumed stream. Throws ParseError if -- the parse fails. -- -- The following alternative to this function allows composing the parser -- using the parser Monad: -- --
--   >>> parseBreakStreamK p = StreamK.parseBreakChunks (ParserK.adaptC p)
--   
-- -- We can compare perf and remove this one or define it in terms of that. -- -- Internal parseBreakChunksK :: forall m a b. (MonadIO m, Unbox a) => Parser a m b -> StreamK m (Array a) -> m (Either ParseError b, StreamK m (Array a)) encodeAs :: forall a. Serialize a => PinnedState -> a -> Array Word8 -- | Properties: 1. Identity: deserialize . serialize == id 2. -- Encoded equivalence: serialize a == serialize a serialize :: Serialize a => a -> Array Word8 -- | Serialize a Haskell type to a pinned byte array. The array is -- allocated using pinned memory so that it can be used directly in OS -- APIs for writing to file or sending over the network. -- -- Properties: 1. Identity: deserialize . pinnedSerialize == id -- 2. Encoded equivalence: pinnedSerialize a == pinnedSerialize -- a pinnedSerialize :: Serialize a => a -> Array Word8 -- | Decode a Haskell type from a byte array containing its serialized -- representation. deserialize :: Serialize a => Array Word8 -> a -- | Deprecated: Please use sliceIndexerFromLen instead. genSlicesFromLen :: forall m a. (Monad m, Unbox a) => Int -> Int -> Unfold m (Array a) (Int, Int) -- | Deprecated: Please use slicerFromLen instead. getSlicesFromLen :: forall m a. (Monad m, Unbox a) => Int -> Int -> Unfold m (Array a) (Array a) -- | Deprecated: Please use getIndices instead. getIndices :: (Monad m, Unbox a) => Stream m Int -> Unfold m (Array a) a -- | The fundamental singleton IO APIs are getChunk and -- putChunk and the fundamental stream IO APIs built on top of -- those are readChunksWith and writeChunks. Rest of this -- module is just combinatorial programming using these. -- -- We can achieve line buffering by folding lines in the input stream -- into a stream of arrays using Stream.splitOn or Fold.takeEndBy_ and -- similar operations. One can wrap the input stream in Maybe type -- and then use writeMaybesWith to achieve user controlled -- buffering. module Streamly.Internal.FileSystem.Handle -- | Read a ByteArray consisting of one or more bytes from a file -- handle. If no data is available on the handle it blocks until at least -- one byte becomes available. If any data is available then it -- immediately returns that data without blocking. As a result of this -- behavior, it may read less than or equal to the size requested. getChunk :: MonadIO m => Int -> Handle -> m (Array Word8) -- | Read a ByteArray consisting of exactly the specified number -- of bytes from a file handle. -- -- Unimplemented getChunkOf :: Int -> Handle -> IO (Array Word8) -- | Write an Array to a file handle. putChunk :: MonadIO m => Handle -> Array a -> m () -- | Generate a byte stream from a file Handle. -- --
--   >>> read h = Stream.unfoldMany Array.reader $ Handle.readChunks h
--   
-- -- Pre-release read :: MonadIO m => Handle -> Stream m Word8 -- | readWith bufsize handle reads a byte stream from a file -- handle, reads are performed in chunks of up to bufsize. -- --
--   >>> readWith size h = Stream.unfoldMany Array.reader $ Handle.readChunksWith size h
--   
-- -- Pre-release readWith :: MonadIO m => Int -> Handle -> Stream m Word8 -- | readChunksWith size handle reads a stream of arrays from the -- file handle handle. The maximum size of a single array is -- limited to size. The actual size read may be less than or -- equal to size. -- --
--   >>> readChunksWith size h = Stream.unfold Handle.chunkReaderWith (size, h)
--   
readChunksWith :: MonadIO m => Int -> Handle -> Stream m (Array Word8) -- | getChunks handle reads a stream of arrays from the specified -- file handle. The maximum size of a single array is limited to -- defaultChunkSize. The actual size read may be less than or -- equal to defaultChunkSize. -- --
--   >>> readChunks = Handle.readChunksWith IO.defaultChunkSize
--   
-- -- Pre-release readChunks :: MonadIO m => Handle -> Stream m (Array Word8) -- | Unfolds a file handle into a byte stream. IO requests to the device -- are performed in sizes of defaultChunkSize. -- --
--   >>> reader = Unfold.many Array.reader Handle.chunkReader
--   
reader :: MonadIO m => Unfold m Handle Word8 -- | Unfolds the tuple (bufsize, handle) into a byte stream, read -- requests to the IO device are performed using buffers of -- bufsize. -- --
--   >>> readerWith = Unfold.many Array.reader Handle.chunkReaderWith
--   
readerWith :: MonadIO m => Unfold m (Int, Handle) Word8 -- | Unfolds a handle into a stream of Word8 arrays. Requests to the -- IO device are performed using a buffer of size -- defaultChunkSize. The size of arrays in the resulting stream -- are therefore less than or equal to defaultChunkSize. -- --
--   >>> chunkReader = Unfold.first IO.defaultChunkSize Handle.chunkReaderWith
--   
chunkReader :: MonadIO m => Unfold m Handle (Array Word8) -- | Unfold the tuple (bufsize, handle) into a stream of -- Word8 arrays. Read requests to the IO device are performed -- using a buffer of size bufsize. The size of an array in the -- resulting stream is always less than or equal to bufsize. chunkReaderWith :: MonadIO m => Unfold m (Int, Handle) (Array Word8) -- | Write a byte stream to a file handle. Accumulates the input in chunks -- of up to defaultChunkSize before writing to the IO device. -- --
--   >>> write = Handle.writeWith IO.defaultChunkSize
--   
write :: MonadIO m => Handle -> Fold m Word8 () -- | writeWith reqSize handle writes the input stream to -- handle. Bytes in the input stream are collected into a buffer -- until we have a chunk of reqSize and then written to the IO -- device. -- --
--   >>> writeWith n h = Fold.groupsOf n (Array.unsafeCreateOf n) (Handle.writeChunks h)
--   
writeWith :: MonadIO m => Int -> Handle -> Fold m Word8 () -- | Write a stream of arrays to a handle. Each array in the stream is -- written to the device as a separate IO request. -- -- writeChunks h = Fold.drainBy (Handle.putChunk h) writeChunks :: MonadIO m => Handle -> Fold m (Array a) () -- | writeChunksWith bufsize handle writes a stream of arrays to -- handle after coalescing the adjacent arrays in chunks of -- bufsize. We never split an array, if a single array is bigger -- than the specified size it emitted as it is. Multiple arrays are -- coalesed as long as the total size remains below the specified size. writeChunksWith :: (MonadIO m, Unbox a) => Int -> Handle -> Fold m (Array a) () -- | Write a stream of Maybe values. Keep buffering the just values -- in an array until a Nothing is encountered or the buffer size -- exceeds the specified limit, at that point flush the buffer to the -- handle. -- -- Pre-release writeMaybesWith :: MonadIO m => Int -> Handle -> Fold m (Maybe Word8) () -- | Like write but uses the experimental Refold API. -- -- Internal writer :: MonadIO m => Refold m Handle Word8 () -- | Like writeWith but uses the experimental Refold API. -- -- Internal writerWith :: MonadIO m => Int -> Refold m Handle Word8 () -- | Like writeChunks but uses the experimental Refold API. -- -- Internal chunkWriter :: MonadIO m => Refold m Handle (Array a) () -- | Write a byte stream to a file handle. Accumulates the input in chunks -- of up to defaultChunkSize before writing. -- -- NOTE: This may perform better than the write fold, you can try -- this if you need some extra perf boost. -- --
--   >>> putBytes = Handle.putBytesWith IO.defaultChunkSize
--   
putBytes :: MonadIO m => Handle -> Stream m Word8 -> m () -- | putBytesWith bufsize handle stream writes stream to -- handle in chunks of bufsize. A write is performed to -- the IO device as soon as we collect the required input size. putBytesWith :: MonadIO m => Int -> Handle -> Stream m Word8 -> m () -- | putChunksWith bufsize handle stream writes a stream of arrays -- to handle after coalescing the adjacent arrays in chunks of -- bufsize. The chunk size is only a maximum and the actual -- writes could be smaller as we do not split the arrays to fit exactly -- to the specified size. putChunksWith :: (MonadIO m, Unbox a) => Int -> Handle -> Stream m (Array a) -> m () -- | Write a stream of arrays to a handle. -- --
--   >>> putChunks h = Stream.fold (Fold.drainBy (Handle.putChunk h))
--   
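-- For example, a sketch that copies everything from one handle to another,
-- chunk by chunk (copyHandles is a hypothetical name):
--
--   copyHandles :: Handle -> Handle -> IO ()
--   copyHandles src dst = Handle.putChunks dst (Handle.readChunks src)
--   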
putChunks :: MonadIO m => Handle -> Stream m (Array a) -> m () -- | The input to the unfold is (from, to, bufferSize, handle). It -- starts reading from the offset from in the file and reads up -- to the offset to. chunkReaderFromToWith :: MonadIO m => Unfold m (Int, Int, Int, Handle) (Array Word8) -- | Same as chunkReaderWith -- | Deprecated: Please use chunkReaderWith instead. readChunksWithBufferOf :: MonadIO m => Unfold m (Int, Handle) (Array Word8) -- | Same as readerWith -- | Deprecated: Please use readerWith instead. readWithBufferOf :: MonadIO m => Unfold m (Int, Handle) Word8 -- | Same as writeChunksWith -- | Deprecated: Please use writeChunksWith instead. writeChunksWithBufferOf :: (MonadIO m, Unbox a) => Int -> Handle -> Fold m (Array a) () -- | Same as writeWith -- | Deprecated: Please use writeWith instead. writeWithBufferOf :: MonadIO m => Int -> Handle -> Fold m Word8 () -- |
--   >>> import qualified Streamly.FileSystem.Handle as Handle
--   
-- -- Read and write byte streams and array streams to and from file handles -- (Handle). -- -- The TextEncoding, NewLineMode, and -- Buffering options of the underlying GHC Handle are -- ignored by these APIs. Please use Streamly.Unicode.Stream -- module for encoding and decoding a byte stream, use stream splitting -- operations in Streamly.Data.Stream to create a stream of lines -- or to split the input stream on any other type of boundaries. -- -- To set the read or write start position use hSeek on the -- Handle, the before combinator may be used to do that -- on a streaming combinator. To restrict the length of read or write use -- the stream trimming operations like take. -- -- Note that a Handle is inherently stateful, therefore, we -- cannot use these APIs from multiple threads without serialization; -- reading or writing in one thread would affect the file position for -- other threads. -- -- For additional, experimental APIs take a look at -- Streamly.Internal.FileSystem.Handle module. module Streamly.FileSystem.Handle -- | Read a ByteArray consisting of one or more bytes from a file -- handle. If no data is available on the handle it blocks until at least -- one byte becomes available. If any data is available then it -- immediately returns that data without blocking. As a result of this -- behavior, it may read less than or equal to the size requested. getChunk :: MonadIO m => Int -> Handle -> m (Array Word8) -- | Write an Array to a file handle. putChunk :: MonadIO m => Handle -> Array a -> m () -- | Generate a byte stream from a file Handle. -- --
--   >>> read h = Stream.unfoldMany Array.reader $ Handle.readChunks h
--   
-- -- Pre-release read :: MonadIO m => Handle -> Stream m Word8 -- | readWith bufsize handle reads a byte stream from a file -- handle, reads are performed in chunks of up to bufsize. -- --
--   >>> readWith size h = Stream.unfoldMany Array.reader $ Handle.readChunksWith size h
--   
-- -- Pre-release readWith :: MonadIO m => Int -> Handle -> Stream m Word8 -- | getChunks handle reads a stream of arrays from the specified -- file handle. The maximum size of a single array is limited to -- defaultChunkSize. The actual size read may be less than or -- equal to defaultChunkSize. -- --
--   >>> readChunks = Handle.readChunksWith IO.defaultChunkSize
--   
-- -- Pre-release readChunks :: MonadIO m => Handle -> Stream m (Array Word8) -- | readChunksWith size handle reads a stream of arrays from the -- file handle handle. The maximum size of a single array is -- limited to size. The actual size read may be less than or -- equal to size. -- --
--   >>> readChunksWith size h = Stream.unfold Handle.chunkReaderWith (size, h)
--   
readChunksWith :: MonadIO m => Int -> Handle -> Stream m (Array Word8) -- | Unfolds a file handle into a byte stream. IO requests to the device -- are performed in sizes of defaultChunkSize. -- --
--   >>> reader = Unfold.many Array.reader Handle.chunkReader
--   
reader :: MonadIO m => Unfold m Handle Word8 -- | Unfolds the tuple (bufsize, handle) into a byte stream, read -- requests to the IO device are performed using buffers of -- bufsize. -- --
--   >>> readerWith = Unfold.many Array.reader Handle.chunkReaderWith
--   
readerWith :: MonadIO m => Unfold m (Int, Handle) Word8 -- | Unfolds a handle into a stream of Word8 arrays. Requests to the -- IO device are performed using a buffer of size -- defaultChunkSize. The size of arrays in the resulting stream -- are therefore less than or equal to defaultChunkSize. -- --
--   >>> chunkReader = Unfold.first IO.defaultChunkSize Handle.chunkReaderWith
--   
chunkReader :: MonadIO m => Unfold m Handle (Array Word8) -- | Unfold the tuple (bufsize, handle) into a stream of -- Word8 arrays. Read requests to the IO device are performed -- using a buffer of size bufsize. The size of an array in the -- resulting stream is always less than or equal to bufsize. chunkReaderWith :: MonadIO m => Unfold m (Int, Handle) (Array Word8) -- | Write a byte stream to a file handle. Accumulates the input in chunks -- of up to defaultChunkSize before writing to the IO device. -- --
--   >>> write = Handle.writeWith IO.defaultChunkSize
--   
write :: MonadIO m => Handle -> Fold m Word8 () -- | writeWith reqSize handle writes the input stream to -- handle. Bytes in the input stream are collected into a buffer -- until we have a chunk of reqSize and then written to the IO -- device. -- --
--   >>> writeWith n h = Fold.groupsOf n (Array.unsafeCreateOf n) (Handle.writeChunks h)
--   
writeWith :: MonadIO m => Int -> Handle -> Fold m Word8 () -- | Write a stream of arrays to a handle. Each array in the stream is -- written to the device as a separate IO request. -- -- writeChunks h = Fold.drainBy (Handle.putChunk h) writeChunks :: MonadIO m => Handle -> Fold m (Array a) () -- | Same as readerWith -- | Deprecated: Please use readerWith instead. readWithBufferOf :: MonadIO m => Unfold m (Int, Handle) Word8 -- | Same as chunkReaderWith -- | Deprecated: Please use chunkReaderWith instead. readChunksWithBufferOf :: MonadIO m => Unfold m (Int, Handle) (Array Word8) -- | Same as writeChunksWith -- | Deprecated: Please use writeChunksWith instead. writeChunksWithBufferOf :: (MonadIO m, Unbox a) => Int -> Handle -> Fold m (Array a) () -- | Same as writeWith -- | Deprecated: Please use writeWith instead. writeWithBufferOf :: MonadIO m => Int -> Handle -> Fold m Word8 () -- | Read and write streams and arrays to and from files specified by their -- paths in the file system. Unlike the handle based APIs which can have -- a read/write session consisting of multiple reads and writes to the -- handle, these APIs are one shot read or write APIs. These APIs open -- the file handle, perform the requested operation and close the handle. -- Thease are safer compared to the handle based APIs as there is no -- possibility of a file descriptor leakage. -- --
--   import qualified Streamly.Internal.FileSystem.File as File
--   
module Streamly.Internal.FileSystem.File -- | withFile name mode act opens a file using -- openFile and passes the resulting handle to the computation -- act. The handle will be closed on exit from withFile, -- whether by normal termination or by raising an exception. If closing -- the handle raises an exception, then this exception will be raised by -- withFile rather than any exception raised by act. -- -- Pre-release withFile :: (MonadIO m, MonadCatch m) => FilePath -> IOMode -> (Handle -> Stream m a) -> Stream m a -- | Generate a stream of bytes from a file specified by path. The stream -- ends when EOF is encountered. File is locked using multiple reader and -- single writer locking mode. -- -- Pre-release read :: (MonadIO m, MonadCatch m) => FilePath -> Stream m Word8 -- | readChunksWith size file reads a stream of arrays from file -- file. The maximum size of a single array is specified by -- size. The actual size read may be less than or equal to -- size. -- -- Pre-release readChunksWith :: (MonadIO m, MonadCatch m) => Int -> FilePath -> Stream m (Array Word8) -- | readChunks file reads a stream of arrays from file -- file. The maximum size of a single array is limited to -- defaultChunkSize. The actual size read may be less than -- defaultChunkSize. -- --
--   readChunks = readChunksWith defaultChunkSize
--   
-- -- Pre-release readChunks :: (MonadIO m, MonadCatch m) => FilePath -> Stream m (Array Word8) -- | Unfolds the tuple (bufsize, filepath) into a byte stream, -- read requests to the IO device are performed using buffers of -- bufsize. -- -- Pre-release readerWith :: (MonadIO m, MonadCatch m) => Unfold m (Int, FilePath) Word8 -- | Unfolds a file path into a byte stream. IO requests to the device are -- performed in sizes of defaultChunkSize. -- -- Pre-release reader :: (MonadIO m, MonadCatch m) => Unfold m FilePath Word8 -- | Unfold the tuple (bufsize, filepath) into a stream of -- Word8 arrays. Read requests to the IO device are performed -- using a buffer of size bufsize. The size of an array in the -- resulting stream is always less than or equal to bufsize. -- -- Pre-release chunkReaderWith :: (MonadIO m, MonadCatch m) => Unfold m (Int, FilePath) (Array Word8) -- | Unfold the tuple (from, to, bufsize, filepath) into a stream -- of Word8 arrays. Read requests to the IO device are performed -- using a buffer of size bufsize starting from absolute offset -- of from till the absolute position of to. The size -- of an array in the resulting stream is always less than or equal to -- bufsize. -- -- Pre-release chunkReaderFromToWith :: (MonadIO m, MonadCatch m) => Unfold m (Int, Int, Int, FilePath) (Array Word8) -- | Unfolds a FilePath into a stream of Word8 arrays. -- Requests to the IO device are performed using a buffer of size -- defaultChunkSize. The size of arrays in the resulting stream -- are therefore less than or equal to defaultChunkSize. -- -- Pre-release chunkReader :: (MonadIO m, MonadCatch m) => Unfold m FilePath (Array Word8) -- | Write an array to a file. Overwrites the file if it exists. -- -- Pre-release putChunk :: FilePath -> Array a -> IO () -- | Write a byte stream to a file. Accumulates the input in chunks of up -- to defaultChunkSize before writing to the IO device. -- -- Pre-release write :: (MonadIO m, MonadCatch m) => FilePath -> Fold m Word8 () -- | writeWith chunkSize handle writes the input stream to -- handle. Bytes in the input stream are collected into a buffer -- until we have a chunk of size chunkSize and then written to -- the IO device. -- -- Pre-release writeWith :: (MonadIO m, MonadCatch m) => Int -> FilePath -> Fold m Word8 () -- | Write a stream of chunks to a handle. Each chunk in the stream is -- written to the device as a separate IO request. -- -- Pre-release writeChunks :: (MonadIO m, MonadCatch m) => FilePath -> Fold m (Array a) () -- | Write a byte stream to a file. Combines the bytes in chunks of size up -- to defaultChunkSize before writing. If the file exists it is -- truncated to zero size before writing. If the file does not exist it -- is created. File is locked using single writer locking mode. -- -- Pre-release fromBytes :: (MonadIO m, MonadCatch m) => FilePath -> Stream m Word8 -> m () -- | Like write but provides control over the write buffer. Output -- will be written to the IO device as soon as we collect the specified -- number of input elements. -- -- Pre-release fromBytesWith :: (MonadIO m, MonadCatch m) => Int -> FilePath -> Stream m Word8 -> m () -- | Write a stream of arrays to a file. Overwrites the file if it exists. -- -- Pre-release fromChunks :: (MonadIO m, MonadCatch m) => FilePath -> Stream m (Array a) -> m () -- | Append a byte stream to a file. Combines the bytes in chunks of size -- up to defaultChunkSize before writing. If the file exists then -- the new data is appended to the file. 
If the file does not exist it is -- created. File is locked using single writer locking mode. -- -- Pre-release writeAppend :: (MonadIO m, MonadCatch m) => FilePath -> Stream m Word8 -> m () -- | Like append but provides control over the write buffer. -- Output will be written to the IO device as soon as we collect the -- specified number of input elements. -- -- Pre-release writeAppendWith :: (MonadIO m, MonadCatch m) => Int -> FilePath -> Stream m Word8 -> m () -- | append an array to a file. -- -- Pre-release writeAppendArray :: FilePath -> Array a -> IO () -- | Append a stream of arrays to a file. -- -- Pre-release writeAppendChunks :: (MonadIO m, MonadCatch m) => FilePath -> Stream m (Array a) -> m () -- | Deprecated: Please use readerWith instead readWithBufferOf :: (MonadIO m, MonadCatch m) => Unfold m (Int, FilePath) Word8 -- | Deprecated: Please use chunkReaderWith instead readChunksWithBufferOf :: (MonadIO m, MonadCatch m) => Unfold m (Int, FilePath) (Array Word8) -- | Deprecated: Please use chunkReaderFromToWith instead readChunksFromToWith :: (MonadIO m, MonadCatch m) => Unfold m (Int, Int, Int, FilePath) (Array Word8) -- | Deprecated: Please use read instead toBytes :: (MonadIO m, MonadCatch m) => FilePath -> Stream m Word8 -- | Deprecated: Please use readChunks instead toChunks :: (MonadIO m, MonadCatch m) => FilePath -> Stream m (Array Word8) -- | Deprecated: Please use readChunksWith instead toChunksWithBufferOf :: (MonadIO m, MonadCatch m) => Int -> FilePath -> Stream m (Array Word8) -- | Deprecated: Please use writeWith instead writeWithBufferOf :: (MonadIO m, MonadCatch m) => Int -> FilePath -> Fold m Word8 () -- | Deprecated: Please use fromBytesWith instead fromBytesWithBufferOf :: (MonadIO m, MonadCatch m) => Int -> FilePath -> Stream m Word8 -> m () -- | Warning: The API of this module is subject to change in future -- releases. Especially the type for representing paths may change from -- FilePath to something else. -- -- Read and write streams and arrays to and from files specified by their -- paths in the file system. Unlike the handle based APIs which can have -- a read/write session consisting of multiple reads and writes to the -- handle, these APIs are one shot read or write APIs. These APIs open -- the file handle, perform the requested operation and close the handle. -- These are safer compared to the handle based APIs as there is no -- possibility of a file descriptor leakage. -- --
--   >>> import qualified Streamly.FileSystem.File as File
--   
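-- A sketch of a one-shot file copy built from these APIs (copyFile is a
-- hypothetical name; assumes Streamly.Data.Stream imported qualified as
-- Stream; the chunked APIs avoid holding the whole file in memory):
--
--   copyFile :: FilePath -> FilePath -> IO ()
--   copyFile src dst = Stream.fold (File.writeChunks dst) (File.readChunks src)
--   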
module Streamly.FileSystem.File -- | withFile name mode act opens a file using -- openFile and passes the resulting handle to the computation -- act. The handle will be closed on exit from withFile, -- whether by normal termination or by raising an exception. If closing -- the handle raises an exception, then this exception will be raised by -- withFile rather than any exception raised by act. -- -- Pre-release withFile :: (MonadIO m, MonadCatch m) => FilePath -> IOMode -> (Handle -> Stream m a) -> Stream m a -- | Generate a stream of bytes from a file specified by path. The stream -- ends when EOF is encountered. File is locked using multiple reader and -- single writer locking mode. -- -- Pre-release read :: (MonadIO m, MonadCatch m) => FilePath -> Stream m Word8 -- | readChunksWith size file reads a stream of arrays from file -- file. The maximum size of a single array is specified by -- size. The actual size read may be less than or equal to -- size. -- -- Pre-release readChunksWith :: (MonadIO m, MonadCatch m) => Int -> FilePath -> Stream m (Array Word8) -- | readChunks file reads a stream of arrays from file -- file. The maximum size of a single array is limited to -- defaultChunkSize. The actual size read may be less than -- defaultChunkSize. -- --
--   readChunks = readChunksWith defaultChunkSize
--   
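--
-- For example, a hedged sketch computing a file's size by summing the
-- chunk lengths (the path "input.txt" is only illustrative):
--
--   >>> import qualified Streamly.Data.Array as Array
--   >>> import qualified Streamly.Data.Fold as Fold
--   >>> import qualified Streamly.Data.Stream as Stream
--   >>> fileSize = Stream.fold (Fold.lmap Array.length Fold.sum) (readChunks "input.txt")
--   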
-- -- Pre-release readChunks :: (MonadIO m, MonadCatch m) => FilePath -> Stream m (Array Word8) -- | Write a byte stream to a file. Accumulates the input in chunks of up -- to defaultChunkSize before writing to the IO device. -- -- Pre-release write :: (MonadIO m, MonadCatch m) => FilePath -> Fold m Word8 () -- | writeWith chunkSize handle writes the input stream to -- handle. Bytes in the input stream are collected into a buffer -- until we have a chunk of size chunkSize and then written to -- the IO device. -- -- Pre-release writeWith :: (MonadIO m, MonadCatch m) => Int -> FilePath -> Fold m Word8 () -- | Write a stream of chunks to a handle. Each chunk in the stream is -- written to the device as a separate IO request. -- -- Pre-release writeChunks :: (MonadIO m, MonadCatch m) => FilePath -> Fold m (Array a) () module Streamly.Internal.Data.MutByteArray -- | A lifted mutable byte array type wrapping MutableByteArray# -- RealWorld. This is a low level array used to back high level -- unboxed arrays and serialized data. data MutByteArray MutByteArray :: MutableByteArray# RealWorld -> MutByteArray -- | Deprecated: Please use MutByteArray instead type MutableByteArray = MutByteArray getMutableByteArray# :: MutByteArray -> MutableByteArray# RealWorld data PinnedState Pinned :: PinnedState Unpinned :: PinnedState -- | Return True if the array is allocated in pinned memory. isPinned :: MutByteArray -> Bool -- | Return a copy of the array in pinned memory if unpinned, else return -- the original array. pin :: MutByteArray -> IO MutByteArray -- | Return a copy of the array in unpinned memory if pinned, else return -- the original array. unpin :: MutByteArray -> IO MutByteArray empty :: MutByteArray newBytesAs :: PinnedState -> Int -> IO MutByteArray new :: Int -> IO MutByteArray pinnedNew :: Int -> IO MutByteArray pinnedNewAlignedBytes :: Int -> Int -> IO MutByteArray -- | Return the size of the array in bytes. sizeOfMutableByteArray :: MutByteArray -> IO Int -- | Put a sub range of a source array into a subrange of a destination -- array. This is not safe as it does not check the bounds of neither the -- src array nor the destination array. putSliceUnsafe :: MonadIO m => MutByteArray -> Int -> MutByteArray -> Int -> Int -> m () -- | Unsafe as it does not check whether the start offset and length -- supplied are valid inside the array. cloneSliceUnsafeAs :: MonadIO m => PinnedState -> Int -> Int -> MutByteArray -> m MutByteArray -- | cloneSliceUnsafe offset len arr clones a slice of the -- supplied array starting at the given offset and equal to the given -- length. cloneSliceUnsafe :: MonadIO m => Int -> Int -> MutByteArray -> m MutByteArray -- |
--   pinnedCloneSliceUnsafe offset len arr
--   
pinnedCloneSliceUnsafe :: MonadIO m => Int -> Int -> MutByteArray -> m MutByteArray -- | Use a MutByteArray as Ptr a. This is useful when we -- want to pass an array as a pointer to some operating system call or to -- a "safe" FFI call. -- -- If the array is not pinned it is copied to pinned memory before -- passing it to the monadic action. -- -- Performance Notes: Forces a copy if the array is not pinned. It -- is advised that the programmer keeps this in mind and creates a pinned -- array opportunistically before this operation occurs, to avoid the -- cost of a copy if possible. -- -- Unsafe because of direct pointer operations. The user must -- ensure that they are writing within the legal bounds of the array. -- -- Pre-release unsafePinnedAsPtr :: MonadIO m => MutByteArray -> (Ptr a -> m b) -> m b -- | For use with unsafe FFI functions. Does not force pin the array -- memory. unsafeAsPtr :: MonadIO m => MutByteArray -> (Ptr a -> m b) -> m b -- | Deprecated: Please use unsafePinnedAsPtr instead. asPtrUnsafe :: MonadIO m => MutByteArray -> (Ptr a -> m b) -> m b -- | Deprecated: Please use empty instead nil :: MutByteArray -- | The Unbox type class provides operations for serialization -- (unboxing) and deserialization (boxing) of fixed-length, non-recursive -- Haskell data types to and from their byte stream representation. -- -- Unbox uses fixed size encoding, therefore, size is independent of the -- value, it must be determined solely by the type. This restriction -- makes types with Unbox instances suitable for storing in -- arrays. Note that sum types may have multiple constructors of -- different sizes, the size of a sum type is computed as the maximum -- required by any constructor. -- -- The peekAt operation reads as many bytes from the mutable byte -- array as the size of the data type and builds a Haskell data -- type from these bytes. pokeAt operation converts a Haskell data -- type to its binary representation which consists of size -- bytes and then stores these bytes into the mutable byte array. These -- operations do not check the bounds of the array, the user of the type -- class is expected to check the bounds before peeking or poking. -- -- IMPORTANT: The serialized data's byte ordering remains the same as the -- host machine's byte order. Therefore, it can not be deserialized from -- host machines with a different byte ordering. -- -- Instances can be derived via Generics, Template Haskell, or written -- manually. Note that the data type must be non-recursive. WARNING! -- Generic and Template Haskell deriving, both hang for recursive data -- types. Deriving via Generics is more convenient but Template Haskell -- should be preferred over Generics for the following reasons: -- --
--   1. Instances derived via Template Haskell provide better and more
--      reliable performance.
--   2. Generic deriving allows only 256 fields or constructor tags
--      whereas template Haskell has no limit.
-- -- Here is an example, for deriving an instance of this type class using -- generics: -- --
--   >>> import GHC.Generics (Generic)
--   
--   >>> :{
--   data Object = Object
--       { _int0 :: Int
--       , _int1 :: Int
--       } deriving Generic
--   :}
--   
-- --
--   >>> import Streamly.Data.MutByteArray (Unbox(..))
--   
--   >>> instance Unbox Object
--   
-- -- To derive the instance via Template Haskell: -- --
--   import Streamly.Data.MutByteArray (deriveUnbox)
--   $(deriveUnbox [d|instance Unbox Object|])
--   
-- -- See deriveUnbox for more information on deriving using Template -- Haskell. -- -- If you want to write the instance manually: -- --
--   >>> :{
--   instance Unbox Object where
--       sizeOf _ = 16
--       peekAt i arr = do
--          -- Check the array bounds
--           x0 <- peekAt i arr
--           x1 <- peekAt (i + 8) arr
--           return $ Object x0 x1
--       pokeAt i arr (Object x0 x1) = do
--          -- Check the array bounds
--           pokeAt i arr x0
--           pokeAt (i + 8) arr x1
--   :}
--   
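--
-- As a hedged usage sketch (assuming a 64-bit Int, so the Object above
-- occupies 16 bytes), the class methods can be applied directly to a
-- MutByteArray allocated with new:
--
--   >>> import Streamly.Data.MutByteArray (new)
--   >>> arr <- new 16
--   >>> pokeAt 0 arr (Object 10 20)
--   >>> fmap _int1 (peekAt 0 arr :: IO Object)
--   20
--   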
class Unbox a -- | Get the size. Size cannot be zero, should be at least 1 byte. sizeOf :: Unbox a => Proxy a -> Int -- | Get the size. Size cannot be zero, should be at least 1 byte. sizeOf :: (Unbox a, SizeOfRep (Rep a)) => Proxy a -> Int -- | peekAt byte-offset array reads an element of type a -- from the the given the byte offset in the array. -- -- IMPORTANT: The implementation of this interface may not check the -- bounds of the array, the caller must not assume that. peekAt :: Unbox a => Int -> MutByteArray -> IO a -- | peekAt byte-offset array reads an element of type a -- from the the given the byte offset in the array. -- -- IMPORTANT: The implementation of this interface may not check the -- bounds of the array, the caller must not assume that. peekAt :: (Unbox a, Generic a, PeekRep (Rep a)) => Int -> MutByteArray -> IO a -- | Deprecated: Use peekAt. peekByteIndex :: Unbox a => Int -> MutByteArray -> IO a -- | pokeAt byte-offset array writes an element of type a -- to the the given the byte offset in the array. -- -- IMPORTANT: The implementation of this interface may not check the -- bounds of the array, the caller must not assume that. pokeAt :: Unbox a => Int -> MutByteArray -> a -> IO () -- | Deprecated: Use pokeAt. pokeByteIndex :: Unbox a => Int -> MutByteArray -> a -> IO () -- | pokeAt byte-offset array writes an element of type a -- to the the given the byte offset in the array. -- -- IMPORTANT: The implementation of this interface may not check the -- bounds of the array, the caller must not assume that. pokeAt :: (Unbox a, Generic a, PokeRep (Rep a)) => Int -> MutByteArray -> a -> IO () -- | A location inside a mutable byte array with the bound of the array. Is -- it cheaper to just get the bound using the size of the array whenever -- needed? data BoundedPtr BoundedPtr :: MutByteArray -> Int -> Int -> BoundedPtr -- | Chains peek functions that pass the current position to the next -- function newtype Peeker a Peeker :: Builder BoundedPtr IO a -> Peeker a read :: Unbox a => Peeker a readUnsafe :: Unbox a => Peeker a skipByte :: Peeker () runPeeker :: Peeker a -> BoundedPtr -> IO a pokeBoundedPtrUnsafe :: forall a. Unbox a => a -> BoundedPtr -> IO BoundedPtr pokeBoundedPtr :: forall a. Unbox a => a -> BoundedPtr -> IO BoundedPtr class PeekRep (f :: Type -> Type) peekRep :: PeekRep f => Peeker (f x) class PokeRep (f :: Type -> Type) pokeRep :: PokeRep f => f a -> BoundedPtr -> IO BoundedPtr -- | Implementation of sizeOf that works on the generic representation of -- an ADT. class SizeOfRep (f :: Type -> Type) sizeOfRep :: SizeOfRep f => f x -> Int genericSizeOf :: forall a. SizeOfRep (Rep a) => Proxy a -> Int genericPeekByteIndex :: (Generic a, PeekRep (Rep a)) => MutByteArray -> Int -> IO a genericPokeByteIndex :: (Generic a, PokeRep (Rep a)) => MutByteArray -> Int -> a -> IO () -- | Given an Unbox instance declaration splice without the methods -- (e.g. [d|instance Unbox a => Unbox (Maybe a)|]), generate -- an instance declaration including all the type class method -- implementations. -- -- Usage: -- --
--   $(deriveUnbox [d|instance Unbox a => Unbox (Maybe a)|])
--   
deriveUnbox :: Q [Dec] -> Q [Dec] -- | Simplified info about a Con. Omits deriving, strictness, and -- kind info. This is much nicer than consuming Con directly, -- because it unifies all the constructors into one. data DataCon DataCon :: Name -> [Name] -> Cxt -> [(Maybe Name, Type)] -> DataCon [dcName] :: DataCon -> Name [dcTvs] :: DataCon -> [Name] [dcCxt] :: DataCon -> Cxt [dcFields] :: DataCon -> [(Maybe Name, Type)] -- | Simplified info about a DataD. Omits deriving, strictness, kind -- info, and whether it's data or newtype. data DataType DataType :: Name -> [Name] -> Cxt -> [DataCon] -> DataType [dtName] :: DataType -> Name [dtTvs] :: DataType -> [Name] [dtCxt] :: DataType -> Cxt [dtCons] :: DataType -> [DataCon] -- | Reify the given data or newtype declaration, and yields its -- DataType representation. reifyDataType :: Name -> Q DataType -- | The Serialize type class provides operations for serialization -- and deserialization of general Haskell data types to and from their -- byte stream representation. -- -- Unlike Unbox, Serialize uses variable length encoding, -- therefore, it can serialize recursive and variable length data types -- like lists, or variable length sum types where the length of the value -- may vary depending on a particular constructor. For variable length -- data types the length is encoded along with the data. -- -- The deserializeAt operation reads bytes from the mutable byte -- array and builds a Haskell data type from these bytes, the number of -- bytes it reads depends on the type and the encoded value it is -- reading. serializeAt operation converts a Haskell data type to -- its binary representation which must consist of as many bytes as added -- by the addSizeTo operation for that value and then stores -- these bytes into the mutable byte array. The programmer is expected to -- use the addSizeTo operation and allocate an array of -- sufficient length before calling serializeAt. -- -- IMPORTANT: The serialized data's byte ordering remains the same as the -- host machine's byte order. Therefore, it can not be deserialized from -- host machines with a different byte ordering. -- -- Instances can be derived via Template Haskell, or written manually. -- -- Here is an example, for deriving an instance of this type class using -- template Haskell: -- --
--   >>> :{
--   data Object = Object
--       { _obj1 :: [Int]
--       , _obj2 :: Int
--       }
--   :}
--   
-- --
--   import Streamly.Data.MutByteArray (deriveSerialize)
--   $(deriveSerialize [d|instance Serialize Object|])
--   
-- -- See deriveSerialize and deriveSerializeWith for more -- information on deriving using Template Haskell. -- -- Here is an example of a manual instance. -- --
--   >>> import Streamly.Data.MutByteArray (Serialize(..))
--   
-- --
--   >>> :{
--   instance Serialize Object where
--       addSizeTo acc obj = addSizeTo (addSizeTo acc (_obj1 obj)) (_obj2 obj)
--       deserializeAt i arr len = do
--            -- Check the array bounds before reading
--           (i1, x0) <- deserializeAt i arr len
--           (i2, x1) <- deserializeAt i1 arr len
--           pure (i2, Object x0 x1)
--       serializeAt i arr (Object x0 x1) = do
--           i1 <- serializeAt i arr x0
--           i2 <- serializeAt i1 arr x1
--           pure i2
--   :}
--   
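--
-- As a hedged usage sketch, the instance above can be exercised by
-- allocating just enough space with addSizeTo and then round-tripping
-- the value through a MutByteArray (new is assumed to be imported from
-- Streamly.Data.MutByteArray):
--
--   >>> import Streamly.Data.MutByteArray (new)
--   >>> let obj = Object [1,2,3] 4
--   >>> arr <- new (addSizeTo 0 obj)
--   >>> end <- serializeAt 0 arr obj
--   >>> fmap (_obj2 . snd) (deserializeAt 0 arr end)
--   4
--   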
class Serialize a -- | addSizeTo accum value returns accum incremented by -- the size of the serialized representation of value in bytes. -- Size cannot be zero. It should be at least 1 byte. addSizeTo :: Serialize a => Int -> a -> Int -- | deserializeAt byte-offset array arrayLen deserializes a value -- from the given byte-offset in the array. Returns a tuple consisting of -- the next byte-offset and the deserialized value. -- -- The arrayLen passed is the entire length of the input buffer. It is to -- be used to check if we would overflow the input buffer when -- deserializing. -- -- Throws an exception if the operation would exceed the supplied -- arrayLen. deserializeAt :: Serialize a => Int -> MutByteArray -> Int -> IO (Int, a) -- | serializeAt byte-offset array value writes the serialized -- representation of the value in the array at the given -- byte-offset. Returns the next byte-offset. -- -- This is an unsafe operation, the programmer must ensure that the array -- has enough space available to serialize the value as determined by the -- addSizeTo operation. serializeAt :: Serialize a => Int -> MutByteArray -> a -> IO Int -- | Given an Serialize instance declaration splice without the -- methods (e.g. [d|instance Serialize a => Serialize (Maybe -- a)|]), generate an instance declaration including all the type -- class method implementations. -- --
--   >>> deriveSerialize = deriveSerializeWith id
--   
-- -- Usage: -- --
--   $(deriveSerialize
--         [d|instance Serialize a => Serialize (Maybe a)|])
--   
deriveSerialize :: Q [Dec] -> Q [Dec] -- | deriveSerializeWith config-modifier instance-dec generates a -- template Haskell splice consisting of a declaration of a -- Serialize instance. instance-dec is a template Haskell -- declaration splice consisting of a standard Haskell instance -- declaration without the type class methods (e.g. [d|instance -- Serialize a => Serialize (Maybe a)|]). -- -- The type class methods for the given instance are generated according -- to the supplied config-modifier parameter. See -- SerializeConfig for default configuration settings. -- -- Usage: -- --
--   $(deriveSerializeWith
--         ( inlineSerializeAt (Just NoInline)
--         . inlineDeserializeAt (Just NoInline)
--         )
--         [d|instance Serialize a => Serialize (Maybe a)|])
--   
deriveSerializeWith :: (SerializeConfig -> SerializeConfig) -> Q [Dec] -> Q [Dec] -- | Configuration to control how the Serialize instance is -- generated. The configuration is opaque and is modified by composing -- config modifier functions, for example: -- --
--   >>> (inlineSerializeAt (Just NoInline)) . (inlineSerializeAt (Just Inlinable))
--   
-- -- The default configuration settings are: -- -- -- -- The following experimental options are also available: -- -- data SerializeConfig SerializeConfig :: Maybe Inline -> Maybe Inline -> Maybe Inline -> Bool -> Bool -> SerializeConfig [cfgInlineSize] :: SerializeConfig -> Maybe Inline [cfgInlineSerialize] :: SerializeConfig -> Maybe Inline [cfgInlineDeserialize] :: SerializeConfig -> Maybe Inline [cfgConstructorTagAsString] :: SerializeConfig -> Bool [cfgRecordSyntaxWithHeader] :: SerializeConfig -> Bool serializeConfig :: SerializeConfig -- | How should we inline the addSizeTo function? The default is -- Nothing which means left to the compiler. Forcing inline on -- addSizeTo function actually worsens some benchmarks and -- improves none. inlineAddSizeTo :: Maybe Inline -> SerializeConfig -> SerializeConfig -- | How should we inline the serialize function? The default -- 'Just Inline'. However, aggressive inlining can bloat the code and -- increase in compilation times when there are big functions and too -- many nesting levels so you can change it accordingly. A Nothing -- value leaves the decision to the compiler. inlineSerializeAt :: Maybe Inline -> SerializeConfig -> SerializeConfig -- | How should we inline the deserialize function? See guidelines -- in inlineSerializeAt. inlineDeserializeAt :: Maybe Inline -> SerializeConfig -> SerializeConfig -- | Experimental -- -- In sum types, use Latin-1 encoded original constructor names rather -- than binary values to identify constructors. This option is not -- applicable to product types. -- -- This option enables the following behavior: -- -- -- -- Note that if you change a type, change the semantics of a type, or -- delete a field and add a new field with the same name, deserialization -- of old data may result in silent unexpected behavior. -- -- This option has to be the same on both encoding and decoding side. -- -- The default is False. encodeConstrNames :: Bool -> SerializeConfig -> SerializeConfig -- | Experimental -- -- In explicit record types, use Latin-1 encoded record field names -- rather than binary values to identify the record fields. Note that -- this option is not applicable to sum types. Also, it does not work on -- a product type which is not a record, because there are no field names -- to begin with. -- -- This option enables the following behavior: -- -- -- -- This option has to be the same on both encoding and decoding side. -- -- There is a constant performance overhead proportional to the total -- length of the record field names and the number of record fields. -- -- The default is False. 
encodeRecordFields :: Bool -> SerializeConfig -> SerializeConfig data TypeOfType UnitType :: Name -> TypeOfType TheType :: SimpleDataCon -> TypeOfType MultiType :: [SimpleDataCon] -> TypeOfType typeOfType :: Type -> [DataCon] -> TypeOfType data SimpleDataCon SimpleDataCon :: Name -> [Field] -> SimpleDataCon simplifyDataCon :: DataCon -> SimpleDataCon type Field = (Maybe Name, Type) mkFieldName :: Int -> Name isUnitType :: [DataCon] -> Bool isRecordSyntax :: SimpleDataCon -> Bool c2w :: Char -> Word8 wListToString :: [Word8] -> String xorCmp :: [Word8] -> Name -> Name -> Q Exp serializeW8List :: Name -> Name -> [Word8] -> Q Exp litIntegral :: Integral a => a -> Q Exp litProxy :: Unbox a => Proxy a -> Q Exp matchConstructor :: Name -> Int -> Q Exp -> Q Match openConstructor :: Name -> Int -> Q Pat makeI :: Int -> Name makeN :: Int -> Name makeA :: Int -> Name int_w8 :: Int -> Word8 int_w32 :: Int -> Word32 w32_int :: Word32 -> Int w8_int :: Word8 -> Int _acc :: Name _arr :: Name _endOffset :: Name _initialOffset :: Name _x :: Name _tag :: Name _val :: Name errorUnsupported :: String -> a errorUnimplemented :: a mkDeserializeExprOne :: Name -> SimpleDataCon -> Q Exp mkSerializeExprFields :: Name -> [Field] -> Q Exp mkRecSerializeExpr :: Name -> SimpleDataCon -> Q Exp mkRecDeserializeExpr :: Name -> Name -> Name -> SimpleDataCon -> Q Exp mkRecSizeOfExpr :: SimpleDataCon -> Q Exp conUpdateFuncDec :: Name -> [Field] -> Q [Dec] mkDeserializeKeysDec :: Name -> Name -> SimpleDataCon -> Q [Dec] mkRecSerializeExpr :: Name -> SimpleDataCon -> Q Exp mkRecDeserializeExpr :: Name -> Name -> Name -> SimpleDataCon -> Q Exp mkRecSizeOfExpr :: SimpleDataCon -> Q Exp conUpdateFuncDec :: Name -> [Field] -> Q [Dec] mkDeserializeKeysDec :: Name -> Name -> SimpleDataCon -> Q [Dec] instance Streamly.Internal.Data.Serialize.Type.Serialize Streamly.Internal.Data.MutByteArray.LiftedInteger instance Streamly.Internal.Data.Serialize.Type.Serialize a => Streamly.Internal.Data.Serialize.Type.Serialize (GHC.Maybe.Maybe a) instance (Streamly.Internal.Data.Serialize.Type.Serialize a, Streamly.Internal.Data.Serialize.Type.Serialize b) => Streamly.Internal.Data.Serialize.Type.Serialize (Data.Either.Either a b) instance Streamly.Internal.Data.Serialize.Type.Serialize (Data.Proxy.Proxy a) instance Streamly.Internal.Data.Serialize.Type.Serialize GHC.Num.Integer.Integer -- | A low level byte Array type MutByteArray, along with type -- classes Unbox and Serialize for fast binary -- serialization and deserialization of Haskell values. Serialization, -- deserialization performance is similar to, and in some cases many -- times better than the store package. Conceptually, the -- Serialize type class works in the same way as store. -- -- Serialize instances are configurable to use constructor names (see -- encodeConstrNames), record field names (see -- encodeRecordFields) instead of binary encoded values. This is -- an experimental feature which allows JSON like properties with faster -- speed. For example, you can change the order of constructors or record -- fields without affecting serialized value. -- -- Higher level unboxed array modules Streamly.Data.Array and -- Streamly.Data.MutArray are built on top of this module. Unboxed -- arrays are essentially serialized Haskell values. Array modules -- provide higher level serialization routines like -- pinnedSerialize and deserialize from the -- Streamly.Internal.Data.Array module. -- --

Mutable Byte Array

-- -- MutByteArray is a primitive mutable array in the IO monad. -- Unbox and Serialize type classes use this primitive -- array to serialize data to and deserialize it from. This array is used -- to build higher level unboxed array types MutArray and -- Array. -- --

Using Unbox

-- -- The Unbox type class is simple and used to serialize -- non-recursive fixed size data types. This type class is primarily used -- to implement unboxed arrays. Unboxed arrays are just a sequence of -- serialized fixed length Haskell data types. Instances of this type -- class can be derived using Generic or template haskell based -- deriving functions provided in this module. -- -- Writing a data type to an array using the array creation routines in -- Streamly.Data.Array or Streamly.Data.MutArray (e.g. -- writeN or fromListN), serializes the type to the -- array. Similarly, reading the data type from the array deserializes -- it. You can also serialize and deserialize directly to and from a -- MutByteArray, using the type class methods. -- --

Using Serialize

-- -- The Serialize type class is a superset of the Unbox type -- class, it can serialize variable length data types as well e.g. -- Haskell lists. Use deriveSerialize to derive the instances of -- the type class automatically and then use the type class methods to -- serialize and deserialize to and from a MutByteArray. -- -- See pinnedSerialize and deserialize for Array -- type based serialization. -- --

Comparing serialized values

-- -- When using the Unbox type class the same value may result in -- differing serialized bytes because of unused uninitialized data in -- case of sum types. Therefore, byte comparison of serialized values is -- not reliable. -- -- However, the Serialize type class guarantees that the -- serialized values are always exactly the same and byte comparison of -- serialized is reliable. module Streamly.Data.MutByteArray -- | A lifted mutable byte array type wrapping MutableByteArray# -- RealWorld. This is a low level array used to back high level -- unboxed arrays and serialized data. data MutByteArray -- | Return True if the array is allocated in pinned memory. isPinned :: MutByteArray -> Bool -- | Return a copy of the array in pinned memory if unpinned, else return -- the original array. pin :: MutByteArray -> IO MutByteArray -- | Return a copy of the array in unpinned memory if pinned, else return -- the original array. unpin :: MutByteArray -> IO MutByteArray new :: Int -> IO MutByteArray pinnedNew :: Int -> IO MutByteArray -- | The Unbox type class provides operations for serialization -- (unboxing) and deserialization (boxing) of fixed-length, non-recursive -- Haskell data types to and from their byte stream representation. -- -- Unbox uses fixed size encoding, therefore, size is independent of the -- value, it must be determined solely by the type. This restriction -- makes types with Unbox instances suitable for storing in -- arrays. Note that sum types may have multiple constructors of -- different sizes, the size of a sum type is computed as the maximum -- required by any constructor. -- -- The peekAt operation reads as many bytes from the mutable byte -- array as the size of the data type and builds a Haskell data -- type from these bytes. pokeAt operation converts a Haskell data -- type to its binary representation which consists of size -- bytes and then stores these bytes into the mutable byte array. These -- operations do not check the bounds of the array, the user of the type -- class is expected to check the bounds before peeking or poking. -- -- IMPORTANT: The serialized data's byte ordering remains the same as the -- host machine's byte order. Therefore, it can not be deserialized from -- host machines with a different byte ordering. -- -- Instances can be derived via Generics, Template Haskell, or written -- manually. Note that the data type must be non-recursive. WARNING! -- Generic and Template Haskell deriving, both hang for recursive data -- types. Deriving via Generics is more convenient but Template Haskell -- should be preferred over Generics for the following reasons: -- --
--   1. Instances derived via Template Haskell provide better and more
--      reliable performance.
--   2. Generic deriving allows only 256 fields or constructor tags
--      whereas template Haskell has no limit.
-- -- Here is an example, for deriving an instance of this type class using -- generics: -- --
--   >>> import GHC.Generics (Generic)
--   
--   >>> :{
--   data Object = Object
--       { _int0 :: Int
--       , _int1 :: Int
--       } deriving Generic
--   :}
--   
-- --
--   >>> import Streamly.Data.MutByteArray (Unbox(..))
--   
--   >>> instance Unbox Object
--   
-- -- To derive the instance via Template Haskell: -- --
--   import Streamly.Data.MutByteArray (deriveUnbox)
--   $(deriveUnbox [d|instance Unbox Object|])
--   
-- -- See deriveUnbox for more information on deriving using Template -- Haskell. -- -- If you want to write the instance manually: -- --
--   >>> :{
--   instance Unbox Object where
--       sizeOf _ = 16
--       peekAt i arr = do
--          -- Check the array bounds
--           x0 <- peekAt i arr
--           x1 <- peekAt (i + 8) arr
--           return $ Object x0 x1
--       pokeAt i arr (Object x0 x1) = do
--          -- Check the array bounds
--           pokeAt i arr x0
--           pokeAt (i + 8) arr x1
--   :}
--   
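--
-- As a small hedged check (assuming a 64-bit Int), the reported size of
-- the Object type above is fixed and independent of any value:
--
--   >>> import Data.Proxy (Proxy(..))
--   >>> sizeOf (Proxy :: Proxy Object)
--   16
--   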
class Unbox a -- | Get the size. Size cannot be zero, should be at least 1 byte. sizeOf :: Unbox a => Proxy a -> Int -- | Get the size. Size cannot be zero, should be at least 1 byte. sizeOf :: (Unbox a, SizeOfRep (Rep a)) => Proxy a -> Int -- | peekAt byte-offset array reads an element of type a -- from the the given the byte offset in the array. -- -- IMPORTANT: The implementation of this interface may not check the -- bounds of the array, the caller must not assume that. peekAt :: Unbox a => Int -> MutByteArray -> IO a -- | peekAt byte-offset array reads an element of type a -- from the the given the byte offset in the array. -- -- IMPORTANT: The implementation of this interface may not check the -- bounds of the array, the caller must not assume that. peekAt :: (Unbox a, Generic a, PeekRep (Rep a)) => Int -> MutByteArray -> IO a -- | Deprecated: Use peekAt. peekByteIndex :: Unbox a => Int -> MutByteArray -> IO a -- | pokeAt byte-offset array writes an element of type a -- to the the given the byte offset in the array. -- -- IMPORTANT: The implementation of this interface may not check the -- bounds of the array, the caller must not assume that. pokeAt :: Unbox a => Int -> MutByteArray -> a -> IO () -- | Deprecated: Use pokeAt. pokeByteIndex :: Unbox a => Int -> MutByteArray -> a -> IO () -- | pokeAt byte-offset array writes an element of type a -- to the the given the byte offset in the array. -- -- IMPORTANT: The implementation of this interface may not check the -- bounds of the array, the caller must not assume that. pokeAt :: (Unbox a, Generic a, PokeRep (Rep a)) => Int -> MutByteArray -> a -> IO () -- | Given an Unbox instance declaration splice without the methods -- (e.g. [d|instance Unbox a => Unbox (Maybe a)|]), generate -- an instance declaration including all the type class method -- implementations. -- -- Usage: -- --
--   $(deriveUnbox [d|instance Unbox a => Unbox (Maybe a)|])
--   
deriveUnbox :: Q [Dec] -> Q [Dec] -- | The Serialize type class provides operations for serialization -- and deserialization of general Haskell data types to and from their -- byte stream representation. -- -- Unlike Unbox, Serialize uses variable length encoding, -- therefore, it can serialize recursive and variable length data types -- like lists, or variable length sum types where the length of the value -- may vary depending on a particular constructor. For variable length -- data types the length is encoded along with the data. -- -- The deserializeAt operation reads bytes from the mutable byte -- array and builds a Haskell data type from these bytes, the number of -- bytes it reads depends on the type and the encoded value it is -- reading. serializeAt operation converts a Haskell data type to -- its binary representation which must consist of as many bytes as added -- by the addSizeTo operation for that value and then stores -- these bytes into the mutable byte array. The programmer is expected to -- use the addSizeTo operation and allocate an array of -- sufficient length before calling serializeAt. -- -- IMPORTANT: The serialized data's byte ordering remains the same as the -- host machine's byte order. Therefore, it can not be deserialized from -- host machines with a different byte ordering. -- -- Instances can be derived via Template Haskell, or written manually. -- -- Here is an example, for deriving an instance of this type class using -- template Haskell: -- --
--   >>> :{
--   data Object = Object
--       { _obj1 :: [Int]
--       , _obj2 :: Int
--       }
--   :}
--   
-- --
--   import Streamly.Data.MutByteArray (deriveSerialize)
--   $(deriveSerialize [d|instance Serialize Object|])
--   
-- -- See deriveSerialize and deriveSerializeWith for more -- information on deriving using Template Haskell. -- -- Here is an example of a manual instance. -- --
--   >>> import Streamly.Data.MutByteArray (Serialize(..))
--   
-- --
--   >>> :{
--   instance Serialize Object where
--       addSizeTo acc obj = addSizeTo (addSizeTo acc (_obj1 obj)) (_obj2 obj)
--       deserializeAt i arr len = do
--            -- Check the array bounds before reading
--           (i1, x0) <- deserializeAt i arr len
--           (i2, x1) <- deserializeAt i1 arr len
--           pure (i2, Object x0 x1)
--       serializeAt i arr (Object x0 x1) = do
--           i1 <- serializeAt i arr x0
--           i2 <- serializeAt i1 arr x1
--           pure i2
--   :}
--   
class Serialize a -- | addSizeTo accum value returns accum incremented by -- the size of the serialized representation of value in bytes. -- Size cannot be zero. It should be at least 1 byte. addSizeTo :: Serialize a => Int -> a -> Int -- | deserializeAt byte-offset array arrayLen deserializes a value -- from the given byte-offset in the array. Returns a tuple consisting of -- the next byte-offset and the deserialized value. -- -- The arrayLen passed is the entire length of the input buffer. It is to -- be used to check if we would overflow the input buffer when -- deserializing. -- -- Throws an exception if the operation would exceed the supplied -- arrayLen. deserializeAt :: Serialize a => Int -> MutByteArray -> Int -> IO (Int, a) -- | serializeAt byte-offset array value writes the serialized -- representation of the value in the array at the given -- byte-offset. Returns the next byte-offset. -- -- This is an unsafe operation, the programmer must ensure that the array -- has enough space available to serialize the value as determined by the -- addSizeTo operation. serializeAt :: Serialize a => Int -> MutByteArray -> a -> IO Int -- | Configuration to control how the Serialize instance is -- generated. The configuration is opaque and is modified by composing -- config modifier functions, for example: -- --
--   >>> (inlineSerializeAt (Just NoInline)) . (inlineSerializeAt (Just Inlinable))
--   
-- -- The default configuration settings are: -- -- -- -- The following experimental options are also available: -- -- data SerializeConfig -- | How should we inline the addSizeTo function? The default is -- Nothing which means left to the compiler. Forcing inline on -- addSizeTo function actually worsens some benchmarks and -- improves none. inlineAddSizeTo :: Maybe Inline -> SerializeConfig -> SerializeConfig -- | How should we inline the serialize function? The default -- 'Just Inline'. However, aggressive inlining can bloat the code and -- increase in compilation times when there are big functions and too -- many nesting levels so you can change it accordingly. A Nothing -- value leaves the decision to the compiler. inlineSerializeAt :: Maybe Inline -> SerializeConfig -> SerializeConfig -- | How should we inline the deserialize function? See guidelines -- in inlineSerializeAt. inlineDeserializeAt :: Maybe Inline -> SerializeConfig -> SerializeConfig -- | Given an Serialize instance declaration splice without the -- methods (e.g. [d|instance Serialize a => Serialize (Maybe -- a)|]), generate an instance declaration including all the type -- class method implementations. -- --
--   >>> deriveSerialize = deriveSerializeWith id
--   
-- -- Usage: -- --
--   $(deriveSerialize
--         [d|instance Serialize a => Serialize (Maybe a)|])
--   
deriveSerialize :: Q [Dec] -> Q [Dec] -- | deriveSerializeWith config-modifier instance-dec generates a -- template Haskell splice consisting of a declaration of a -- Serialize instance. instance-dec is a template Haskell -- declaration splice consisting of a standard Haskell instance -- declaration without the type class methods (e.g. [d|instance -- Serialize a => Serialize (Maybe a)|]). -- -- The type class methods for the given instance are generated according -- to the supplied config-modifier parameter. See -- SerializeConfig for default configuration settings. -- -- Usage: -- --
--   $(deriveSerializeWith
--         ( inlineSerializeAt (Just NoInline)
--         . inlineDeserializeAt (Just NoInline)
--         )
--         [d|instance Serialize a => Serialize (Maybe a)|])
--   
deriveSerializeWith :: (SerializeConfig -> SerializeConfig) -> Q [Dec] -> Q [Dec] -- | Use Streamly.Data.Parser instead. -- -- Fold a stream of foreign arrays. Fold m a b in this module -- works on a stream of "Array a" and produces an output of type -- b. -- -- Though Fold m a b in this module works on a stream of -- Array a it is different from Data.Fold m (Array a) -- b. While the latter works on arrays as a whole treating them as -- atomic elements, the folds in this module can work on the stream of -- arrays as if it is an element stream with all the arrays coalesced -- together. This module allows adapting the element stream folds in -- Data.Fold to correctly work on an array stream as if it is an element -- stream. For example: -- --
--   > import qualified Streamly.Data.Fold as Fold
--   > import qualified Streamly.Internal.Data.Array.Stream as ArrayStream
--   > import qualified Streamly.Internal.Data.Fold.Chunked as ChunkFold
--   > import qualified Streamly.Data.Stream as Stream
--   > import qualified Streamly.Data.StreamK as StreamK
--   
-- --
--   > f = ChunkFold.fromFold (Fold.take 7 Fold.toList)
--   > s = Stream.chunksOf 5 $ Stream.fromList "hello world"
--   > ArrayStream.runArrayFold f (StreamK.fromStream s)
--   
-- -- Right "hello w" -- | Deprecated: Please use Streamly.Data.Parser instead. module Streamly.Internal.Data.Fold.Chunked -- | Array stream fold. -- -- An array stream fold is basically an array stream Parser that -- does not fail. In case of array stream folds the count in -- Partial, Continue and Done is a count of elements -- that includes the leftover element count in the array that is -- currently being processed by the parser. If none of the elements is -- consumed by the parser the count is at least the whole array length. -- If the whole array is consumed by the parser then the count will be 0. -- -- Pre-release newtype ChunkFold m a b ChunkFold :: Parser (Array a) m b -> ChunkFold m a b -- | Convert an element Fold into an array stream fold. -- -- Pre-release fromFold :: forall m a b. (MonadIO m, Unbox a) => Fold m a b -> ChunkFold m a b -- | Adapt an array stream fold. -- -- Pre-release adaptFold :: forall m a b. MonadIO m => Fold m (Array a) b -> ChunkFold m a b -- | Convert an element Parser into an array stream fold. If the -- parser fails the fold would throw an exception. -- -- Pre-release fromParser :: forall m a b. (MonadIO m, Unbox a) => Parser a m b -> ChunkFold m a b -- | Convert an element Parser into an array stream fold. If the -- parser fails the fold would throw an exception. -- -- Pre-release fromParserD :: forall m a b. (MonadIO m, Unbox a) => Parser a m b -> ChunkFold m a b -- | Map a monadic function on the output of a fold. -- -- Pre-release rmapM :: Monad m => (b -> m c) -> ChunkFold m a b -> ChunkFold m a c -- | A fold that always yields a pure value without consuming any input. -- -- Pre-release fromPure :: Monad m => b -> ChunkFold m a b -- | A fold that always yields the result of an effectful action without -- consuming any input. -- -- Pre-release fromEffect :: Monad m => m b -> ChunkFold m a b -- | Applies two folds sequentially on the input stream and combines their -- results using the supplied function. -- -- Pre-release splitWith :: Monad m => (a -> b -> c) -> ChunkFold m x a -> ChunkFold m x b -> ChunkFold m x c -- | Applies a fold on the input stream, generates the next fold from the -- output of the previously applied fold and then applies that fold. -- -- Pre-release concatMap :: Monad m => (b -> ChunkFold m a c) -> ChunkFold m a b -> ChunkFold m a c -- | Take n array elements (a) from a stream of arrays -- (Array a). take :: forall m a b. (Monad m, Unbox a) => Int -> ChunkFold m a b -> ChunkFold m a b instance GHC.Base.Functor m => GHC.Base.Functor (Streamly.Internal.Data.Fold.Chunked.ChunkFold m a) instance GHC.Base.Monad m => GHC.Base.Applicative (Streamly.Internal.Data.Fold.Chunked.ChunkFold m a) instance GHC.Base.Monad m => GHC.Base.Monad (Streamly.Internal.Data.Fold.Chunked.ChunkFold m a) -- | Combinators to efficiently manipulate streams of immutable arrays. -- -- We can either push these in the MutArray module with a "chunks" prefix -- or keep this as a separate module and release it. -- | Deprecated: Please use Streamly.Internal.Data.Array -- instead. module Streamly.Internal.Data.Array.Stream -- | chunksOf n stream groups the elements in the input stream -- into arrays of n elements each. -- -- Same as the following but may be more efficient: -- --
--   >>> chunksOf n = Stream.foldMany (Array.writeN n)
--   
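--
-- For illustration, a hedged sketch grouping a seven element stream into
-- arrays of three elements each:
--
--   >>> import qualified Streamly.Data.Array as Array
--   >>> import qualified Streamly.Data.Stream as Stream
--   >>> Stream.toList $ fmap Array.toList $ chunksOf 3 (Stream.fromList [1..7::Int])
--   [[1,2,3],[4,5,6],[7]]
--   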
-- -- Pre-release chunksOf :: forall m a. (MonadIO m, Unbox a) => Int -> Stream m a -> Stream m (Array a) -- | Like chunksOf but creates pinned arrays. pinnedChunksOf :: forall m a. (MonadIO m, Unbox a) => Int -> Stream m a -> Stream m (Array a) -- | Deprecated: Please use buildChunks instead. bufferChunks :: (MonadIO m, Unbox a) => Stream m a -> m (StreamK m (Array a)) -- | Convert a stream of arrays into a stream of their elements. -- --
--   >>> concat = Stream.unfoldMany Array.reader
--   
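--
-- For illustration, a hedged sketch flattening a stream of two arrays of
-- characters:
--
--   >>> import qualified Streamly.Data.Array as Array
--   >>> import qualified Streamly.Data.Stream as Stream
--   >>> Stream.toList $ concat $ Stream.fromList [Array.fromList "ab", Array.fromList "cd"]
--   "abcd"
--   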
concat :: (Monad m, Unbox a) => Stream m (Array a) -> Stream m a -- | Deprecated: Please use "unfoldMany reader" instead. flattenArrays :: forall m a. (MonadIO m, Unbox a) => Stream m (Array a) -> Stream m a -- | Convert a stream of arrays into a stream of their elements reversing -- the contents of each array before flattening. -- --
--   >>> concatRev = Stream.unfoldMany Array.readerRev
--   
concatRev :: forall m a. (Monad m, Unbox a) => Stream m (Array a) -> Stream m a -- | Deprecated: Please use "unfoldMany readerRev" instead. flattenArraysRev :: forall m a. (MonadIO m, Unbox a) => Stream m (Array a) -> Stream m a -- | Insert the given element between arrays and flatten. -- --
--   >>> interpose x = Stream.interpose x Array.reader
--   
interpose :: (Monad m, Unbox a) => a -> Stream m (Array a) -> Stream m a -- | Insert the given element after each array and flatten. This is similar -- to unlines. -- --
--   >>> interposeSuffix x = Stream.interposeSuffix x Array.reader
--   
interposeSuffix :: forall m a. (Monad m, Unbox a) => a -> Stream m (Array a) -> Stream m a -- | Insert the given array after each array and flatten. -- --
--   >>> intercalateSuffix = Stream.intercalateSuffix Array.reader
--   
intercalateSuffix :: (Monad m, Unbox a) => Array a -> Stream m (Array a) -> Stream m a unlines :: forall m a. (MonadIO m, Unbox a) => a -> Stream m (Array a) -> Stream m a -- | Fold an array stream using the supplied Fold. Returns the fold -- result and the unconsumed stream. -- --
--   foldBreak f = runArrayFoldBreak (ChunkFold.fromFold f)
--   
-- -- Instead of using this we can adapt the fold to ParserK and use -- parseBreakChunks instead. ParserK allows composing using Monad as -- well. -- --
--   foldBreak f s =
--         fmap (first (fromRight undefined))
--       $ K.parseBreakChunks (ParserK.adaptC (PR.fromFold f)) s
--   
-- -- We can compare perf and remove this one or define it in terms of that. -- -- Internal foldBreak :: (MonadIO m, Unbox a) => Fold m a b -> StreamK m (Array a) -> m (b, StreamK m (Array a)) foldBreakD :: forall m a b. (MonadIO m, Unbox a) => Fold m a b -> Stream m (Array a) -> m (b, Stream m (Array a)) -- | Parse an array stream using the supplied Parser. Returns the -- parse result and the unconsumed stream. Throws ParseError if -- the parse fails. -- --
--   > parseBreak p = K.parseBreakChunks (ParserK.adaptC p)
--   
-- -- This is redundant and we can just use parseBreakChunks, as ParserK can -- be composed using Monad. The only advantage of this is that we do not -- need to adapt. -- -- We can compare perf and remove this one or define it in terms of that. -- -- Internal parseBreak :: (MonadIO m, Unbox a) => Parser a m b -> StreamK m (Array a) -> m (Either ParseError b, StreamK m (Array a)) -- | Run a ParserK over a chunked StreamK and return the -- parse result and the remaining Stream. parseBreakChunks :: (Monad m, Unbox a) => ParserK (Array a) m b -> StreamK m (Array a) -> m (Either ParseError b, StreamK m (Array a)) parseChunks :: (Monad m, Unbox a) => ParserK (Array a) m b -> StreamK m (Array a) -> m (Either ParseError b) -- | Fold an array stream using the supplied array stream Fold. -- -- Pre-release runArrayFold :: (MonadIO m, Unbox a) => ChunkFold m a b -> StreamK m (Array a) -> m (Either ParseError b) -- | Like fold but also returns the remaining stream. -- -- Pre-release runArrayFoldBreak :: (MonadIO m, Unbox a) => ChunkFold m a b -> StreamK m (Array a) -> m (Either ParseError b, StreamK m (Array a)) -- | Note that this is not the same as using a Parser (Array a) m -- b with the regular "Streamly.Internal.Data.IsStream.parse" -- function. The regular parse would consume the input arrays as single -- unit. This parser parses in the way as described in the ChunkFold -- module. The input arrays are treated as n element units and -- can be consumed partially. The remaining elements are inserted in the -- source stream as an array. runArrayParserDBreak :: forall m a b. (MonadIO m, Unbox a) => Parser (Array a) m b -> Stream m (Array a) -> m (Either ParseError b, Stream m (Array a)) -- | Apply an ChunkFold repeatedly on an array stream and emit the -- fold outputs in the output stream. -- -- See "Streamly.Data.Stream.foldMany" for more details. -- -- Pre-release runArrayFoldMany :: (Monad m, Unbox a) => ChunkFold m a b -> StreamK m (Array a) -> StreamK m (Either ParseError b) -- | Given a stream of arrays, splice them all together to generate a -- single array. The stream must be finite. toArray :: (MonadIO m, Unbox a) => Stream m (Array a) -> m (Array a) lpackArraysChunksOf :: (MonadIO m, Unbox a) => Int -> Fold m (Array a) () -> Fold m (Array a) () -- | Coalesce adjacent arrays in incoming stream to form bigger arrays of a -- maximum specified size in bytes. compact :: (MonadIO m, Unbox a) => Int -> Stream m (Array a) -> Stream m (Array a) -- | Split a stream of arrays on a given separator byte, dropping the -- separator and coalescing all the arrays between two separators into a -- single array. splitOn :: MonadIO m => Word8 -> Stream m (Array Word8) -> Stream m (Array Word8) splitOnSuffix :: MonadIO m => Word8 -> Stream m (Array Word8) -> Stream m (Array Word8) -- | See the general notes about parsing in the Streamly.Data.Parser -- module. This module implements a using Continuation Passing Style -- (CPS) wrapper over the Streamly.Data.Parser module. It is as -- fast or faster than attoparsec. -- --

Parser vs ParserK

-- -- ParserK is preferred over Parser when extensive -- applicative, alternative and monadic composition is required, or when -- recursive or dynamic composition of parsers is required. The -- Parser type fuses statically and creates efficient loops -- whereas ParserK uses function call based composition and has -- comparatively larger runtime overhead but it is better suited to the -- specific use cases mentioned above. ParserK also allows to -- efficient parse a stream of arrays, it can also break the input stream -- into a parse result and remaining stream so that the stream can be -- parsed independently in segments. -- --

Using ParserK

-- -- All the parsers from the Streamly.Data.Parser module can be -- adapted to ParserK using the adaptC, adapt, and -- adaptCG combinators. -- -- parseChunks runs a parser on a stream of unboxed arrays, this -- is the preferred and most efficient way to parse chunked input. The -- more general parseBreakChunks function returns the remaining -- stream as well along with the parse result. There are -- parseChunksGeneric, parseBreakChunksGeneric as well to -- run parsers on boxed arrays. parse, parseBreak run -- parsers on a stream of individual elements instead of stream of -- arrays. -- --

Monadic Composition

-- -- Monad composition can be used for lookbehind parsers, we can -- dynamically compose new parsers based on the results of the previously -- parsed values. -- -- If we have to parse "a9" or "9a" but not "99" or "aa" we can use the -- following non-monadic, backtracking parser: -- --
--   >>> digits p1 p2 = ((:) <$> p1 <*> ((:) <$> p2 <*> pure []))
--   
--   >>> :{
--   backtracking :: Monad m => ParserK Char m String
--   backtracking = ParserK.adapt $
--       digits (Parser.satisfy isDigit) (Parser.satisfy isAlpha)
--       <|>
--       digits (Parser.satisfy isAlpha) (Parser.satisfy isDigit)
--   :}
--   
-- -- We know that if the first parse resulted in a digit at the first place -- then the second parse is going to fail. However, we waste that -- information and parse the first character again in the second parse -- only to know that it is not an alphabetic char. By using lookbehind in -- a Monad composition we can avoid redundant work: -- --
--   >>> data DigitOrAlpha = Digit Char | Alpha Char
--   
-- --
--   >>> :{
--   lookbehind :: Monad m => ParserK Char m String
--   lookbehind = do
--       x1 <- ParserK.adapt $
--                Digit <$> Parser.satisfy isDigit
--            <|> Alpha <$> Parser.satisfy isAlpha
--       -- Note: the parse depends on what we parsed already
--       x2 <- ParserK.adapt $
--             case x1 of
--                Digit _ -> Parser.satisfy isAlpha
--                Alpha _ -> Parser.satisfy isDigit
--       return $ case x1 of
--           Digit x -> [x,x2]
--           Alpha x -> [x,x2]
--   :}
--   
-- --

Experimental APIs

-- -- Please refer to Streamly.Internal.Data.ParserK for functions -- that have not yet been released. module Streamly.Data.ParserK -- | A continuation passing style parser representation. A continuation of -- Steps, each step passes a state and a parse result to the next -- Step. The resulting Step may carry a continuation that -- consumes input a and results in another Step. -- Essentially, the continuation may either consume input without a -- result or return a result with no further input to be consumed. data ParserK a m b -- | Convert a Parser to ParserK. -- -- Pre-release adapt :: Monad m => Parser a m b -> ParserK a m b -- | Convert an element Parser to a chunked ParserK. A -- chunked parser is more efficient than an element parser. -- -- Pre-release adaptC :: (Monad m, Unbox a) => Parser a m b -> ParserK (Array a) m b -- | A generic adaptC. Similar to adaptC but is not -- constrained to Unbox types. -- -- Pre-release adaptCG :: Monad m => Parser a m b -> ParserK (Array a) m b -- | A parser that always yields a pure value without consuming any input. -- -- Pre-release fromPure :: b -> ParserK a m b -- | See fromEffect. -- -- Pre-release fromEffect :: Monad m => m b -> ParserK a m b -- | A parser that always fails with an error message without consuming any -- input. -- -- Pre-release die :: String -> ParserK a m b -- | Deprecated: Please use "ParserK.adaptC . Parser.fromFold" -- instead. fromFold :: (MonadIO m, Unbox a) => Fold m a b -> ParserK (Array a) m b -- | Deprecated: Please use "adaptC" instead. fromParser :: (MonadIO m, Unbox a) => Parser a m b -> ParserK (Array a) m b -- | Unboxed immutable arrays with streaming interfaces. -- -- Please refer to Streamly.Internal.Data.Array for functions that -- have not yet been released. -- -- For arrays that work on boxed types, not requiring the Unbox -- constraint, please refer to Streamly.Data.Array.Generic. For -- arrays that can be mutated in-place, please see -- Streamly.Data.MutArray. module Streamly.Data.Array data Array a -- | Return a copy of the Array in pinned memory if unpinned, else -- return the original array. pin :: Array a -> IO (Array a) -- | Return a copy of the Array in unpinned memory if pinned, else -- return the original array. unpin :: Array a -> IO (Array a) -- | Return True if the array is allocated in pinned memory. isPinned :: Array a -> Bool -- | createOf n folds a maximum of n elements from the -- input stream to an Array. createOf :: forall m a. (MonadIO m, Unbox a) => Int -> Fold m a (Array a) -- | Fold the whole input to a single array. -- -- Caution! Do not use this on infinite streams. create :: forall m a. (MonadIO m, Unbox a) => Fold m a (Array a) -- | writeLastN n folds a maximum of n elements from the -- end of the input stream to an Array. writeLastN :: (Storable a, Unbox a, MonadIO m) => Int -> Fold m a (Array a) -- | Create an Array from the first N elements of a list. The array -- is allocated to size N, if the list terminates before N elements then -- the array may hold less than N elements. fromListN :: Unbox a => Int -> [a] -> Array a -- | Create an Array from a list. The list must be of finite size. fromList :: Unbox a => [a] -> Array a -- | Convert an Array into a list. toList :: Unbox a => Array a -> [a] -- | Convert an Array into a stream. -- -- Pre-release read :: (Monad m, Unbox a) => Array a -> Stream m a -- | Convert an Array into a stream in reverse order. -- -- Pre-release readRev :: (Monad m, Unbox a) => Array a -> Stream m a -- | Unfold an array into a stream. 
reader :: forall m a. (Monad m, Unbox a) => Unfold m (Array a) a -- | Unfold an array into a stream in reverse order. readerRev :: forall m a. (Monad m, Unbox a) => Unfold m (Array a) a -- | Cast an array having elements of type a into an array having -- elements of type b. The length of the array should be a -- multiple of the size of the target element otherwise Nothing is -- returned. cast :: forall a b. Unbox b => Array a -> Maybe (Array b) -- | Cast an Array a into an Array Word8. asBytes :: Array a -> Array Word8 -- | O(1) Get the length of the array i.e. the number of elements in -- the array. length :: Unbox a => Array a -> Int -- | O(1) Lookup the element at the given index. Index starts from -- 0. getIndex :: forall a. Unbox a => Int -> Array a -> Maybe a -- | Serialize a Haskell type to a pinned byte array. The array is -- allocated using pinned memory so that it can be used directly in OS -- APIs for writing to file or sending over the network. -- -- Properties: 1. Identity: deserialize . pinnedSerialize == id -- 2. Encoded equivalence: pinnedSerialize a == pinnedSerialize -- a pinnedSerialize :: Serialize a => a -> Array Word8 -- | Decode a Haskell type from a byte array containing its serialized -- representation. deserialize :: Serialize a => Array Word8 -> a -- | The Unbox type class provides operations for serialization -- (unboxing) and deserialization (boxing) of fixed-length, non-recursive -- Haskell data types to and from their byte stream representation. -- -- Unbox uses fixed size encoding, therefore, size is independent of the -- value, it must be determined solely by the type. This restriction -- makes types with Unbox instances suitable for storing in -- arrays. Note that sum types may have multiple constructors of -- different sizes, the size of a sum type is computed as the maximum -- required by any constructor. -- -- The peekAt operation reads as many bytes from the mutable byte -- array as the size of the data type and builds a Haskell data -- type from these bytes. pokeAt operation converts a Haskell data -- type to its binary representation which consists of size -- bytes and then stores these bytes into the mutable byte array. These -- operations do not check the bounds of the array, the user of the type -- class is expected to check the bounds before peeking or poking. -- -- IMPORTANT: The serialized data's byte ordering remains the same as the -- host machine's byte order. Therefore, it can not be deserialized from -- host machines with a different byte ordering. -- -- Instances can be derived via Generics, Template Haskell, or written -- manually. Note that the data type must be non-recursive. WARNING! -- Generic and Template Haskell deriving, both hang for recursive data -- types. Deriving via Generics is more convenient but Template Haskell -- should be preferred over Generics for the following reasons: -- --
--   1. Instances derived via Template Haskell provide better and more
--      reliable performance.
--   2. Generic deriving allows only 256 fields or constructor tags
--      whereas template Haskell has no limit.
-- -- Here is an example, for deriving an instance of this type class using -- generics: -- --
--   >>> import GHC.Generics (Generic)
--   
--   >>> :{
--   data Object = Object
--       { _int0 :: Int
--       , _int1 :: Int
--       } deriving Generic
--   :}
--   
-- --
--   >>> import Streamly.Data.MutByteArray (Unbox(..))
--   
--   >>> instance Unbox Object
--   
-- -- To derive the instance via Template Haskell: -- --
--   import Streamly.Data.MutByteArray (deriveUnbox)
--   $(deriveUnbox [d|instance Unbox Object|])
--   
-- -- See deriveUnbox for more information on deriving using Template -- Haskell. -- -- If you want to write the instance manually: -- --
--   >>> :{
--   instance Unbox Object where
--       sizeOf _ = 16
--       peekAt i arr = do
--          -- Check the array bounds
--           x0 <- peekAt i arr
--           x1 <- peekAt (i + 8) arr
--           return $ Object x0 x1
--       pokeAt i arr (Object x0 x1) = do
--          -- Check the array bounds
--           pokeAt i arr x0
--           pokeAt (i + 8) arr x1
--   :}
--   
class Unbox a -- | Get the size. Size cannot be zero, should be at least 1 byte. sizeOf :: Unbox a => Proxy a -> Int -- | Get the size. Size cannot be zero, should be at least 1 byte. sizeOf :: (Unbox a, SizeOfRep (Rep a)) => Proxy a -> Int -- | peekAt byte-offset array reads an element of type a -- from the the given the byte offset in the array. -- -- IMPORTANT: The implementation of this interface may not check the -- bounds of the array, the caller must not assume that. peekAt :: Unbox a => Int -> MutByteArray -> IO a -- | peekAt byte-offset array reads an element of type a -- from the the given the byte offset in the array. -- -- IMPORTANT: The implementation of this interface may not check the -- bounds of the array, the caller must not assume that. peekAt :: (Unbox a, Generic a, PeekRep (Rep a)) => Int -> MutByteArray -> IO a -- | Deprecated: Use peekAt. peekByteIndex :: Unbox a => Int -> MutByteArray -> IO a -- | pokeAt byte-offset array writes an element of type a -- to the the given the byte offset in the array. -- -- IMPORTANT: The implementation of this interface may not check the -- bounds of the array, the caller must not assume that. pokeAt :: Unbox a => Int -> MutByteArray -> a -> IO () -- | Deprecated: Use pokeAt. pokeByteIndex :: Unbox a => Int -> MutByteArray -> a -> IO () -- | pokeAt byte-offset array writes an element of type a -- to the the given the byte offset in the array. -- -- IMPORTANT: The implementation of this interface may not check the -- bounds of the array, the caller must not assume that. pokeAt :: (Unbox a, Generic a, PokeRep (Rep a)) => Int -> MutByteArray -> a -> IO () -- | The Serialize type class provides operations for serialization -- and deserialization of general Haskell data types to and from their -- byte stream representation. -- -- Unlike Unbox, Serialize uses variable length encoding, -- therefore, it can serialize recursive and variable length data types -- like lists, or variable length sum types where the length of the value -- may vary depending on a particular constructor. For variable length -- data types the length is encoded along with the data. -- -- The deserializeAt operation reads bytes from the mutable byte -- array and builds a Haskell data type from these bytes, the number of -- bytes it reads depends on the type and the encoded value it is -- reading. serializeAt operation converts a Haskell data type to -- its binary representation which must consist of as many bytes as added -- by the addSizeTo operation for that value and then stores -- these bytes into the mutable byte array. The programmer is expected to -- use the addSizeTo operation and allocate an array of -- sufficient length before calling serializeAt. -- -- IMPORTANT: The serialized data's byte ordering remains the same as the -- host machine's byte order. Therefore, it can not be deserialized from -- host machines with a different byte ordering. -- -- Instances can be derived via Template Haskell, or written manually. -- -- Here is an example, for deriving an instance of this type class using -- template Haskell: -- --
--   >>> :{
--   data Object = Object
--       { _obj1 :: [Int]
--       , _obj2 :: Int
--       }
--   :}
--   
-- --
--   import Streamly.Data.MutByteArray (deriveSerialize)
--   $(deriveSerialize [d|instance Serialize Object|])
--   
-- -- See deriveSerialize and deriveSerializeWith for more -- information on deriving using Template Haskell. -- -- Here is an example of a manual instance. -- --
--   >>> import Streamly.Data.MutByteArray (Serialize(..))
--   
-- --
--   >>> :{
--   instance Serialize Object where
--       addSizeTo acc obj = addSizeTo (addSizeTo acc (_obj1 obj)) (_obj2 obj)
--       deserializeAt i arr len = do
--            -- Check the array bounds before reading
--           (i1, x0) <- deserializeAt i arr len
--           (i2, x1) <- deserializeAt i1 arr len
--           pure (i2, Object x0 x1)
--       serializeAt i arr (Object x0 x1) = do
--           i1 <- serializeAt i arr x0
--           i2 <- serializeAt i1 arr x1
--           pure i2
--   :}
--   
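-- -- For illustration only (not part of the library documentation), a -- serialize-then-deserialize round trip could be written as below; it assumes -- that MutByteArray.new from Streamly.Data.MutByteArray allocates an -- uninitialized mutable byte array of the given size: -- --
--   >>> import qualified Streamly.Data.MutByteArray as MutByteArray
--   >>> :{
--   serializeRoundTrip :: Object -> IO Object
--   serializeRoundTrip obj = do
--       -- compute the encoded size, then allocate a big enough array
--       let end = addSizeTo 0 obj
--       arr <- MutByteArray.new end
--       _ <- serializeAt 0 arr obj
--       (_, obj') <- deserializeAt 0 arr end
--       return obj'
--   :}
--   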
class Serialize a -- | addSizeTo accum value returns accum incremented by -- the size of the serialized representation of value in bytes. -- Size cannot be zero. It should be at least 1 byte. addSizeTo :: Serialize a => Int -> a -> Int -- | deserializeAt byte-offset array arrayLen deserializes a value -- from the given byte-offset in the array. Returns a tuple consisting of -- the next byte-offset and the deserialized value. -- -- The arrayLen passed is the entire length of the input buffer. It is to -- be used to check if we would overflow the input buffer when -- deserializing. -- -- Throws an exception if the operation would exceed the supplied -- arrayLen. deserializeAt :: Serialize a => Int -> MutByteArray -> Int -> IO (Int, a) -- | serializeAt byte-offset array value writes the serialized -- representation of the value in the array at the given -- byte-offset. Returns the next byte-offset. -- -- This is an unsafe operation, the programmer must ensure that the array -- has enough space available to serialize the value as determined by the -- addSizeTo operation. serializeAt :: Serialize a => Int -> MutByteArray -> a -> IO Int writeN :: forall m a. (MonadIO m, Unbox a) => Int -> Fold m a (Array a) write :: forall m a. (MonadIO m, Unbox a) => Fold m a (Array a) -- | Decode Haskell data types from byte streams. -- -- It would be inefficient to use this to compose parsers for general -- algebraic data types. For general deserialization of ADTs please use -- the Serialize type class instances. The fastest way to deserialize -- byte streams representing Haskell data types is to write them to -- arrays and deserialize the array using the Serialize type class. module Streamly.Internal.Data.Binary.Parser class FromBytes a -- | Decode a byte stream to a Haskell type. fromBytes :: FromBytes a => Parser Word8 m a -- | A value of type () is encoded as 0 in binary -- encoding. -- --
--   0 ==> ()
--   
-- -- Pre-release unit :: Monad m => Parser Word8 m () -- | A value of type Bool is encoded as follows in binary encoding. -- --
--   0 ==> False
--   1 ==> True
--   
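-- -- For example, such a parser can be run on a byte stream with the usual -- stream parse driver; this is an illustrative sketch (the module aliases are -- assumed) and the result shown follows from the encoding table above: -- --
--   >>> import qualified Streamly.Data.Stream as Stream
--   >>> import qualified Streamly.Internal.Data.Binary.Parser as Binary
--   >>> Stream.parse Binary.bool (Stream.fromList [1])
--   Right True
--   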
-- -- Pre-release bool :: Monad m => Parser Word8 m Bool -- | A value of type Ordering is encoded as follows in binary -- encoding. -- --
--   0 ==> LT
--   1 ==> EQ
--   2 ==> GT
--   
--   
-- -- Pre-release ordering :: Monad m => Parser Word8 m Ordering -- | Accept the input byte only if it is equal to the specified value. -- -- Pre-release eqWord8 :: Monad m => Word8 -> Parser Word8 m Word8 -- | Accept any byte. -- -- Pre-release word8 :: Monad m => Parser Word8 m Word8 -- | Parse two bytes as a Word16, the first byte is the MSB of the -- Word16 and second byte is the LSB (big endian representation). -- -- Pre-release word16be :: Monad m => Parser Word8 m Word16 -- | Parse two bytes as a Word16, the first byte is the LSB of the -- Word16 and second byte is the MSB (little endian representation). -- -- Pre-release word16le :: Monad m => Parser Word8 m Word16 -- | Parse four bytes as a Word32, the first byte is the MSB of the -- Word32 and last byte is the LSB (big endian representation). -- -- Pre-release word32be :: Monad m => Parser Word8 m Word32 -- | Parse four bytes as a Word32, the first byte is the LSB of the -- Word32 and last byte is the MSB (little endian representation). -- -- Pre-release word32le :: Monad m => Parser Word8 m Word32 -- | Parse eight bytes as a Word64, the first byte is the MSB of the -- Word64 and last byte is the LSB (big endian representation). -- -- Pre-release word64be :: Monad m => Parser Word8 m Word64 -- | Parse eight bytes as a Word64, the first byte is the LSB of the -- Word64 and last byte is the MSB (little endian representation). -- -- Pre-release word64le :: Monad m => Parser Word8 m Word64 -- | Parse eight bytes as a Word64 in the host byte order. -- -- Pre-release word64host :: MonadIO m => Parser Word8 m Word64 int8 :: Monad m => Parser Word8 m Int8 -- | Parse two bytes as an Int16, the first byte is the MSB of the -- Int16 and second byte is the LSB (big endian representation). -- -- Pre-release int16be :: Monad m => Parser Word8 m Int16 -- | Parse two bytes as an Int16, the first byte is the LSB of the -- Int16 and second byte is the MSB (little endian representation). -- -- Pre-release int16le :: Monad m => Parser Word8 m Int16 -- | Parse four bytes as an Int32, the first byte is the MSB of the -- Int32 and last byte is the LSB (big endian representation). -- -- Pre-release int32be :: Monad m => Parser Word8 m Int32 -- | Parse four bytes as an Int32, the first byte is the LSB of the -- Int32 and last byte is the MSB (little endian representation). -- -- Pre-release int32le :: Monad m => Parser Word8 m Int32 -- | Parse eight bytes as an Int64, the first byte is the MSB of the -- Int64 and last byte is the LSB (big endian representation). -- -- Pre-release int64be :: Monad m => Parser Word8 m Int64 -- | Parse eight bytes as an Int64, the first byte is the LSB of the -- Int64 and last byte is the MSB (little endian representation). -- -- Pre-release int64le :: Monad m => Parser Word8 m Int64 float32be :: MonadIO m => Parser Word8 m Float float32le :: MonadIO m => Parser Word8 m Float double64be :: MonadIO m => Parser Word8 m Double double64le :: MonadIO m => Parser Word8 m Double -- | Accept any byte as a Latin1 Char. -- -- Pre-release charLatin1 :: Monad m => Parser Word8 m Char -- | To parse a text input, use the decode routines from -- Streamly.Unicode.Stream module to convert an input byte stream -- to a Unicode Char stream and then use these parsers on the Char -- stream. module Streamly.Internal.Unicode.Parser -- | Match a specific character. char :: Monad m => Char -> Parser Char m Char -- | Match a specific character ignoring case. 
charIgnoreCase :: Monad m => Char -> Parser Char m Char -- | Match the input with the supplied string and return it if successful. string :: Monad m => String -> Parser Char m String -- | Match the input with the supplied string, ignoring case, and return it -- if successful. stringIgnoreCase :: Monad m => String -> Parser Char m String -- | Drop zero or more white space characters. dropSpace :: Monad m => Parser Char m () -- | Drop one or more white space characters. dropSpace1 :: Monad m => Parser Char m () -- | Match any character that satisfies isAlpha alpha :: Monad m => Parser Char m Char -- | Match any character that satisfies isAlphaNum alphaNum :: Monad m => Parser Char m Char -- | Match any character that satisfies isLetter letter :: Monad m => Parser Char m Char -- | Match any character that satisfies isAscii ascii :: Monad m => Parser Char m Char -- | Match any character that satisfies isAsciiLower asciiLower :: Monad m => Parser Char m Char -- | Match any character that satisfies isAsciiUpper asciiUpper :: Monad m => Parser Char m Char -- | Match any character that satisfies isLatin1 latin1 :: Monad m => Parser Char m Char -- | Match any character that satisfies isLower lower :: Monad m => Parser Char m Char -- | Match any character that satisfies isUpper upper :: Monad m => Parser Char m Char -- | Match any character that satisfies isMark mark :: Monad m => Parser Char m Char -- | Match any character that satisfies isPrint printable :: Monad m => Parser Char m Char -- | Match any character that satisfies isPunctuation punctuation :: Monad m => Parser Char m Char -- | Match any character that satisfies isSeparator separator :: Monad m => Parser Char m Char -- | Match any character that satisfies isSpace space :: Monad m => Parser Char m Char -- | Match any character that satisfies isSymbol symbol :: Monad m => Parser Char m Char -- | Match any character that satisfies isDigit digit :: Monad m => Parser Char m Char -- | Match any character that satisfies isOctDigit octDigit :: Monad m => Parser Char m Char -- | Match any character that satisfies isHexDigit hexDigit :: Monad m => Parser Char m Char -- | Match any character that satisfies isNumber numeric :: Monad m => Parser Char m Char -- | Allow an optional leading '+' or '-' sign character -- before any parser. signed :: (Num a, Monad m) => Parser Char m a -> Parser Char m a -- | A generic parser for scientific notation of numbers. Returns a -- (mantissa, exponent) tuple. The result can be mapped to Double -- or any other number representation e.g. Scientific. -- -- For example, using the scientific package: -- parserScientific = uncurry Data.Scientific.scientific <$> number number :: Monad m => Parser Char m (Integer, Int) -- | A fast, custom parser for double precision floating point numbers. -- Returns a (mantissa, exponent) tuple. This is much faster than -- number because it assumes the number will fit in a -- Double type and uses an Int representation to store the -- mantissa. -- -- Numbers too large for a Double may overflow. Int overflow is not -- checked in the exponent. doubleParser :: Monad m => Parser Char m (Int, Int) -- | Parse a decimal Double value. This parser accepts an optional -- sign (+ or -) followed by at least one decimal digit. Decimal digits -- are optionally followed by a decimal point and at least one decimal -- digit after the point. This parser accepts the maximal valid input as -- long as it gives a valid number. Specifically, a trailing decimal point -- is allowed but not consumed. 
This function does not accept "NaN" or -- "Infinity" string representations of double values. -- -- Definition: -- --
--   >>> double = uncurry Unicode.mkDouble <$> Unicode.number
--   
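-- -- As an illustration of the underlying number parser (a sketch, not a -- verified doctest): parsing "1.23" is expected to produce the mantissa and -- exponent pair that mkDouble combines back into the value: -- --
--   parseNumber = Stream.parse Unicode.number . Stream.fromList
--   -- parseNumber "1.23" is expected to yield Right (123,-2),
--   -- since 123 * 10^(-2) == 1.23
--   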
-- -- Examples: -- --
--   >>> p = Stream.parse Unicode.double . Stream.fromList
--   
-- --
--   >>> p "-1.23e-123"
--   Right (-1.23e-123)
--   
-- -- Trailing input examples: -- --
--   >>> p "1."
--   Right 1.0
--   
-- --
--   >>> p "1.2.3"
--   Right 1.2
--   
-- --
--   >>> p "1e"
--   Right 1.0
--   
-- --
--   >>> p "1e2.3"
--   Right 100.0
--   
-- --
--   >>> p "1+2"
--   Right 1.0
--   
-- -- Error cases: -- --
--   >>> p ""
--   Left (ParseError "number: expecting sign or decimal digit, got end of input")
--   
-- --
--   >>> p ".1"
--   Left (ParseError "number: expecting sign or decimal digit, got '.'")
--   
-- --
--   >>> p "+"
--   Left (ParseError "number: expecting decimal digit, got end of input")
--   
double :: Monad m => Parser Char m Double -- | Parse and decode an unsigned integral decimal number. decimal :: (Monad m, Integral a) => Parser Char m a -- | Parse and decode an unsigned integral hexadecimal number. The hex -- digits 'a' through 'f' may be upper or lower case. -- -- Note: This parser does not accept a leading "0x" string. hexadecimal :: (Monad m, Integral a, Bits a) => Parser Char m a -- | mkDouble mantissa exponent converts a mantissa and exponent -- to a Double value equivalent to mantissa * -- 10^exponent. It does not check for overflow, powers more than 308 -- will overflow. mkDouble :: Integer -> Int -> Double module Streamly.Internal.Unicode.Stream -- | Decode a stream of bytes to Unicode characters by mapping each byte to -- a corresponding Unicode Char in 0-255 range. decodeLatin1 :: Monad m => Stream m Word8 -> Stream m Char data CodingFailureMode TransliterateCodingFailure :: CodingFailureMode ErrorOnCodingFailure :: CodingFailureMode DropOnCodingFailure :: CodingFailureMode writeCharUtf8' :: Monad m => Parser Word8 m Char parseCharUtf8With :: Monad m => CodingFailureMode -> Parser Word8 m Char -- | Decode a UTF-8 encoded bytestream to a stream of Unicode characters. -- Any invalid codepoint encountered is replaced with the unicode -- replacement character. decodeUtf8 :: Monad m => Stream m Word8 -> Stream m Char -- | Decode a UTF-8 encoded bytestream to a stream of Unicode characters. -- The function throws an error if an invalid codepoint is encountered. decodeUtf8' :: Monad m => Stream m Word8 -> Stream m Char -- | Decode a UTF-8 encoded bytestream to a stream of Unicode characters. -- Any invalid codepoint encountered is dropped. decodeUtf8_ :: Monad m => Stream m Word8 -> Stream m Char -- | Decode a UTF-16 little endian encoded bytestream to a stream of -- Unicode characters. The function throws an error if an invalid -- codepoint is encountered. -- -- Unimplemented decodeUtf16le' :: Stream m Word16 -> Stream m Char data DecodeError DecodeError :: !DecodeState -> !CodePoint -> DecodeError type DecodeState = Word8 type CodePoint = Int -- | Pre-release decodeUtf8Either :: Monad m => Stream m Word8 -> Stream m (Either DecodeError Char) -- | Pre-release resumeDecodeUtf8Either :: Monad m => DecodeState -> CodePoint -> Stream m Word8 -> Stream m (Either DecodeError Char) -- | Like decodeUtf8 but for a chunked stream. It may be slightly -- faster than flattening the stream and then decoding with -- decodeUtf8. decodeUtf8Chunks :: MonadIO m => Stream m (Array Word8) -> Stream m Char -- | Like 'decodeUtf8'' but for a chunked stream. It may be slightly faster -- than flattening the stream and then decoding with 'decodeUtf8''. decodeUtf8Chunks' :: MonadIO m => Stream m (Array Word8) -> Stream m Char -- | Like decodeUtf8_ but for a chunked stream. It may be slightly -- faster than flattening the stream and then decoding with -- decodeUtf8_. decodeUtf8Chunks_ :: MonadIO m => Stream m (Array Word8) -> Stream m Char -- | Like encodeLatin1' but silently maps input codepoints beyond -- 255 to arbitrary Latin1 chars in 0-255 range. No error or exception is -- thrown when such mapping occurs. encodeLatin1 :: Monad m => Stream m Char -> Stream m Word8 -- | Encode a stream of Unicode characters to bytes by mapping each -- character to a byte in 0-255 range. Throws an error if the input -- stream contains characters beyond 255. encodeLatin1' :: Monad m => Stream m Char -> Stream m Word8 -- | Like encodeLatin1 but drops the input characters beyond 255. 
encodeLatin1_ :: Monad m => Stream m Char -> Stream m Word8 readCharUtf8' :: Monad m => Unfold m Char Word8 readCharUtf8 :: Monad m => Unfold m Char Word8 readCharUtf8_ :: Monad m => Unfold m Char Word8 -- | Encode a stream of Unicode characters to a UTF-8 encoded bytestream. -- Any Invalid characters (U+D800-U+D8FF) in the input stream are -- replaced by the Unicode replacement character U+FFFD. encodeUtf8 :: Monad m => Stream m Char -> Stream m Word8 -- | Encode a stream of Unicode characters to a UTF-8 encoded bytestream. -- When any invalid character (U+D800-U+D8FF) is encountered in the input -- stream the function errors out. encodeUtf8' :: Monad m => Stream m Char -> Stream m Word8 -- | Encode a stream of Unicode characters to a UTF-8 encoded bytestream. -- Any Invalid characters (U+D800-U+D8FF) in the input stream are -- dropped. encodeUtf8_ :: Monad m => Stream m Char -> Stream m Word8 -- | Encode a stream of String using the supplied encoding scheme. -- Each string is encoded as an Array Word8. encodeStrings :: MonadIO m => (Stream m Char -> Stream m Word8) -> Stream m String -> Stream m (Array Word8) -- | Encode a stream of Unicode characters to a UTF-16 little endian -- encoded bytestream. -- -- Unimplemented encodeUtf16le' :: Stream m Char -> Stream m Word16 -- | Remove leading whitespace from a string. -- --
--   stripHead = Stream.dropWhile isSpace
--   
-- -- Pre-release stripHead :: Monad m => Stream m Char -> Stream m Char -- | Fold each line of the stream using the supplied Fold and stream -- the result. -- --
--   >>> Stream.fold Fold.toList $ Unicode.lines Fold.toList (Stream.fromList "lines\nthis\nstring\n\n\n")
--   ["lines","this","string","",""]
--   
-- --
--   lines = Stream.splitOnSuffix (== '\n')
--   
-- -- Pre-release lines :: Monad m => Fold m Char b -> Stream m Char -> Stream m b -- | Fold each word of the stream using the supplied Fold and stream -- the result. -- --
--   >>> Stream.fold Fold.toList $ Unicode.words Fold.toList (Stream.fromList "fold these     words")
--   ["fold","these","words"]
--   
-- --
--   words = Stream.wordsBy isSpace
--   
-- -- Pre-release words :: Monad m => Fold m Char b -> Stream m Char -> Stream m b -- | Unfold a stream to character streams using the supplied Unfold -- and concat the results suffixing a newline character \n to -- each stream. -- --
--   unlines = Stream.interposeSuffix '\n'
--   unlines = Stream.intercalateSuffix Unfold.fromList "\n"
--   
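-- -- An illustrative use with Unfold.fromList on a stream of Strings (the -- output shown is what the definition above implies, not a verified doctest; -- the Unfold import is an assumption): -- --
--   >>> import qualified Streamly.Data.Unfold as Unfold
--   >>> Stream.fold Fold.toList $ Unicode.unlines Unfold.fromList (Stream.fromList ["uses", "unfold"])
--   "uses\nunfold\n"
--   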
-- -- Pre-release unlines :: MonadIO m => Unfold m a Char -> Stream m a -> Stream m Char -- | Unfold the elements of a stream to character streams using the -- supplied Unfold and concat the results with a whitespace -- character infixed between the streams. -- --
--   unwords = Stream.interpose ' '
--   unwords = Stream.intercalate Unfold.fromList " "
--   
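-- -- Similarly, an illustrative use with Unfold.fromList (the output shown -- is what the definition above implies, not a verified doctest): -- --
--   >>> Stream.fold Fold.toList $ Unicode.unwords Unfold.fromList (Stream.fromList ["two", "words"])
--   "two words"
--   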
-- -- Pre-release unwords :: MonadIO m => Unfold m a Char -> Stream m a -> Stream m Char decodeUtf8D :: Monad m => Stream m Word8 -> Stream m Char decodeUtf8D' :: Monad m => Stream m Word8 -> Stream m Char decodeUtf8D_ :: Monad m => Stream m Word8 -> Stream m Char -- | See section "3.9 Unicode Encoding Forms" in -- https://www.unicode.org/versions/Unicode13.0.0/UnicodeStandard-13.0.pdf encodeUtf8D :: Monad m => Stream m Char -> Stream m Word8 encodeUtf8D' :: Monad m => Stream m Char -> Stream m Word8 encodeUtf8D_ :: Monad m => Stream m Char -> Stream m Word8 decodeUtf8EitherD :: Monad m => Stream m Word8 -> Stream m (Either DecodeError Char) resumeDecodeUtf8EitherD :: Monad m => DecodeState -> CodePoint -> Stream m Word8 -> Stream m (Either DecodeError Char) -- | Read UTF-8 encoded bytes as chars from an Addr# until a 0 byte -- is encountered, the 0 byte is not included in the stream. -- -- Unsafe: The caller is responsible for safe addressing. -- -- Note that this is completely safe when reading from Haskell string -- literals because they are guaranteed to be NULL terminated: -- --
--   >>> Stream.fold Fold.toList (Unicode.fromStr# "Haskell"#)
--   "Haskell"
--   
fromStr# :: MonadIO m => Addr# -> Stream m Char -- | Same as decodeUtf8 -- | Deprecated: Please use decodeUtf8 instead decodeUtf8Lax :: Monad m => Stream m Word8 -> Stream m Char -- | Same as encodeLatin1 -- | Deprecated: Please use encodeLatin1 instead encodeLatin1Lax :: Monad m => Stream m Char -> Stream m Word8 -- | Same as encodeUtf8 -- | Deprecated: Please use encodeUtf8 instead encodeUtf8Lax :: Monad m => Stream m Char -> Stream m Word8 instance GHC.Show.Show Streamly.Internal.Unicode.Stream.DecodeError instance GHC.Show.Show Streamly.Internal.Unicode.Stream.CodingFailureMode module Streamly.Internal.Unicode.Array -- | Break a string up into a stream of strings at newline characters. The -- resulting strings do not contain newlines. -- --
--   lines = S.lines A.write
--   
-- --
--   >>> Stream.fold Fold.toList $ Unicode.lines $ Stream.fromList "lines\nthis\nstring\n\n\n"
--   [fromList "lines",fromList "this",fromList "string",fromList "",fromList ""]
--   
lines :: MonadIO m => Stream m Char -> Stream m (Array Char) -- | Break a string up into a stream of strings, which were delimited by -- characters representing white space. -- --
--   words = S.words A.write
--   
-- --
--   >>> Stream.fold Fold.toList $ Unicode.words $ Stream.fromList "A  newline\nis considered white space?"
--   [fromList "A",fromList "newline",fromList "is",fromList "considered",fromList "white",fromList "space?"]
--   
words :: MonadIO m => Stream m Char -> Stream m (Array Char) -- | Flattens the stream of Array Char, after appending a -- terminating newline to each string. -- -- unlines is an inverse operation to lines. -- --
--   >>> Stream.fold Fold.toList $ Unicode.unlines $ Stream.fromList ["lines", "this", "string"]
--   "lines\nthis\nstring\n"
--   
-- --
--   unlines = S.unlines A.read
--   
-- -- Note that, in general -- --
--   unlines . lines /= id
--   
unlines :: MonadIO m => Stream m (Array Char) -> Stream m Char -- | Flattens the stream of Array Char, after appending a -- separating space to each string. -- -- unwords is an inverse operation to words. -- --
--   >>> Stream.fold Fold.toList $ Unicode.unwords $ Stream.fromList ["unwords", "this", "string"]
--   "unwords this string"
--   
-- --
--   unwords = S.unwords A.read
--   
-- -- Note that, in general -- --
--   unwords . words /= id
--   
unwords :: MonadIO m => Stream m (Array Char) -> Stream m Char -- | Well-typed and flexible file system paths, preserving the OS and -- filesystem encoding. -- -- You can choose the level of type safety you want. Path is the -- basic path type which can represent a file, directory, absolute or -- relative path with no restrictions. Depending on how much type safety -- you want, you can choose appropriate type wrappers to wrap Path. -- File Path mandates the path to be a file whereas Abs -- (File Path) mandates it to be an absolute path representing a -- file. -- -- You can upgrade or downgrade the type safety. Whenever a less restrictive -- path type is converted to a more restrictive path type the conversion -- involves checks and it may fail. However, a more restrictive path type -- can be freely converted to a less restrictive one. -- -- See the streamly-filepath package for interworking with the -- OsPath type. The Path type can be converted to and -- from the OsPath type at zero cost since the underlying -- representation of both is the same. module Streamly.Internal.FileSystem.Path -- | A type representing file system paths for directories or files. newtype Path Path :: Array Word8 -> Path -- | A type representing a file path. data File a -- | A type representing a directory path. data Dir a -- | A type representing absolute paths. data Abs a -- | A type representing relative paths. data Rel a -- | A member of IsPath knows how to convert to and from the -- Path type. class IsPath a -- | Like fromPath but does not check the properties of Path. -- Provides performance and simplicity when we know that the properties -- of the path are already verified, for example, when we get the path -- from the file system or the OS APIs. fromPathUnsafe :: IsPath a => Path -> a -- | Convert a raw Path to other forms of well-typed paths. It may -- fail if the path does not satisfy the properties of the target type. -- -- Path components may have limits. Total path length may have a limit. fromPath :: (IsPath a, MonadThrow m) => Path -> m a -- | Convert a well-typed path to a raw Path. Never fails. toPath :: IsPath a => a -> Path -- | Convert a path type to another path type. This operation may fail with -- a PathException when converting a less restrictive path type to -- a more restrictive one. adaptPath :: (MonadThrow m, IsPath a, IsPath b) => a -> m b -- | On Posix it may fail if the byte array contains null characters. On -- Windows the array passed must be a multiple of 2 bytes as the -- underlying representation uses Word16. -- -- Throws InvalidPath. fromChunk :: MonadThrow m => Array Word8 -> m Path -- | Unsafe: On Posix, a path cannot contain null characters. On -- Windows, the array passed must be a multiple of 2 bytes as the -- underlying representation uses Word16. fromChunkUnsafe :: Array Word8 -> Path -- | Encode a Unicode string to Path using strict UTF-8 encoding on -- Posix. On Posix it may fail if the stream contains null characters. -- TBD: Use UTF16LE on Windows. fromString :: MonadThrow m => [Char] -> m Path -- | Encode a Unicode char stream to Path using strict UTF-8 -- encoding on Posix. On Posix it may fail if the stream contains null -- characters. TBD: Use UTF16LE on Windows. fromChars :: MonadThrow m => Stream Identity Char -> m Path -- | Generates a Path type from an interpolated string literal. -- -- Unimplemented path :: QuasiQuoter -- | Generates an Abs Path type from an interpolated string -- literal. 
-- -- Unimplemented abs :: QuasiQuoter -- | Generates an Rel Path type from an interpolated string -- literal. -- -- Unimplemented rel :: QuasiQuoter -- | Generates an Dir Path type from an interpolated string -- literal. -- -- Unimplemented dir :: QuasiQuoter -- | Generates an File Path type from an interpolated string -- literal. -- -- Unimplemented file :: QuasiQuoter -- | Generates an Abs (Dir Path) type from an interpolated string -- literal. -- -- Unimplemented absdir :: QuasiQuoter -- | Generates an Rel (Dir Path) type from an interpolated string -- literal. -- -- Unimplemented reldir :: QuasiQuoter -- | Generates an Abs (File Path) type from an interpolated string -- literal. -- -- Unimplemented absfile :: QuasiQuoter -- | Generates an Rel (File Path) type from an interpolated string -- literal. -- -- Unimplemented relfile :: QuasiQuoter -- | Generates a Path type. -- -- Unimplemented mkPath :: String -> Q Exp -- | Generates an Abs Path type. -- -- Unimplemented mkAbs :: String -> Q Exp -- | Generates an Rel Path type. -- -- Unimplemented mkRel :: String -> Q Exp -- | Generates an Dir Path type. -- -- Unimplemented mkDir :: String -> Q Exp -- | Generates an File Path type. -- -- Unimplemented mkFile :: String -> Q Exp -- | Generates an Abs (Dir Path) type. -- -- Unimplemented mkAbsDir :: String -> Q Exp -- | Generates an Rel (Dir Path) type. -- -- Unimplemented mkRelDir :: String -> Q Exp -- | Generates an Abs (File Path) type. -- -- Unimplemented mkAbsFile :: String -> Q Exp -- | Generates an Rel (File Path) type. -- -- Unimplemented mkRelFile :: String -> Q Exp -- | Convert Path to an array of bytes. toChunk :: Path -> Array Word8 -- | Decode the path to a Unicode string using strict UTF-8 decoding on -- Posix. TBD: Use UTF16LE on Windows. toString :: Path -> [Char] -- | Decode the path to a stream of Unicode chars using strict UTF-8 -- decoding on Posix. TBD: Use UTF16LE on Windows. toChars :: Monad m => Path -> Stream m Char -- | Primary path separator character, / on Posix and \ -- on Windows. Windows supports / too as a separator. Please use -- isSeparator for testing if a char is a separator char. primarySeparator :: Char -- | On Posix only / is a path separator but in windows it could -- be either / or \. isSeparator :: Char -> Bool -- | Like extendDir but for the less restrictive Path type -- which will always create a syntactically valid Path type but it -- may not be semantically valid because we may append an absolute path -- or we may append to a file path. The onus lies on the user to ensure -- that the first path is not a file and the second path is not absolute. extendPath :: Path -> Path -> Path -- | Extend a directory path by appending a relative path to it. This is -- the equivalent to the / operator from the -- filepath package. 
extendDir :: (IsPath (a (Dir Path)), IsPath b, IsPath (a b)) => a (Dir Path) -> Rel b -> a b instance GHC.Classes.Eq Streamly.Internal.FileSystem.Path.PathException instance GHC.Show.Show Streamly.Internal.FileSystem.Path.PathException instance Streamly.Internal.FileSystem.Path.IsPath Streamly.Internal.FileSystem.Path.Path instance Streamly.Internal.FileSystem.Path.IsPath (Streamly.Internal.FileSystem.Path.File Streamly.Internal.FileSystem.Path.Path) instance Streamly.Internal.FileSystem.Path.IsPath (Streamly.Internal.FileSystem.Path.Dir Streamly.Internal.FileSystem.Path.Path) instance Streamly.Internal.FileSystem.Path.IsPath (Streamly.Internal.FileSystem.Path.Abs Streamly.Internal.FileSystem.Path.Path) instance Streamly.Internal.FileSystem.Path.IsPath (Streamly.Internal.FileSystem.Path.Rel Streamly.Internal.FileSystem.Path.Path) instance Streamly.Internal.FileSystem.Path.IsPath (Streamly.Internal.FileSystem.Path.Abs (Streamly.Internal.FileSystem.Path.File Streamly.Internal.FileSystem.Path.Path)) instance Streamly.Internal.FileSystem.Path.IsPath (Streamly.Internal.FileSystem.Path.Abs (Streamly.Internal.FileSystem.Path.Dir Streamly.Internal.FileSystem.Path.Path)) instance Streamly.Internal.FileSystem.Path.IsPath (Streamly.Internal.FileSystem.Path.Rel (Streamly.Internal.FileSystem.Path.File Streamly.Internal.FileSystem.Path.Path)) instance Streamly.Internal.FileSystem.Path.IsPath (Streamly.Internal.FileSystem.Path.Rel (Streamly.Internal.FileSystem.Path.Dir Streamly.Internal.FileSystem.Path.Path)) instance GHC.Exception.Type.Exception Streamly.Internal.FileSystem.Path.PathException -- | Encode Haskell data types to byte streams. -- -- The primary purpose of this module is to serialize primitive Haskell -- types to streams for convenient byte by byte processing when such a -- need arises. -- -- It would be inefficient to use this to build byte streams from -- algebraic data types. For general serialization of ADTs please use the -- Serialize type class instances. The fastest way to convert general -- Haskell types to byte streams is to serialize them to an array and -- then stream the array. module Streamly.Internal.Data.Binary.Stream class ToBytes a -- | Convert a Haskell type to a byte stream. toBytes :: ToBytes a => a -> Stream m Word8 -- | A value of type () is encoded as 0 in binary -- encoding. -- --
--   0 ==> ()
--   
-- -- Pre-release unit :: Applicative m => Stream m Word8 -- | A value of type Bool is encoded as follows in binary encoding. -- --
--   0 ==> False
--   1 ==> True
--   
-- -- Pre-release bool :: Applicative m => Bool -> Stream m Word8 -- | A value of type Ordering is encoded as follows in binary -- encoding. -- --
--   0 ==> LT
--   1 ==> EQ
--   2 ==> GT
--   
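-- -- For example, an encoded value can be collected into a list of bytes; -- this is an illustrative sketch (the module aliases are assumed) and the -- bytes shown follow from the table above: -- --
--   >>> import qualified Streamly.Data.Fold as Fold
--   >>> import qualified Streamly.Data.Stream as Stream
--   >>> import qualified Streamly.Internal.Data.Binary.Stream as Binary
--   >>> Stream.fold Fold.toList (Binary.ordering GT)
--   [2]
--   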
-- -- Pre-release ordering :: Applicative m => Ordering -> Stream m Word8 -- | Stream a Word8. -- -- Pre-release word8 :: Applicative m => Word8 -> Stream m Word8 -- | Stream a Word16 as two bytes, the first byte is the MSB of the -- Word16 and second byte is the LSB (big endian representation). -- -- Pre-release word16be :: Monad m => Word16 -> Stream m Word8 -- | Stream a Word16 as two bytes, the first byte is the LSB of the -- Word16 and second byte is the MSB (little endian representation). -- -- Pre-release word16le :: Monad m => Word16 -> Stream m Word8 -- | Stream a Word32 as four bytes, the first byte is the MSB of the -- Word32 and last byte is the LSB (big endian representation). -- -- Pre-release word32be :: Monad m => Word32 -> Stream m Word8 -- | Stream a Word32 as four bytes, the first byte is the LSB of the -- Word32 and last byte is the MSB (little endian representation). -- -- Pre-release word32le :: Monad m => Word32 -> Stream m Word8 -- | Stream a Word64 as eight bytes, the first byte is the MSB of -- the Word64 and last byte is the LSB (big endian representation). -- -- Pre-release word64be :: Monad m => Word64 -> Stream m Word8 -- | Stream a Word64 as eight bytes, the first byte is the LSB of -- the Word64 and last byte is the MSB (little endian representation). -- -- Pre-release word64le :: Monad m => Word64 -> Stream m Word8 -- | Stream a Word64 as eight bytes in the host byte order. -- -- Pre-release word64host :: Monad m => Word64 -> Stream m Word8 int8 :: Applicative m => Int8 -> Stream m Word8 -- | Stream an Int16 as two bytes, the first byte is the MSB of the -- Int16 and second byte is the LSB (big endian representation). -- -- Pre-release int16be :: Monad m => Int16 -> Stream m Word8 -- | Stream an Int16 as two bytes, the first byte is the LSB of the -- Int16 and second byte is the MSB (little endian representation). -- -- Pre-release int16le :: Monad m => Int16 -> Stream m Word8 -- | Stream an Int32 as four bytes, the first byte is the MSB of the -- Int32 and last byte is the LSB (big endian representation). -- -- Pre-release int32be :: Monad m => Int32 -> Stream m Word8 int32le :: Monad m => Int32 -> Stream m Word8 -- | Stream an Int64 as eight bytes, the first byte is the MSB of the -- Int64 and last byte is the LSB (big endian representation). -- -- Pre-release int64be :: Monad m => Int64 -> Stream m Word8 -- | Stream an Int64 as eight bytes, the first byte is the LSB of the -- Int64 and last byte is the MSB (little endian representation). -- -- Pre-release int64le :: Monad m => Int64 -> Stream m Word8 -- | Big endian (MSB first) Float float32be :: Monad m => Float -> Stream m Word8 -- | Little endian (LSB first) Float float32le :: Monad m => Float -> Stream m Word8 -- | Big endian (MSB first) Double double64be :: Monad m => Double -> Stream m Word8 -- | Little endian (LSB first) Double double64le :: Monad m => Double -> Stream m Word8 -- | Encode a Unicode character to a stream of bytes in the 0-255 range. charLatin1 :: Applicative m => Char -> Stream m Word8 charUtf8 :: Monad m => Char -> Stream m Word8 module Streamly.Internal.Console.Stdio -- | Read a byte stream from standard input. -- --
--   read = Handle.read stdin
--   read = Stream.unfold Stdio.reader ()
--   
-- -- Pre-release read :: MonadIO m => Stream m Word8 -- | Read a character stream from Utf8 encoded standard input. -- --
--   readChars = Unicode.decodeUtf8 Stdio.read
--   
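-- -- For example, a small filter that upper-cases whatever arrives on -- standard input could be sketched as below (illustrative only; assumes this -- module is imported qualified as Stdio): -- --
--   import Data.Char (toUpper)
--   shout :: IO ()
--   shout = Stdio.putChars (fmap toUpper Stdio.readChars)
--   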
-- -- Pre-release readChars :: MonadIO m => Stream m Char -- | Read a stream of chunks from standard input. The maximum size of a -- single chunk is limited to defaultChunkSize. The actual size -- read may be less than defaultChunkSize. -- --
--   readChunks = Handle.readChunks stdin
--   readChunks = Stream.unfold Stdio.chunkReader ()
--   
-- -- Pre-release readChunks :: MonadIO m => Stream m (Array Word8) -- | Unfold standard input into a stream of Word8. reader :: MonadIO m => Unfold m () Word8 -- | Unfolds standard input into a stream of Word8 arrays. chunkReader :: MonadIO m => Unfold m () (Array Word8) -- | Fold a stream of Word8 to standard output. write :: MonadIO m => Fold m Word8 () -- | Fold a stream of Array Word8 to standard output. writeChunks :: MonadIO m => Fold m (Array Word8) () -- | Fold a stream of Word8 to standard error. writeErr :: MonadIO m => Fold m Word8 () -- | Fold a stream of Array Word8 to standard error. writeErrChunks :: MonadIO m => Fold m (Array Word8) () -- | Write a stream of bytes to standard output. -- --
--   putBytes = Handle.putBytes stdout
--   putBytes = Stream.fold Stdio.write
--   
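-- -- For example, a byte-level cat that copies standard input to standard -- output can be sketched as below (illustrative only; assumes this module is -- imported qualified as Stdio): -- --
--   cat :: IO ()
--   cat = Stdio.putBytes Stdio.read
--   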
-- -- Pre-release putBytes :: MonadIO m => Stream m Word8 -> m () -- | Encode a character stream to Utf8 and write it to standard output. -- --
--   putChars = Stdio.putBytes . Unicode.encodeUtf8
--   
-- -- Pre-release putChars :: MonadIO m => Stream m Char -> m () -- | Write a stream of chunks to standard output. -- --
--   putChunks = Handle.putChunks stdout
--   putChunks = Stream.fold Stdio.writeChunks
--   
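-- -- The chunked variant avoids going through individual bytes; an -- illustrative sketch (assumes this module is imported qualified as Stdio): -- --
--   catChunks :: IO ()
--   catChunks = Stdio.putChunks Stdio.readChunks
--   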
-- -- Pre-release putChunks :: MonadIO m => Stream m (Array Word8) -> m () -- | Write a stream of strings to standard output using the supplied -- encoding. Output is flushed to the device for each string. -- -- Pre-release putStringsWith :: MonadIO m => (Stream m Char -> Stream m Word8) -> Stream m String -> m () -- | Write a stream of strings to standard output using UTF8 encoding. -- Output is flushed to the device for each string. -- -- Pre-release putStrings :: MonadIO m => Stream m String -> m () -- | Like putStrings but adds a newline at the end of each string. -- -- XXX This is not portable, on Windows we need to use "rn" instead. -- -- Pre-release putStringsLn :: MonadIO m => Stream m String -> m () -- | Combinators to work with standard input, output and error streams. -- -- See also: Streamly.Internal.Console.Stdio module Streamly.Console.Stdio -- | Unfold standard input into a stream of Word8. reader :: MonadIO m => Unfold m () Word8 -- | Unfolds standard input into a stream of Word8 arrays. chunkReader :: MonadIO m => Unfold m () (Array Word8) -- | Fold a stream of Word8 to standard output. write :: MonadIO m => Fold m Word8 () -- | Fold a stream of Array Word8 to standard output. writeChunks :: MonadIO m => Fold m (Array Word8) () -- | Fold a stream of Word8 to standard error. writeErr :: MonadIO m => Fold m Word8 () -- | Fold a stream of Array Word8 to standard error. writeErrChunks :: MonadIO m => Fold m (Array Word8) () -- | Convenient template Haskell quasiquoters to format strings. module Streamly.Internal.Unicode.String -- | A QuasiQuoter that treats the input as a string literal: -- --
--   >>> [str|x|]
--   "x"
--   
-- -- Any #{symbol} is replaced by the value of the Haskell symbol -- symbol which is in scope: -- --
--   >>> x = "hello"
--   
--   >>> [str|#{x} world!|]
--   "hello world!"
--   
-- -- ## means a literal # without the special meaning for -- referencing haskell symbols: -- --
--   >>> [str|##{x} world!|]
--   "#{x} world!"
--   
-- -- A # at the end of line means the line continues to the next -- line without introducing a newline character: -- --
--   >>> :{
--   [str|hello#
--   world!|]
--   :}
--   "hello world!"
--   
-- -- Bugs: because of a bug in parsers, a lone # at the end of input gets -- removed. str :: QuasiQuoter instance GHC.Classes.Eq Streamly.Internal.Unicode.String.StrSegment instance GHC.Show.Show Streamly.Internal.Unicode.String.StrSegment -- | To parse a text input, use the decode routines from -- Streamly.Unicode.Stream module to convert an input byte stream -- to a Unicode Char stream and then use these parsers on the Char -- stream. module Streamly.Unicode.Parser -- | Match a specific character. char :: Monad m => Char -> Parser Char m Char -- | Match a specific character ignoring case. charIgnoreCase :: Monad m => Char -> Parser Char m Char -- | Match any character that satisfies isAlpha alpha :: Monad m => Parser Char m Char -- | Match any character that satisfies isAlphaNum alphaNum :: Monad m => Parser Char m Char -- | Match any character that satisfies isLetter letter :: Monad m => Parser Char m Char -- | Match any character that satisfies isAscii ascii :: Monad m => Parser Char m Char -- | Match any character that satisfies isAsciiLower asciiLower :: Monad m => Parser Char m Char -- | Match any character that satisfies isAsciiUpper asciiUpper :: Monad m => Parser Char m Char -- | Match any character that satisfies isLatin1 latin1 :: Monad m => Parser Char m Char -- | Match any character that satisfies isLower lower :: Monad m => Parser Char m Char -- | Match any character that satisfies isUpper upper :: Monad m => Parser Char m Char -- | Match any character that satisfies isMark mark :: Monad m => Parser Char m Char -- | Match any character that satisfies isPrint printable :: Monad m => Parser Char m Char -- | Match any character that satisfies isPunctuation punctuation :: Monad m => Parser Char m Char -- | Match any character that satisfies isSeparator separator :: Monad m => Parser Char m Char -- | Match any character that satisfies isSpace space :: Monad m => Parser Char m Char -- | Match any character that satisfies isSymbol symbol :: Monad m => Parser Char m Char -- | Match any character that satisfies isDigit digit :: Monad m => Parser Char m Char -- | Match any character that satisfies isOctDigit octDigit :: Monad m => Parser Char m Char -- | Match any character that satisfies isHexDigit hexDigit :: Monad m => Parser Char m Char -- | Match any character that satisfies isNumber numeric :: Monad m => Parser Char m Char -- | Match the input with the supplied string and return it if successful. string :: Monad m => String -> Parser Char m String -- | Match the input with the supplied string and return it if successful. stringIgnoreCase :: Monad m => String -> Parser Char m String -- | Drop zero or more white space characters. dropSpace :: Monad m => Parser Char m () -- | Drop one or more white space characters. dropSpace1 :: Monad m => Parser Char m () -- | Parse and decode an unsigned integral decimal number. decimal :: (Monad m, Integral a) => Parser Char m a -- | Parse and decode an unsigned integral hexadecimal number. The hex -- digits 'a' through 'f' may be upper or lower case. -- -- Note: This parser does not accept a leading "0x" string. hexadecimal :: (Monad m, Integral a, Bits a) => Parser Char m a -- | Parse a decimal Double value. This parser accepts an optional -- sign (+ or -) followed by at least one decimal digit. Decimal digits -- are optionally followed by a decimal point and at least one decimal -- digit after the point. This parser accepts the maximal valid input as -- long as it gives a valid number. 
Specifically, a trailing decimal point -- is allowed but not consumed. This function does not accept "NaN" or -- "Infinity" string representations of double values. -- -- Definition: -- --
--   >>> double = uncurry Unicode.mkDouble <$> Unicode.number
--   
-- -- Examples: -- --
--   >>> p = Stream.parse Unicode.double . Stream.fromList
--   
-- --
--   >>> p "-1.23e-123"
--   Right (-1.23e-123)
--   
-- -- Trailing input examples: -- --
--   >>> p "1."
--   Right 1.0
--   
-- --
--   >>> p "1.2.3"
--   Right 1.2
--   
-- --
--   >>> p "1e"
--   Right 1.0
--   
-- --
--   >>> p "1e2.3"
--   Right 100.0
--   
-- --
--   >>> p "1+2"
--   Right 1.0
--   
-- -- Error cases: -- --
--   >>> p ""
--   Left (ParseError "number: expecting sign or decimal digit, got end of input")
--   
-- --
--   >>> p ".1"
--   Left (ParseError "number: expecting sign or decimal digit, got '.'")
--   
-- --
--   >>> p "+"
--   Left (ParseError "number: expecting decimal digit, got end of input")
--   
double :: Monad m => Parser Char m Double -- | Allow an optional leading '+' or '-' sign character -- before any parser. signed :: (Num a, Monad m) => Parser Char m a -> Parser Char m a -- |

Processing Unicode Strings

-- -- A Char stream is the canonical representation to process -- Unicode strings. It can be processed efficiently using regular stream -- processing operations. A byte stream of Unicode text read from an IO -- device or from an Array in memory can be decoded into a -- Char stream using the decoding routines in this module. A -- String ([Char]) can be converted into a Char -- stream using fromList. An Array Char can be -- unfolded into a stream using the array read unfold. -- --

Storing Unicode Strings

-- -- A stream of Char can be encoded into a byte stream using the -- encoding routines in this module and then written to IO devices or to -- arrays in memory. -- -- If you have to store a Char stream in memory you can -- fold the Char stream as Array Char using the -- array write fold. The Array type provides a more -- compact representation reducing GC overhead. If space efficiency is a -- concern you can use encodeUtf8' on the Char stream -- before writing it to an Array providing an even more compact -- representation. -- --

String Literals

-- -- Stream Identity Char and Array Char are instances of -- IsString and IsList, therefore, -- OverloadedStrings and OverloadedLists extensions can -- be used for convenience when specifying unicode strings literals using -- these types. -- --

Idioms

-- -- Some simple text processing operations can be represented simply as -- operations on Char streams. Follow the links for the following idioms: -- -- -- --

Pitfalls

-- -- -- --

Experimental APIs

-- -- Some experimental APIs to conveniently process text using the -- Array Char represenation directly can be found in -- Streamly.Internal.Unicode.Array. module Streamly.Unicode.Stream -- | Decode a stream of bytes to Unicode characters by mapping each byte to -- a corresponding Unicode Char in 0-255 range. decodeLatin1 :: Monad m => Stream m Word8 -> Stream m Char -- | Decode a UTF-8 encoded bytestream to a stream of Unicode characters. -- Any invalid codepoint encountered is replaced with the unicode -- replacement character. decodeUtf8 :: Monad m => Stream m Word8 -> Stream m Char -- | Decode a UTF-8 encoded bytestream to a stream of Unicode characters. -- The function throws an error if an invalid codepoint is encountered. decodeUtf8' :: Monad m => Stream m Word8 -> Stream m Char -- | Like decodeUtf8 but for a chunked stream. It may be slightly -- faster than flattening the stream and then decoding with -- decodeUtf8. decodeUtf8Chunks :: MonadIO m => Stream m (Array Word8) -> Stream m Char -- | Like encodeLatin1' but silently maps input codepoints beyond -- 255 to arbitrary Latin1 chars in 0-255 range. No error or exception is -- thrown when such mapping occurs. encodeLatin1 :: Monad m => Stream m Char -> Stream m Word8 -- | Encode a stream of Unicode characters to bytes by mapping each -- character to a byte in 0-255 range. Throws an error if the input -- stream contains characters beyond 255. encodeLatin1' :: Monad m => Stream m Char -> Stream m Word8 -- | Encode a stream of Unicode characters to a UTF-8 encoded bytestream. -- Any Invalid characters (U+D800-U+D8FF) in the input stream are -- replaced by the Unicode replacement character U+FFFD. encodeUtf8 :: Monad m => Stream m Char -> Stream m Word8 -- | Encode a stream of Unicode characters to a UTF-8 encoded bytestream. -- When any invalid character (U+D800-U+D8FF) is encountered in the input -- stream the function errors out. encodeUtf8' :: Monad m => Stream m Char -> Stream m Word8 -- | Encode a stream of String using the supplied encoding scheme. -- Each string is encoded as an Array Word8. encodeStrings :: MonadIO m => (Stream m Char -> Stream m Word8) -> Stream m String -> Stream m (Array Word8) -- | Convenient template Haskell quasiquoters to format strings. -- -- The str quasiquoter retains newlines in the string when the -- line is split across multiple lines. The unwords . lines -- idiom can be used on the resulting string to collapse it into a single -- line. module Streamly.Unicode.String -- | A QuasiQuoter that treats the input as a string literal: -- --
--   >>> [str|x|]
--   "x"
--   
-- -- Any #{symbol} is replaced by the value of the Haskell symbol -- symbol which is in scope: -- --
--   >>> x = "hello"
--   
--   >>> [str|#{x} world!|]
--   "hello world!"
--   
-- -- ## means a literal # without the special meaning for -- referencing haskell symbols: -- --
--   >>> [str|##{x} world!|]
--   "#{x} world!"
--   
-- -- A # at the end of line means the line continues to the next -- line without introducing a newline character: -- --
--   >>> :{
--   [str|hello#
--   world!|]
--   :}
--   "hello world!"
--   
-- -- Bugs: because of a bug in parsers, a lone # at the end of input gets -- removed. str :: QuasiQuoter