broadcast-chan-0.2.1.1/0000755000000000000000000000000007346545000012713 5ustar0000000000000000broadcast-chan-0.2.1.1/BroadcastChan.hs0000644000000000000000000001412007346545000015741 0ustar0000000000000000{-# LANGUAGE NamedFieldPuns #-} {-# LANGUAGE Safe #-} {-# LANGUAGE ScopedTypeVariables #-} {-# LANGUAGE TupleSections #-} ------------------------------------------------------------------------------- -- | -- Module : BroadcastChan -- Copyright : (C) 2014-2018 Merijn Verstraaten -- License : BSD-style (see the file LICENSE) -- Maintainer : Merijn Verstraaten -- Stability : experimental -- Portability : haha -- -- A closable, fair, single-wakeup channel that avoids the 0 reader space leak -- that @"Control.Concurrent.Chan"@ from base suffers from. -- -- The @Chan@ type from @"Control.Concurrent.Chan"@ consists of both a read -- and write end combined into a single value. This means there is always at -- least 1 read end for a @Chan@, which keeps any values written to it alive. -- This is a problem for applications/libraries that want to have a channel -- that can have zero listeners. -- -- Suppose we have an library that produces events and we want to let users -- register to receive events. If we use a channel and write all events to it, -- we would like to drop and garbage collect any events that take place when -- there are 0 listeners. The always present read end of @Chan@ from base -- makes this impossible. We end up with a @Chan@ that forever accumulates -- more and more events that will never get removed, resulting in a memory -- leak. -- -- @"BroadcastChan"@ splits channels into separate read and write ends. Any -- message written to a a channel with no existing read end is immediately -- dropped so it can be garbage collected. Once a read end is created, all -- messages written to the channel will be accessible to that read end. -- -- Once all read ends for a channel have disappeared and been garbage -- collected, the channel will return to dropping messages as soon as they are -- written. -- -- __Why should I use "BroadcastChan" over "Control.Concurrent.Chan"?__ -- -- * @"BroadcastChan"@ is closable, -- -- * @"BroadcastChan"@ has no 0 reader space leak, -- -- * @"BroadcastChan"@ has comparable or better performance. -- -- __Why should I use "BroadcastChan" over various (closable) STM channels?__ -- -- * @"BroadcastChan"@ is single-wakeup, -- -- * @"BroadcastChan"@ is fair, -- -- * @"BroadcastChan"@ performs better under contention. ------------------------------------------------------------------------------- module BroadcastChan ( -- * Datatypes BroadcastChan , Direction(..) , In , Out -- * Construction , newBroadcastChan , newBChanListener -- * Basic Operations , readBChan , writeBChan , closeBChan , isClosedBChan , getBChanContents -- * Parallel processing , Action(..) , Handler(..) , parMapM_ , parFoldMap , parFoldMapM -- * Foldl combinators -- | Combinators for use with Tekmo's @foldl@ package. , foldBChan , foldBChanM ) where import Control.Exception (SomeException(..), mask, throwIO, try, uninterruptibleMask_) import Control.Monad (liftM) import Control.Monad.IO.Unlift (MonadUnliftIO, UnliftIO(..), askUnliftIO, withRunInIO) import Data.Foldable as F (Foldable(..), foldlM, forM_) import BroadcastChan.Extra import BroadcastChan.Internal bracketOnError :: MonadUnliftIO m => IO a -> (a -> IO b) -> m c -> m c bracketOnError before after thing = withRunInIO $ \run -> mask $ \restore -> do x <- before res1 <- try . restore . 
run $ thing case res1 of Left (SomeException exc) -> do _ :: Either SomeException b <- try . uninterruptibleMask_ $ after x throwIO exc Right y -> return y -- | Map a monadic function over a 'Foldable', processing elements in parallel. -- -- This function does __NOT__ guarantee that elements are processed in a -- deterministic order! parMapM_ :: (F.Foldable f, MonadUnliftIO m) => Handler m a -- ^ Exception handler -> Int -- ^ Number of parallel threads to use -> (a -> m ()) -- ^ Function to run in parallel -> f a -- ^ The 'Foldable' to process in parallel -> m () parMapM_ hndl threads workFun input = do UnliftIO runInIO <- askUnliftIO Bracket{allocate,cleanup,action} <- runParallel_ (mapHandler runInIO hndl) threads (runInIO . workFun) (forM_ input) bracketOnError allocate cleanup action -- | Like 'parMapM_', but folds the individual results into single result -- value. -- -- This function does __NOT__ guarantee that elements are processed in a -- deterministic order! parFoldMap :: (F.Foldable f, MonadUnliftIO m) => Handler m a -- ^ Exception handler -> Int -- ^ Number of parallel threads to use -> (a -> m b) -- ^ Function to run in parallel -> (r -> b -> r) -- ^ Function to fold results with -> r -- ^ Zero element for the fold -> f a -- ^ The 'Foldable' to process -> m r parFoldMap hndl threads work f = parFoldMapM hndl threads work (\x y -> return (f x y)) -- | Like 'parFoldMap', but uses a monadic fold function. -- -- This function does __NOT__ guarantee that elements are processed in a -- deterministic order! parFoldMapM :: forall a b f m r . (F.Foldable f, MonadUnliftIO m) => Handler m a -- ^ Exception handler -> Int -- ^ Number of parallel threads to use -> (a -> m b) -- ^ Function to run in parallel -> (r -> b -> m r) -- ^ Monadic function to fold results with -> r -- ^ Zero element for the fold -> f a -- ^ The 'Foldable' to process -> m r parFoldMapM hndl threads workFun f z input = do UnliftIO runInIO <- askUnliftIO Bracket{allocate,cleanup,action} <- runParallel (Right f) (mapHandler runInIO hndl) threads (runInIO . workFun) body bracketOnError allocate cleanup action where body :: (a -> m ()) -> (a -> m (Maybe b)) -> m r body send sendRecv = snd `liftM` foldlM wrappedFoldFun (0, z) input where wrappedFoldFun :: (Int, r) -> a -> m (Int, r) wrappedFoldFun (i, x) a | i == threads = liftM (i,) $ sendRecv a >>= maybe (return x) (f x) | otherwise = const (i+1, x) `liftM` send a broadcast-chan-0.2.1.1/BroadcastChan/0000755000000000000000000000000007346545000015407 5ustar0000000000000000broadcast-chan-0.2.1.1/BroadcastChan/Extra.hs0000644000000000000000000003407707346545000017041 0ustar0000000000000000{-# OPTIONS_HADDOCK not-home #-} {-# LANGUAGE DeriveDataTypeable #-} {-# LANGUAGE NamedFieldPuns #-} {-# LANGUAGE ScopedTypeVariables #-} {-# LANGUAGE Safe #-} ------------------------------------------------------------------------------- -- | -- Module : BroadcastChan.Extra -- Copyright : (C) 2014-2018 Merijn Verstraaten -- License : BSD-style (see the file LICENSE) -- Maintainer : Merijn Verstraaten -- Stability : experimental -- Portability : haha -- -- Functions in this module are *NOT* intended to be used by regular users of -- the library. Rather, they are intended for implementing parallel processing -- libraries on top of @broadcast-chan@, such as @broadcast-chan-conduit@. -- -- This module, while not for end users, is considered part of the public API, -- so users can rely on PVP bounds to avoid breakage due to changes to this -- module. 
------------------------------------------------------------------------------- module BroadcastChan.Extra ( Action(..) , BracketOnError(..) , Handler(..) , ThreadBracket(..) , mapHandler , runParallel , runParallelWith , runParallel_ , runParallelWith_ ) where import Control.Concurrent (ThreadId, forkFinally, mkWeakThreadId, myThreadId) import Control.Concurrent.MVar import Control.Concurrent.QSem import Control.Concurrent.QSemN import Control.Exception (Exception(..), SomeException(..), bracketOnError) import qualified Control.Exception as Exc import Control.Monad ((>=>), replicateM, void) import Control.Monad.Trans.Cont (ContT(..)) import Control.Monad.IO.Unlift (MonadIO(..)) import Data.Typeable (Typeable) import System.Mem.Weak (Weak, deRefWeak) import BroadcastChan.Internal evalContT :: Monad m => ContT r m r -> m r evalContT m = runContT m return -- DANGER! Breaks the invariant that you can't write to closed channels! -- Only meant to be used in 'parallelCore'! unsafeWriteBChan :: MonadIO m => BroadcastChan In a -> a -> m () unsafeWriteBChan (BChan writeVar) val = liftIO $ do new_hole <- newEmptyMVar Exc.mask_ $ do old_hole <- takeMVar writeVar -- old_hole is only full if the channel was previously closed item <- tryTakeMVar old_hole case item of Nothing -> return () Just Closed -> putMVar new_hole Closed Just _ -> error "unsafeWriteBChan hit an impossible condition!" putMVar old_hole (ChItem val new_hole) putMVar writeVar new_hole {-# INLINE unsafeWriteBChan #-} data Shutdown = Shutdown deriving (Show, Typeable) instance Exception Shutdown -- | Action to take when an exception occurs while processing an element. data Action = Drop -- ^ Drop the current element and continue processing. | Retry -- ^ Retry by appending the current element to the queue of remaining -- elements. | Terminate -- ^ Stop all processing and reraise the exception. deriving (Eq, Show) -- | Exception handler for parallel processing. data Handler m a = Simple Action -- ^ Always take the specified 'Action'. | Handle (a -> SomeException -> m Action) -- ^ Allow inspection of the element, exception, and execution of monadic -- actions before deciding the 'Action' to take. -- | Allocation, cleanup, and work actions for parallel processing. These -- should be passed to an appropriate @bracketOnError@ function. data BracketOnError m r = Bracket { allocate :: IO [Weak ThreadId] -- ^ Allocation action that spawn threads and sets up handlers. , cleanup :: [Weak ThreadId] -> IO () -- ^ Cleanup action that handles exceptional termination , action :: m r -- ^ Action that performs actual processing and waits for processing to -- finish and threads to terminate. } -- | Datatype for specifying additional setup/cleanup around forking threads. -- Used by 'runParallelWith' and 'runParallelWith_' to fix resource management -- in @broadcast-chan-conduit@. -- -- If the allocation action can fail/abort with an exception it __MUST__ take -- care not to leak resources in these cases. In other words, IFF 'setupFork' -- succeeds then this library will ensure the corresponding cleanup runs. -- -- @since 0.2.1 data ThreadBracket = ThreadBracket { setupFork :: IO () -- ^ Setup action to run before spawning a new thread. , cleanupFork :: IO () -- ^ Normal cleanup action upon thread termination. , cleanupForkError :: IO () -- ^ Exceptional cleanup action in case thread terminates due to an -- exception. 
} noopBracket :: ThreadBracket noopBracket = ThreadBracket { setupFork = return () , cleanupFork = return () , cleanupForkError = return () } -- | Convenience function for changing the monad the exception handler runs in. mapHandler :: (m Action -> n Action) -> Handler m a -> Handler n a mapHandler _ (Simple act) = Simple act mapHandler mmorph (Handle f) = Handle $ \a exc -> mmorph (f a exc) -- Workhorse function for runParallel_ and runParallel. Spawns threads, sets up -- error handling, thread termination, etc. parallelCore :: forall a m . MonadIO m => Handler IO a -> Int -> IO () -> ThreadBracket -> (a -> IO ()) -> m (IO [Weak ThreadId], [Weak ThreadId] -> IO (), a -> IO (), m ()) parallelCore hndl threads onDrop threadBracket f = liftIO $ do originTid <- myThreadId inChanIn <- newBroadcastChan inChanOut <- newBChanListener inChanIn shutdownSem <- newQSemN 0 endSem <- newQSemN 0 let bufferValue :: a -> IO () bufferValue = void . writeBChan inChanIn simpleHandler :: a -> SomeException -> Action -> IO () simpleHandler val exc act = case act of Drop -> onDrop Retry -> unsafeWriteBChan inChanIn val Terminate -> Exc.throwIO exc handler :: a -> SomeException -> IO () handler _ exc | Just Shutdown <- fromException exc = Exc.throwIO exc handler val exc = case hndl of Simple a -> simpleHandler val exc a Handle h -> h val exc >>= simpleHandler val exc processInput :: IO () processInput = do x <- readBChan inChanOut case x of Nothing -> signalQSemN endSem 1 Just a -> do f a `Exc.catch` handler a processInput unsafeAllocThread :: IO (Weak ThreadId) unsafeAllocThread = do setupFork tid <- forkFinally processInput $ \exit -> do signalQSemN shutdownSem 1 case exit of Left exc | Just Shutdown <- fromException exc -> cleanupForkError | otherwise -> Exc.throwTo originTid exc `Exc.catch` shutdownHandler Right () -> cleanupFork mkWeakThreadId tid where shutdownHandler Shutdown = return () allocThread :: ContT r IO (Weak ThreadId) allocThread = ContT $ bracketOnError unsafeAllocThread killWeakThread allocateThreads :: IO [Weak ThreadId] allocateThreads = evalContT $ replicateM threads allocThread cleanup :: [Weak ThreadId] -> IO () cleanup threadIds = liftIO . Exc.uninterruptibleMask_ $ do mapM_ killWeakThread threadIds waitQSemN shutdownSem threads wait :: m () wait = do closeBChan inChanIn liftIO $ waitQSemN endSem threads return (allocateThreads, cleanup, bufferValue, wait) where ThreadBracket{setupFork,cleanupFork,cleanupForkError} = threadBracket killWeakThread :: Weak ThreadId -> IO () killWeakThread wTid = do tid <- deRefWeak wTid case tid of Nothing -> return () Just t -> Exc.throwTo t Shutdown -- | Sets up parallel processing. -- -- The workhorses of this function are the output yielder and \"stream\" -- processing functions. -- -- The output yielder is responsible for handling the produced @b@ values, -- which if can either yield downstream ('Left') when used with something like -- @conduit@ or @pipes@, or fold into a single results ('Right') when used to -- run IO in parallel. -- -- The stream processing function gets two arguments: -- -- [@a -> m ()@] Should be used to buffer a number of elements equal to the -- number of threads. -- -- [@a -> m b@] Which should be used to process the remainder of the -- element stream via, for example, 'Data.Conduit.mapM'. -- -- See "BroadcastChan" or @broadcast-chan-conduit@ for examples. -- -- The returned 'BracketOnError' has a 'allocate' action that takes care of -- setting up 'Control.Concurrent.forkIO' threads and exception handlers. 
The -- 'cleanup' action ensures all threads are terminate in case of an exception. -- Finally, 'action' performs the actual parallel processing of elements. runParallel :: forall a b m n r . (MonadIO m, MonadIO n) => Either (b -> n r) (r -> b -> n r) -- ^ Output yielder -> Handler IO a -- ^ Parallel processing exception handler -> Int -- ^ Number of threads to use -> (a -> IO b) -- ^ Function to run in parallel -> ((a -> m ()) -> (a -> m (Maybe b)) -> n r) -- ^ \"Stream\" processing function -> n (BracketOnError n r) runParallel = runParallelWith noopBracket -- | Like 'runParallel', but accepts a setup and cleanup action that will be -- run before spawning a new thread and upon thread exit respectively. -- -- The main use case is to properly manage the resource reference counts of -- 'Control.Monad.Trans.Resource.ResourceT'. -- -- If the setup throws an 'IO' exception or otherwise aborts, it __MUST__ -- ensure any allocated resource are freed. If it completes without an -- exception, the cleanup is guaranteed to run (assuming proper use of -- bracketing with the returned 'BracketOnError'). -- -- @since 0.2.1 runParallelWith :: forall a b m n r . (MonadIO m, MonadIO n) => ThreadBracket -- ^ Bracketing action used to manage resources across thread spawns -> Either (b -> n r) (r -> b -> n r) -- ^ Output yielder -> Handler IO a -- ^ Parallel processing exception handler -> Int -- ^ Number of threads to use -> (a -> IO b) -- ^ Function to run in parallel -> ((a -> m ()) -> (a -> m (Maybe b)) -> n r) -- ^ \"Stream\" processing function -> n (BracketOnError n r) runParallelWith threadBracket yielder hndl threads work pipe = do outChanIn <- newBroadcastChan outChanOut <- newBChanListener outChanIn let process :: MonadIO f => a -> f () process = liftIO . (work >=> void . writeBChan outChanIn . Just) notifyDrop :: IO () notifyDrop = void $ writeBChan outChanIn Nothing (allocate, cleanup, bufferValue, wait) <- parallelCore hndl threads notifyDrop threadBracket process let queueAndYield :: a -> m (Maybe b) queueAndYield x = do ~(Just v) <- liftIO $ readBChan outChanOut <* bufferValue x return v finish :: r -> n r finish r = do next <- readBChan outChanOut case next of Nothing -> return r Just Nothing -> finish r Just (Just v) -> foldFun r v >>= finish action :: n r action = do result <- pipe (liftIO . bufferValue) queueAndYield wait closeBChan outChanIn finish result return Bracket{allocate,cleanup,action} where foldFun = case yielder of Left g -> const g Right g -> g -- | Sets up parallel processing for functions where we ignore the result. -- -- The stream processing argument is the workhorse of this function. It gets a -- (rate-limited) function @a -> m ()@ that queues @a@ values for processing. -- This function should be applied to all @a@ elements that should be -- processed. This would be either a partially applied 'Control.Monad.forM_' -- for parallel processing, or something like conduit's 'Data.Conduit.mapM_' to -- construct a \"sink\" for @a@ values. See "BroadcastChan" or -- @broadcast-chan-conduit@ for examples. -- -- The returned 'BracketOnError' has a 'allocate' action that takes care of -- setting up 'Control.Concurrent.forkIO' threads and exception handlers. Th -- 'cleanup' action ensures all threads are terminate in case of an exception. -- Finally, 'action' performs the actual parallel processing of elements. 
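--
-- For illustration, this is how @BroadcastChan.@'BroadcastChan.parMapM_' in
-- this package consumes the returned 'BracketOnError' (the @bracketOnError@
-- used here is the small unlifting wrapper defined in that module):
--
-- > parMapM_ hndl threads workFun input = do
-- >     UnliftIO runInIO <- askUnliftIO
-- >     Bracket{allocate,cleanup,action} <- runParallel_
-- >         (mapHandler runInIO hndl) threads (runInIO . workFun) (forM_ input)
-- >     bracketOnError allocate cleanup action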
runParallel_ :: (MonadIO m, MonadIO n) => Handler IO a -- ^ Parallel processing exception handler -> Int -- ^ Number of threads to use -> (a -> IO ()) -- ^ Function to run in parallel -> ((a -> m ()) -> n r) -- ^ \"Stream\" processing function -> n (BracketOnError n r) runParallel_ = runParallelWith_ noopBracket -- | Like 'runParallel_', but accepts a setup and cleanup action that will be -- run before spawning a new thread and upon thread exit respectively. -- -- The main use case is to properly manage the resource reference counts of -- 'Control.Monad.Trans.Resource.ResourceT'. -- -- If the setup throws an 'IO' exception or otherwise aborts, it __MUST__ -- ensure any allocated resource are freed. If it completes without an -- exception, the cleanup is guaranteed to run (assuming proper use of -- bracketing with the returned 'BracketOnError'). -- -- @since 0.2.1 runParallelWith_ :: (MonadIO m, MonadIO n) => ThreadBracket -- ^ Bracketing action used to manage resources across thread spawns -> Handler IO a -- ^ Parallel processing exception handler -> Int -- ^ Number of threads to use -> (a -> IO ()) -- ^ Function to run in parallel -> ((a -> m ()) -> n r) -- ^ \"Stream\" processing function -> n (BracketOnError n r) runParallelWith_ threadBracket hndl threads workFun processElems = do sem <- liftIO $ newQSem threads let process x = signalQSem sem >> workFun x (allocate, cleanup, bufferValue, wait) <- parallelCore hndl threads (return ()) threadBracket process let action = do result <- processElems $ \v -> liftIO $ do waitQSem sem bufferValue v wait return result return Bracket{allocate,cleanup,action} broadcast-chan-0.2.1.1/BroadcastChan/Internal.hs0000644000000000000000000002060107346545000017516 0ustar0000000000000000{-# LANGUAGE DataKinds #-} {-# LANGUAGE KindSignatures #-} {-# LANGUAGE Trustworthy #-} module BroadcastChan.Internal where import Control.Concurrent.MVar import Control.Exception (mask_) import Control.Monad ((>=>)) import Control.Monad.IO.Unlift (MonadIO(..)) import System.IO.Unsafe (unsafeInterleaveIO) -- | Used with DataKinds as phantom type indicating whether a 'BroadcastChan' -- value is a read or write end. data Direction = In -- ^ Indicates a write 'BroadcastChan' | Out -- ^ Indicates a read 'BroadcastChan' -- | Alias for the 'In' type from the 'Direction' kind, allows users to write -- the @'BroadcastChan' 'In' a@ type without enabling @DataKinds@. type In = 'In -- | Alias for the 'Out' type from the 'Direction' kind, allows users to write -- the @'BroadcastChan' 'Out' a@ type without enabling @DataKinds@. type Out = 'Out -- | The abstract type representing the read or write end of a 'BroadcastChan'. newtype BroadcastChan (dir :: Direction) a = BChan (MVar (Stream a)) deriving (Eq) type Stream a = MVar (ChItem a) data ChItem a = ChItem a {-# UNPACK #-} !(Stream a) | Closed -- | Creates a new 'BroadcastChan' write end. newBroadcastChan :: MonadIO m => m (BroadcastChan In a) newBroadcastChan = liftIO $ do hole <- newEmptyMVar writeVar <- newMVar hole return (BChan writeVar) -- | Close a 'BroadcastChan', disallowing further writes. Returns 'True' if the -- 'BroadcastChan' was closed. Returns 'False' if the 'BroadcastChan' was -- __already__ closed. closeBChan :: MonadIO m => BroadcastChan In a -> m Bool closeBChan (BChan writeVar) = liftIO . mask_ $ do old_hole <- takeMVar writeVar -- old_hole is always empty unless the channel was already closed tryPutMVar old_hole Closed <* putMVar writeVar old_hole -- | Check whether a 'BroadcastChan' is closed. 
'True' meaning that future -- read/write operations on the channel will always fail. -- -- ['BroadcastChan' 'In':]: -- -- @True@ indicates the channel is closed and writes will always fail. -- -- __Beware of TOC-TOU races__: It is possible for a 'BroadcastChan' to be -- closed by another thread. If multiple threads use the same channel -- a 'closeBChan' from another thread can result in the channel being -- closed right after 'isClosedBChan' returns. -- -- ['BroadcastChan' 'Out':]: -- -- @True@ indicates the channel is both closed and empty, meaning reads -- will always fail. isClosedBChan :: MonadIO m => BroadcastChan dir a -> m Bool isClosedBChan (BChan mvar) = liftIO $ do old_hole <- readMVar mvar val <- tryReadMVar old_hole case val of Just Closed -> return True _ -> return False -- | Write a value to write end of a 'BroadcastChan'. Any messages written -- while there are no live read ends are dropped on the floor and can be -- immediately garbage collected, thus avoiding space leaks. -- -- The return value indicates whether the write succeeded, i.e., 'True' if the -- message was written, 'False' is the channel is closed. -- See @BroadcastChan.Throw.@'BroadcastChan.Throw.writeBChan' for an -- exception throwing variant. writeBChan :: MonadIO m => BroadcastChan In a -> a -> m Bool writeBChan (BChan writeVar) val = liftIO $ do new_hole <- newEmptyMVar mask_ $ do old_hole <- takeMVar writeVar -- old_hole is only full if the channel was previously closed empty <- tryPutMVar old_hole (ChItem val new_hole) if empty then putMVar writeVar new_hole else putMVar writeVar old_hole return empty {-# INLINE writeBChan #-} -- | Read the next value from the read end of a 'BroadcastChan'. Returns -- 'Nothing' if the 'BroadcastChan' is closed and empty. -- See @BroadcastChan.Throw.@'BroadcastChan.Throw.readBChan' for an exception -- throwing variant. readBChan :: MonadIO m => BroadcastChan Out a -> m (Maybe a) readBChan (BChan readVar) = liftIO $ do modifyMVarMasked readVar $ \read_end -> do -- Note [modifyMVarMasked] -- Use readMVar here, not takeMVar, -- else newBChanListener doesn't work result <- readMVar read_end case result of ChItem val new_read_end -> return (new_read_end, Just val) Closed -> return (read_end, Nothing) {-# INLINE readBChan #-} -- Note [modifyMVarMasked] -- This prevents a theoretical deadlock if an asynchronous exception -- happens during the readMVar while the MVar is empty. In that case -- the read_end MVar will be left empty, and subsequent readers will -- deadlock. Using modifyMVarMasked prevents this. The deadlock can -- be reproduced, but only by expanding readMVar and inserting an -- artificial yield between its takeMVar and putMVar operations. -- | Create a new read end for a 'BroadcastChan'. -- -- ['BroadcastChan' 'In':]: -- -- Will receive all messages written to the channel __after__ this read -- end is created. -- -- ['BroadcastChan' 'Out':]: -- -- Will receive all currently unread messages and all future messages. newBChanListener :: MonadIO m => BroadcastChan dir a -> m (BroadcastChan Out a) newBChanListener (BChan mvar) = liftIO $ do hole <- readMVar mvar newReadVar <- newMVar hole return (BChan newReadVar) -- | Return a lazy list representing the messages written to the channel. -- -- Uses 'unsafeInterleaveIO' to defer the IO operations. -- -- ['BroadcastChan' 'In':]: -- -- The list contains every message written to the channel after this 'IO' -- action completes. 
-- -- ['BroadcastChan' 'Out':]: -- -- The list contains every currently unread message and all future -- messages. It's safe to keep using the original channel in any thread. -- -- Unlike 'Control.Concurrent.getChanContents' from "Control.Concurrent", -- the list resulting from this function is __not__ affected by reads on -- the input channel. Every message that is unread or written after the -- 'IO' action completes __will__ end up in the result list. getBChanContents :: BroadcastChan dir a -> IO [a] getBChanContents = newBChanListener >=> go where go ch = unsafeInterleaveIO $ do result <- readBChan ch case result of Nothing -> return [] Just x -> do xs <- go ch return (x:xs) -- | Strict fold of the 'BroadcastChan'​'s messages. Can be used with -- "Control.Foldl" from Tekmo's foldl package: -- -- @"Control.Foldl".'Control.Foldl.purely' 'foldBChan' :: ('MonadIO' m, 'MonadIO' n) => 'Control.Foldl.Fold' a b -> 'BroadcastChan' d a -> n (m b)@ -- -- The result of this function is a nested monadic value to give more -- fine-grained control/separation between the start of listening for messages -- and the start of processing. The inner action folds the actual messages and -- completes when the channel is closed and exhausted. The outer action -- controls from when on messages are received. Specifically: -- -- ['BroadcastChan' 'In':]: -- -- Will process all messages sent after the outer action completes. -- -- ['BroadcastChan' 'Out':]: -- -- Will process all messages that are unread when the outer action -- completes, as well as all future messages. -- -- After the outer action completes the fold is unaffected by other -- (concurrent) reads performed on the original channel. So it's safe to -- reuse the channel. foldBChan :: (MonadIO m, MonadIO n) => (x -> a -> x) -> x -> (x -> b) -> BroadcastChan d a -> n (m b) foldBChan step begin done chan = do listen <- newBChanListener chan return $ go listen begin where go listen x = do x' <- readBChan listen case x' of Just x'' -> go listen $! step x x'' Nothing -> return $! done x {-# INLINABLE foldBChan #-} -- | Strict, monadic fold of the 'BroadcastChan'​'s messages. Can be used with -- "Control.Foldl" from Tekmo's foldl package: -- -- @"Control.Foldl".'Control.Foldl.impurely' 'foldBChanM' :: ('MonadIO' m, 'MonadIO' n) => 'Control.Foldl.FoldM' m a b -> 'BroadcastChan' d a -> n (m b)@ -- -- Has the same behaviour and guarantees as 'foldBChan'. foldBChanM :: (MonadIO m, MonadIO n) => (x -> a -> m x) -> m x -> (x -> m b) -> BroadcastChan d a -> n (m b) foldBChanM step begin done chan = do listen <- newBChanListener chan return $ do x0 <- begin go listen x0 where go listen x = do x' <- readBChan listen case x' of Just x'' -> step x x'' >>= go listen Nothing -> done x {-# INLINABLE foldBChanM #-} broadcast-chan-0.2.1.1/BroadcastChan/Prelude.hs0000644000000000000000000000220507346545000017342 0ustar0000000000000000{-# LANGUAGE Safe #-} ------------------------------------------------------------------------------- -- | -- Module : BroadcastChan.Prelude -- Copyright : (C) 2014-2018 Merijn Verstraaten -- License : BSD-style (see the file LICENSE) -- Maintainer : Merijn Verstraaten -- Stability : experimental -- Portability : haha -- -- This module contains convenience functions that clash with names in -- "Prelude" and is intended to be imported qualified. 
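--
-- A short usage sketch (illustrative only, not part of the package):
--
-- > import BroadcastChan (BroadcastChan, Out)
-- > import qualified BroadcastChan.Prelude as BC
-- >
-- > printAll :: BroadcastChan Out String -> IO ()
-- > printAll = BC.mapM_ putStrLn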
------------------------------------------------------------------------------- module BroadcastChan.Prelude ( forM_ , mapM_ ) where import Prelude hiding (mapM_) import Control.Monad.IO.Unlift (MonadIO(..)) import BroadcastChan -- | 'mapM_' with it's arguments flipped. forM_ :: MonadIO m => BroadcastChan Out a -> (a -> m b) -> m () forM_ = flip mapM_ -- | Map a monadic function over the elements of a 'BroadcastChan', ignoring -- the results. mapM_ :: MonadIO m => (a -> m b) -> BroadcastChan Out a -> m () mapM_ f ch = do result <- liftIO $ readBChan ch case result of Nothing -> return () Just x -> f x >> mapM_ f ch broadcast-chan-0.2.1.1/BroadcastChan/Throw.hs0000644000000000000000000000467507346545000017062 0ustar0000000000000000{-# LANGUAGE DeriveDataTypeable #-} {-# LANGUAGE Safe #-} ------------------------------------------------------------------------------- -- | -- Module : BroadcastChan.Throw -- Copyright : (C) 2014-2018 Merijn Verstraaten -- License : BSD-style (see the file LICENSE) -- Maintainer : Merijn Verstraaten -- Stability : experimental -- Portability : haha -- -- This module is identical to "BroadcastChan", but with -- @BroadcastChan.@'BroadcastChan.writeBChan' and -- @BroadcastChan.@'BroadcastChan.readBChan' replaced with versions that throw -- an exception, rather than returning results that the user has to inspect to -- check for success. ------------------------------------------------------------------------------- module BroadcastChan.Throw ( BChanError(..) , readBChan , writeBChan -- * Re-exports from "BroadcastChan" -- ** Datatypes , BroadcastChan , Direction(..) , In , Out -- ** Construction , newBroadcastChan , newBChanListener -- ** Basic Operations , closeBChan , isClosedBChan , getBChanContents -- ** Parallel processing , Action(..) , Handler(..) , parMapM_ , parFoldMap , parFoldMapM -- ** Foldl combinators -- | Combinators for use with Tekmo's @foldl@ package. , foldBChan , foldBChanM ) where import Control.Monad (when) import Control.Exception (Exception, throwIO) import Data.Typeable (Typeable) import BroadcastChan hiding (writeBChan, readBChan) import qualified BroadcastChan as Internal -- | Exception type for 'BroadcastChan' operations. data BChanError = WriteFailed -- ^ Attempted to write to closed 'BroadcastChan' | ReadFailed -- ^ Attempted to read from an empty closed 'BroadcastChan' deriving (Eq, Read, Show, Typeable) instance Exception BChanError -- | Like 'Internal.readBChan', but throws a 'ReadFailed' exception when -- reading from a closed and empty 'BroadcastChan'. readBChan :: BroadcastChan Out a -> IO a readBChan ch = do result <- Internal.readBChan ch case result of Nothing -> throwIO ReadFailed Just x -> return x {-# INLINE readBChan #-} -- | Like 'Internal.writeBChan', but throws a 'WriteFailed' exception when -- writing to closed 'BroadcastChan'. writeBChan :: BroadcastChan In a -> a -> IO () writeBChan ch val = do success <- Internal.writeBChan ch val when (not success) $ throwIO WriteFailed {-# INLINE writeBChan #-} broadcast-chan-0.2.1.1/CHANGELOG.md0000755000000000000000000000142407346545000014530 0ustar00000000000000000.2.1.1 [2020.03.05] -------------------- * Updated imports to support `unliftio-core` 0.2.x 0.2.1 [2019.11.17] ------------------ * Adds `ThreadBracket`, `runParallelWith`, and `runParallelWith_` to `BroadcastChan.Extra` to support thread related resource management. This is required to fix `broadcast-chan-conduit`'s use of `MonadResource`. 
0.2.0.2 [2019.03.30] -------------------- * GHC 8.6/MonadFail compatibility fix 0.2.0.1 [2018.09.24] -------------------- * Loosen STM bounds for new stackage release. * Ditch GHC 7.6.3 support. 0.2.0 [2018.09.20] ------------------ * Complete rework to be actually practical. * Switched to standalone module hierarchy. * Added functionality for parallel tasks. * Add module which uses exceptions, instead of results to signal failure. broadcast-chan-0.2.1.1/LICENSE0000644000000000000000000000300307346545000013714 0ustar0000000000000000Copyright (c) 2013-2017, Merijn Verstraaten All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. * Neither the name of Merijn Verstraaten nor the names of other contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. broadcast-chan-0.2.1.1/README.md0000755000000000000000000000466407346545000014207 0ustar0000000000000000BroadcastChan: Closable, fair, single-wakeup, broadcast channels ================================================================ [![BSD3](https://img.shields.io/badge/License-BSD-blue.svg)](https://en.wikipedia.org/wiki/BSD_License) [![Hackage](https://img.shields.io/hackage/v/broadcast-chan.svg)](https://hackage.haskell.org/package/broadcast-chan) [![hackage-ci](https://matrix.hackage.haskell.org/api/v2/packages/broadcast-chan/badge)](https://matrix.hackage.haskell.org/#/package/broadcast-chan) [![Stackage](https://www.stackage.org/package/broadcast-chan/badge/lts?label=Stackage)](https://www.stackage.org/package/broadcast-chan) [![Build Status](https://travis-ci.org/merijn/broadcast-chan.svg)](https://travis-ci.org/merijn/broadcast-chan) A closable, fair, single-wakeup channel that avoids the 0 reader space leak that `Control.Concurrent.Chan` from base suffers from. The `Chan` type from `Control.Concurrent.Chan` consists of both a read and write end combined into a single value. This means there is always at least 1 read end for a `Chan`, which keeps any values written to it alive. This is a problem for applications/libraries that want to have a channel that can have zero listeners. Suppose we have an library that produces events and we want to let users register to receive events. 
If we use a channel and write all events to it, we would like to drop and garbage collect any events that take place when there are 0 listeners. The always present read end of `Chan` from base makes this impossible. We end up with a `Chan` that forever accumulates more and more events that will never get removed, resulting in a memory leak. `BroadcastChan` splits channels into separate read and write ends. Any message written to a a channel with no existing read end is immediately dropped so it can be garbage collected. Once a read end is created, all messages written to the channel will be accessible to that read end. Once all read ends for a channel have disappeared and been garbage collected, the channel will return to dropping messages as soon as they are written. Why should I use `BroadcastChan` over `Control.Concurrent.Chan`? --- * `BroadcastChan` is closable, * `BroadcastChan` has no 0 reader space leak, * `BroadcastChan` has comparable or better performance. Why should I use `BroadcastChan` over various (closable) STM channels? --- * `BroadcastChan` is single-wakeup, * `BroadcastChan` is fair, * `BroadcastChan` performs better under contention. broadcast-chan-0.2.1.1/Setup.hs0000644000000000000000000000005607346545000014350 0ustar0000000000000000import Distribution.Simple main = defaultMain broadcast-chan-0.2.1.1/benchmarks/0000755000000000000000000000000007346545000015030 5ustar0000000000000000broadcast-chan-0.2.1.1/benchmarks/Channels.hs0000644000000000000000000001751407346545000017127 0ustar0000000000000000{-# LANGUAGE BangPatterns #-} {-# LANGUAGE DeriveGeneric #-} {-# LANGUAGE RecordWildCards #-} import Criterion.Main import Control.Applicative ((<$>)) import Control.Concurrent (setNumCapabilities) import Control.Concurrent.Async import BroadcastChan import qualified BroadcastChan.Throw as Throw import Control.Concurrent.Chan import Control.Concurrent.MVar import Control.Concurrent.STM import Control.DeepSeq (NFData(..)) import Control.Monad (forM, guard, replicateM, void) import qualified Control.Monad as Monad import Data.Bifunctor (second) import Data.Int (Int64) import GHC.Conc (getNumProcessors) import GHC.Generics (Generic) instance NFData (BroadcastChan io a) where rnf !_ = () instance NFData (IO a) where rnf !_ = () replicateM_ :: Monad m => Int64 -> m a -> m () replicateM_ = Monad.replicateM_ . fromIntegral splitEqual :: Integral a => a -> a -> [a] splitEqual _ 0 = [] splitEqual total n = replicate rest (base + 1) ++ replicate (fromIntegral n - rest) base where (base, rest) = second fromIntegral $ total `quotRem` n data Config = Config { writers :: Int , readers :: Int , numMsgs :: Int64 , broadcast :: Bool } data ChanOps = ChanOps { putChan :: !(IO ()) , takeChan :: !(IO ()) , dupTake :: !(IO (IO ())) } deriving (Generic) instance NFData ChanOps data ChanType = Chan { chanName :: String , canBroadcast :: Bool , allocChan :: Int64 -> Int64 -> IO ChanOps } benchBChan :: ChanType benchBChan = Chan "BroadcastChan" True $ \_size numMsgs -> do chan <- newBroadcastChan listener <- newBChanListener chan replicateM_ numMsgs $ writeBChan chan () return ChanOps { putChan = void $ writeBChan chan () , takeChan = void $ readBChan listener , dupTake = void . 
readBChan <$> newBChanListener chan } {-# INLINE benchBChan #-} benchBChanExcept :: ChanType benchBChanExcept = Chan "BroadcastChan.Throw" True $ \_size numMsgs -> do chan <- newBroadcastChan listener <- newBChanListener chan replicateM_ numMsgs $ writeBChan chan () return ChanOps { putChan = Throw.writeBChan chan () , takeChan = Throw.readBChan listener , dupTake = Throw.readBChan <$> newBChanListener chan } {-# INLINE benchBChanExcept #-} benchBChanDrop :: ChanType benchBChanDrop = Chan "BroadcastChan (drop)" False $ \_ _ -> do chan <- newBroadcastChan return ChanOps { putChan = void $ writeBChan chan () , takeChan = fail "Dropping BroadcastChan doesn't support reading." , dupTake = fail "Dropping BroadcastChan doesn't support broadcasting." } {-# INLINE benchBChanDrop #-} benchBChanDropExcept :: ChanType benchBChanDropExcept = Chan "BroadcastChan.Throw (drop)" False $ \_ _ -> do chan <- newBroadcastChan return ChanOps { putChan = Throw.writeBChan chan () , takeChan = fail "Dropping BroadcastChan doesn't support reading." , dupTake = fail "Dropping BroadcastChan doesn't support broadcasting." } {-# INLINE benchBChanDropExcept #-} benchChan :: ChanType benchChan = Chan "Chan" True $ \_size numMsgs -> do chan <- newChan replicateM_ numMsgs $ writeChan chan () return ChanOps { putChan = writeChan chan () , takeChan = readChan chan , dupTake = readChan <$> dupChan chan } {-# INLINE benchChan #-} benchTChan :: ChanType benchTChan = Chan "TChan" True $ \_size numMsgs -> do chan <- newTChanIO replicateM_ numMsgs . atomically $ writeTChan chan () return ChanOps { putChan = atomically $ writeTChan chan () , takeChan = atomically $ readTChan chan , dupTake = atomically . readTChan <$> atomically (dupTChan chan) } {-# INLINE benchTChan #-} benchTQueue :: ChanType benchTQueue = Chan "TQueue" False $ \_size numMsgs -> do chan <- newTQueueIO replicateM_ numMsgs . atomically $ writeTQueue chan () return ChanOps { putChan = atomically $ writeTQueue chan () , takeChan = atomically $ readTQueue chan , dupTake = return (fail "TQueue doesn't support broadcasting") } {-# INLINE benchTQueue #-} benchTBQueue :: ChanType benchTBQueue = Chan "TBQueue" False $ \size numMsgs -> do chan <- newTBQueueIO (fromIntegral size) replicateM_ numMsgs . atomically $ writeTBQueue chan () return ChanOps { putChan = atomically $ writeTBQueue chan () , takeChan = atomically $ readTBQueue chan , dupTake = return (fail "TBQueue doesn't support broadcasting") } {-# INLINE benchTBQueue #-} benchWrites :: ChanType -> Benchmark benchWrites Chan{..} = bench chanName $ perBatchEnv (\i -> allocChan i 0) putChan benchReads :: ChanType -> Benchmark benchReads Chan{..} = bench chanName $ perBatchEnv (\i -> allocChan i i) takeChan benchConcurrent :: Config -> ChanType -> Benchmark benchConcurrent Config{..} Chan{..} = if broadcast && not canBroadcast then bgroup "" [] else bench chanName $ perRunEnv setupConcurrent id where splitMsgs :: Integral a => a -> [Int64] splitMsgs = splitEqual numMsgs . fromIntegral preloadedMsgs :: Int64 preloadedMsgs | writers == 0 = numMsgs | otherwise = 0 launchReaders :: ChanOps -> IO [Async ()] launchReaders ChanOps{..} | broadcast = replicateM readers $ do doTake <- dupTake async $ replicateM_ numMsgs doTake | otherwise = forM (splitMsgs readers) $ async . \n -> do replicateM_ n takeChan setupConcurrent :: IO (IO ()) setupConcurrent = do start <- newEmptyMVar chan@ChanOps{..} <- allocChan numMsgs preloadedMsgs wThreads <- forM (splitMsgs writers) $ async . 
\n -> do readMVar start replicateM_ n putChan rThreads <- launchReaders chan return $ putMVar start () >> mapM_ wait (wThreads ++ rThreads) {-# INLINE benchConcurrent #-} runConcurrent :: String -> [Int] -> [Int] -> [Int64] -> Bool -> [ChanType] -> Benchmark runConcurrent typeName writerCounts readerCounts msgs broadcast chans = bgroup typeName $ map makeBenchGroup threads where threads = do ws <- writerCounts rs <- readerCounts guard $ (ws, rs) `notElem` [(0,0),(0,1),(1,0)] return (ws, rs) makeBenchGroup :: (Int, Int) -> Benchmark makeBenchGroup (writers, readers) = bgroup groupName $ map mkBench msgs where groupName :: String groupName | writers == 0 = show readers ++ " readers" | readers == 0 = show writers ++ " writers" | otherwise = show writers ++ " to " ++ show readers mkBench :: Int64 -> Benchmark mkBench numMsgs = bgroup name $ map (benchConcurrent Config{..}) chans where name = show numMsgs ++ " messages" chanTypes :: [ChanType] chanTypes = [ benchBChan , benchBChanExcept , benchChan , benchTChan , benchTQueue , benchTBQueue ] writeChanTypes :: [ChanType] writeChanTypes = [ benchBChanDrop, benchBChanDropExcept ] ++ chanTypes main :: IO () main = do getNumProcessors >>= setNumCapabilities defaultMain [ bgroup "Write" $ map benchWrites writeChanTypes , bgroup "Read" $ map benchReads chanTypes , bgroup "Concurrent" [ runConcurrentWrites False writeChanTypes , runConcurrentReads False chanTypes , runConcurrentBench False chanTypes ] , bgroup "Broadcast" [ runConcurrentWrites True chanTypes , runConcurrentReads True chanTypes , runConcurrentBench True chanTypes ] ] where threads = [1,2,5,10,100,1000,10^4] msgCounts = [10^4,10^5,10^6] runConcurrentBench = runConcurrent "Read-Write" threads threads msgCounts runConcurrentWrites = runConcurrent "Write" threads [0] msgCounts runConcurrentReads = runConcurrent "Read" [0] threads msgCounts broadcast-chan-0.2.1.1/benchmarks/Sync.hs0000644000000000000000000001271207346545000016303 0ustar0000000000000000{-# LANGUAGE BangPatterns #-} import Criterion.Main import Control.Concurrent (forkIO, setNumCapabilities, yield) import Control.Concurrent.Async (async) import qualified Control.Concurrent.Async as Async import Control.Concurrent.Chan import Control.Concurrent.MVar import Control.Concurrent.QSem import Control.Concurrent.QSemN import Control.Concurrent.STM import Control.Concurrent.STM.TSem (newTSem, signalTSem, waitTSem) import Control.DeepSeq (NFData(..)) import Control.Monad (replicateM, replicateM_, void, when) import Data.Atomics.Counter import Data.IORef import Data.Function ((&)) import GHC.Conc (getNumProcessors) instance NFData (IO a) where rnf !_ = () benchSync :: (Int -> IO (IO (), IO ())) -> Int -> Benchmarkable benchSync alloc i = perRunEnv setup $ \(start, wait) -> do putMVar start () wait where setup = do start <- newEmptyMVar (signal, wait) <- alloc i replicateM_ i . 
forkIO $ do void $ readMVar start signal return (start, wait) {-# INLINE benchSync #-} benchSTM :: (Int -> IO (STM (), STM ())) -> Int -> Benchmarkable benchSTM alloc = benchSync $ \i -> do (signal, wait) <- alloc i return (atomically signal, atomically wait) {-# INLINE benchSTM #-} generalSync :: (a -> Int -> Benchmarkable) -> String -> a -> Int -> Benchmark generalSync build s alloc i = bench s $ build alloc i {-# INLINE generalSync #-} syncGeneral :: String -> (Int -> IO (IO (), IO ())) -> Int -> Benchmark syncGeneral = generalSync benchSync {-# INLINE syncGeneral #-} syncSTM :: String -> (Int -> IO (STM (), STM ())) -> Int -> Benchmark syncSTM = generalSync benchSTM {-# INLINE syncSTM #-} syncSingleWaitSTM :: String -> (IO (STM (), STM ())) -> Int -> Benchmark syncSingleWaitSTM s alloc = bgroup s . sequence [ bench "single transaction" . benchSTM singleTransaction , bench "multi transaction" . benchSync multiTransaction ] where singleTransaction :: Int -> IO (STM (), STM ()) singleTransaction i = do (signal, wait) <- alloc return (signal, replicateM_ i wait) {-# INLINE singleTransaction #-} multiTransaction :: Int -> IO (IO (), IO ()) multiTransaction i = do (signal, wait) <- alloc return (atomically signal, replicateM_ i (atomically wait)) {-# INLINE multiTransaction #-} {-# INLINE syncSingleWaitSTM #-} syncAsync :: Int -> Benchmark syncAsync = generalSync run "Async" () where setup i = do start <- newEmptyMVar threads <- replicateM i . async $ readMVar start return (start, mapM_ Async.wait threads) {-# INLINE setup #-} run () i = perRunEnv (setup i) $ \(start, wait) -> do putMVar start() wait {-# INLINE run #-} {-# INLINE syncAsync #-} syncAtomicCounter :: Int -> Benchmark syncAtomicCounter = syncGeneral "AtomicCounter" $ \i -> do cnt <- newCounter 0 let spinLoop = do n <- readCounter cnt yield when (n /= i) spinLoop {-# INLINE spinLoop #-} return (void (incrCounter 1 cnt), spinLoop) {-# INLINE syncAtomicCounter #-} syncChan :: Int -> Benchmark syncChan = syncGeneral "Chan" $ \i -> do chan <- newChan return (writeChan chan (), replicateM_ i (readChan chan)) {-# INLINE syncChan #-} syncIORef :: Int -> Benchmark syncIORef = syncGeneral "IORef" $ \i -> do ref <- newIORef 0 let spinLoop = do n <- atomicModifyIORef' ref $ \n -> (n, n) when (n /= i) spinLoop {-# INLINE spinLoop #-} return (atomicModifyIORef' ref (\n -> (n+1, ())), spinLoop) {-# INLINE syncIORef #-} syncMVar :: Int -> Benchmark syncMVar = syncGeneral "MVar" $ \i -> do mvar <- newEmptyMVar return (putMVar mvar (), replicateM_ i (takeMVar mvar)) {-# INLINE syncMVar #-} syncQSem :: Int -> Benchmark syncQSem = syncGeneral "QSem" $ \i -> do qsem <- newQSem 0 return (signalQSem qsem, replicateM_ i (waitQSem qsem)) {-# INLINE syncQSem #-} syncQSemN :: Int -> Benchmark syncQSemN = syncGeneral "QSemN" $ \i -> do qsemn <- newQSemN 0 return (signalQSemN qsemn 1, waitQSemN qsemn i) {-# INLINE syncQSemN #-} syncTChan :: Int -> Benchmark syncTChan = syncSingleWaitSTM "TChan" $ do tchan <- newTChanIO return (writeTChan tchan (), readTChan tchan) {-# INLINE syncTChan #-} syncTMVar :: Int -> Benchmark syncTMVar = syncGeneral "TMVar" $ \i -> do tmvar <- newEmptyTMVarIO let signal = atomically $ putTMVar tmvar () wait = replicateM_ i . 
atomically $ takeTMVar tmvar return (signal, wait) {-# INLINE syncTMVar #-} syncTQueue :: Int -> Benchmark syncTQueue = syncSingleWaitSTM "TQueue" $ do tqueue <- newTQueueIO return (writeTQueue tqueue (), readTQueue tqueue) {-# INLINE syncTQueue #-} syncTSem :: Int -> Benchmark syncTSem = syncSingleWaitSTM "TSem" $ do tsem <- atomically $ newTSem 0 return (signalTSem tsem, waitTSem tsem) {-# INLINE syncTSem #-} syncTVar :: Int -> Benchmark syncTVar = syncSTM "TVar" $ \i -> do tvar <- newTVarIO 0 return (modifyTVar' tvar (+1), check . (==i) =<< readTVar tvar) {-# INLINE syncTVar #-} benchThreads :: Int -> Benchmark benchThreads i = bgroup (show i ++ " threads") $ i & sequence [ syncAsync , syncAtomicCounter , syncChan , syncIORef , syncMVar , syncQSem , syncQSemN , syncTChan , syncTMVar , syncTVar , syncTQueue , syncTSem ] main :: IO () main = do getNumProcessors >>= setNumCapabilities defaultMain $ map benchThreads [1, 2, 5, 10, 100, 1000, 10000] broadcast-chan-0.2.1.1/benchmarks/Utils.hs0000644000000000000000000000100507346545000016460 0ustar0000000000000000import Control.Concurrent import Control.Monad (forM_) import Data.List (foldl') import GHC.Conc (getNumProcessors) import BroadcastChan main :: IO () main = do getNumProcessors >>= setNumCapabilities start <- newEmptyMVar done <- newEmptyMVar chan <- newBroadcastChan vals <- getBChanContents chan forkIO $ do putMVar start () putMVar done $! foldl' (+) 0 vals readMVar start forM_ [1..10000 :: Int] $ writeBChan chan closeBChan chan takeMVar done >>= print broadcast-chan-0.2.1.1/broadcast-chan.cabal0000644000000000000000000001350607346545000016555 0ustar0000000000000000Name: broadcast-chan Version: 0.2.1.1 Homepage: https://github.com/merijn/broadcast-chan Bug-Reports: https://github.com/merijn/broadcast-chan/issues Author: Merijn Verstraaten Maintainer: Merijn Verstraaten Copyright: Copyright © 2014-2019 Merijn Verstraaten License: BSD3 License-File: LICENSE Category: System Cabal-Version: >= 1.10 Build-Type: Simple Tested-With: GHC == 8.0.2, GHC == 8.2.2, GHC == 8.4.4, GHC == 8.6.5, GHC == 8.8.3, GHC == 8.10.1 Extra-Source-Files: README.md , CHANGELOG.md Synopsis: Closable, fair, single-wakeup channel type that avoids 0 reader space leaks. Description: __WARNING:__ While the code in this library should be fairly stable and production, the API is something I'm still working on. API changes will follow the PVP, but __expect__ breaking API changes in future versions! . A closable, fair, single-wakeup channel that avoids the 0 reader space leak that @"Control.Concurrent.Chan"@ from base suffers from. . The @Chan@ type from @"Control.Concurrent.Chan"@ consists of both a read and write end combined into a single value. This means there is always at least 1 read end for a @Chan@, which keeps any values written to it alive. This is a problem for applications/libraries that want to have a channel that can have zero listeners. . Suppose we have an library that produces events and we want to let users register to receive events. If we use a channel and write all events to it, we would like to drop and garbage collect any events that take place when there are 0 listeners. The always present read end of @Chan@ from base makes this impossible. We end up with a @Chan@ that forever accumulates more and more events that will never get removed, resulting in a memory leak. . @"BroadcastChan"@ splits channels into separate read and write ends. 
Any message written to a a channel with no existing read end is immediately dropped so it can be garbage collected. Once a read end is created, all messages written to the channel will be accessible to that read end. . Once all read ends for a channel have disappeared and been garbage collected, the channel will return to dropping messages as soon as they are written. . __Why should I use "BroadcastChan" over "Control.Concurrent.Chan"?__ . * @"BroadcastChan"@ is closable, . * @"BroadcastChan"@ has no 0 reader space leak, . * @"BroadcastChan"@ has comparable or better performance. . __Why should I use "BroadcastChan" over various (closable) STM channels?__ . * @"BroadcastChan"@ is single-wakeup, . * @"BroadcastChan"@ is fair, . * @"BroadcastChan"@ performs better under contention. Flag sync Description: Benchmarks synchronisation primitives used in main benchmark. Default: False Manual: True Flag threaded Description: Run benchmarks with threaded backend. Default: True Manual: True Library Default-Language: Haskell2010 GHC-Options: -Wall -O2 -fno-warn-unused-do-bind Exposed-Modules: BroadcastChan BroadcastChan.Extra BroadcastChan.Prelude BroadcastChan.Throw Other-Modules: BroadcastChan.Internal Other-Extensions: DataKinds DeriveDataTypeable KindSignatures NamedFieldPuns Safe ScopedTypeVariables Trustworthy TupleSections Build-Depends: base >= 4.7 && < 4.15 , transformers >= 0.2 && < 0.6 , unliftio-core >= 0.1.1 && < 0.3 Benchmark sync Default-Language: Haskell2010 Type: exitcode-stdio-1.0 Main-Is: Sync.hs GHC-Options: -Wall -O2 -fno-warn-orphans -rtsopts if flag(threaded) GHC-Options: -threaded Hs-Source-Dirs: benchmarks Other-Extensions: BangPatterns if flag(sync) Buildable: True else Buildable: False Build-Depends: base , async >= 2.0 && < 2.3 , atomic-primops == 0.8.* , criterion >= 1.2 && < 1.6 , deepseq >= 1.1 && < 1.5 , stm >= 2.4 && < 2.6 Benchmark channels Default-Language: Haskell2010 Type: exitcode-stdio-1.0 Main-Is: Channels.hs GHC-Options: -Wall -O2 -fno-warn-orphans -fno-warn-unused-do-bind -fno-warn-type-defaults -rtsopts if flag(threaded) GHC-Options: -threaded Hs-Source-Dirs: benchmarks Other-Extensions: BangPatterns DeriveGeneric RecordWildCards Build-Depends: base , broadcast-chan , async >= 2.0 && < 2.3 , criterion >= 1.2 && < 1.6 , deepseq >= 1.1 && < 1.5 , stm >= 2.4 && < 2.6 Benchmark utilities Default-Language: Haskell2010 Type: exitcode-stdio-1.0 Main-Is: Utils.hs GHC-Options: -Wall -O2 -fno-warn-orphans -fno-warn-unused-do-bind -rtsopts if flag(threaded) GHC-Options: -threaded Hs-Source-Dirs: benchmarks Build-Depends: base , broadcast-chan Source-Repository head Type: git Location: ssh://github.com:merijn/broadcast-chan.git
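As a closing illustration of the behaviour described above (writes are dropped while no read end exists, retained once a listener has been created, and reads return Nothing once the channel is closed and drained), here is a self-contained sketch exercising the core API. It is an illustrative example, not a file in the package:

import BroadcastChan

main :: IO ()
main = do
    chan <- newBroadcastChan
    _ <- writeBChan chan "dropped"    -- no listener yet: the message is dropped
    listener <- newBChanListener chan
    _ <- writeBChan chan "delivered"  -- a listener exists: the message is retained
    _ <- closeBChan chan              -- further writes would return False
    readBChan listener >>= print      -- Just "delivered"
    readBChan listener >>= print      -- Nothing: channel closed and empty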