| Safe Haskell | None |
| --- | --- |
| Language | Haskell2010 |
Synopsis
- pk :: [Char] -> ByteString
- up :: ByteString -> [Char]
- data Node = Node {
- nodeHost :: HostName
- nodePort :: Int
- connection :: Maybe (MVar Pool)
- nodeServices :: [Service]
- newtype Cloud a = Cloud {}
- type UPassword = ByteString
- type Host = ByteString
- type ProxyData = (UPassword, Host, Int)
- rHTTPProxy :: IORef (Maybe (Maybe ProxyData, Maybe ProxyData))
- getHTTProxyParams :: Bool -> TransIO (Maybe ProxyData)
- runCloud :: Cloud a -> TransIO a
- tlsHooks :: IORef (Bool, SData -> ByteString -> IO (), SData -> IO ByteString, Socket -> ByteString -> TransIO (), String -> Socket -> ByteString -> TransIO (), SData -> IO ())
- isTLSIncluded :: Bool
- local :: Loggable a => TransIO a -> Cloud a
- runCloudIO :: Typeable a => Cloud a -> IO (Maybe a)
- runCloudIO' :: Typeable a => Cloud a -> IO (Maybe a)
- onAll :: TransIO a -> Cloud a
- lazy :: TransIO a -> Cloud a
- fixRemote :: TransIO b -> Cloud b
- fixClosure :: Cloud ()
- loggedc :: Loggable a => Cloud a -> Cloud a
- loggedc' :: Loggable a => Cloud a -> Cloud a
- lliftIO :: Loggable a => IO a -> Cloud a
- localIO :: Loggable a => IO a -> Cloud a
- beamTo :: Node -> Cloud ()
- forkTo :: Node -> Cloud ()
- callTo :: Loggable a => Node -> Cloud a -> Cloud a
- callTo' :: (Show a, Read a, Typeable a) => Node -> Cloud a -> Cloud a
- atRemote :: Loggable a => Cloud a -> Cloud a
- runAt :: Loggable a => Node -> Cloud a -> Cloud a
- single :: TransIO a -> TransIO a
- unique :: TransIO a -> TransIO a
- wormhole :: Loggable b => Node -> Cloud b -> Cloud b
- newtype DialogInWormholeInitiated = DialogInWormholeInitiated Bool
- wormhole' :: Loggable a => Node -> Cloud a -> Cloud a
- data CloudException = CloudException Node IdClosure String
- setSynchronous :: Bool -> TransIO ()
- syncStream :: Cloud a -> Cloud a
- teleport :: Cloud ()
- localFix :: Cloud ()
- type ConnectionId = Int
- type HasClosed = Bool
- globalFix :: IORef (Map ConnectionId (HasClosed, [(IdClosure, IORef [ConnectionId])]))
- data LocalFixData = LocalFixData {
- isService :: Bool
- lengthFix :: Int
- closure :: Int
- fixedConnections :: IORef [ConnectionId]
- prevFix :: Maybe LocalFixData
- localFixServ :: Bool -> Bool -> Cloud ()
- reportBack :: TransIO ()
- copyData :: Loggable b => b -> Cloud b
- clustered :: Loggable a => Cloud a -> Cloud a
- mclustered :: (Monoid a, Loggable a) => Cloud a -> Cloud a
- callNodes :: (Loggable a1, Loggable a2) => (Cloud a2 -> Cloud a1 -> Cloud a1) -> Cloud a1 -> Cloud a2 -> Cloud a1
- callNodes' :: (Loggable a1, Loggable a2) => [Node] -> (Cloud a2 -> Cloud a1 -> Cloud a1) -> Cloud a1 -> Cloud a2 -> Cloud a1
- sendRawRecover :: Connection -> ByteString -> TransIO ()
- sendRaw :: MonadIO m => Connection -> ByteString -> m ()
- data NodeMSG = ClosureData IdClosure IdClosure Builder
- msend :: Connection -> StreamData NodeMSG -> TransIO ()
- mread :: Loggable a => Connection -> TransIO (StreamData a)
- receiveData' :: WebSocketsData a => p -> Connection -> IO a
- many' :: Alternative f => f a -> f a
- parallelReadHandler :: Loggable a => Connection -> TransIO (StreamData a)
- getWebServerNode :: TransIO Node
- mclose :: MonadIO m => Connection -> m ()
- rcookie :: IORef ByteString
- conSection :: MVar ()
- exclusiveCon :: MonadIO m => m b -> m b
- mconnect' :: Node -> TransIO Connection
- mconnect1 :: Node -> TransIO Connection
- isLocal :: String -> Bool
- makeParseContext :: MonadIO m => IO ByteString -> m ParseContext
- u :: a
- data ConnectionError = ConnectionError String Node
- mconnect :: Node -> TransIO Connection
- close1 :: Socket -> IO ()
- connectTo' :: Int -> HostName -> PortID -> IO Socket
- type Blocked = MVar (Maybe Integer)
- type BuffSize = Int
- data ConnectionData
- = Node2Node { }
- | TLSNode2Node {
- tlscontext :: SData
- | HTTPS2Node {
- tlscontext :: SData
- | Node2Web { }
- | HTTP2Node { }
- | Self
- data Connection = Connection {
- idConn :: Int
- myNode :: IORef Node
- remoteNode :: IORef (Maybe Node)
- connData :: IORef (Maybe ConnectionData)
- istream :: IORef ParseContext
- bufferSize :: BuffSize
- isBlocked :: Blocked
- calling :: Bool
- synchronous :: Bool
- localClosures :: MVar (Map IdClosure (MVar [EventF], IdClosure, MVar (), EventF))
- closChildren :: IORef (Map Int EventF)
- connectionList :: IORef [Connection]
- defConnection :: TransIO Connection
- noParseContext :: ParseContext
- setBuffSize :: Int -> TransIO ()
- getBuffSize :: TransIO BuffSize
- listen :: Node -> Cloud ()
- listenNew :: PortNumber -> Connection -> TransIO (StreamData NodeMSG)
- rsetHost :: IORef Bool
- noHTTP :: Cloud ()
- listenResponses :: Loggable a => TransIO (StreamData a)
- type IdClosure = Int
- newtype Closure = Closure IdClosure
- type RemoteClosure = (Node, IdClosure)
- newtype JobGroup = JobGroup (Map ByteString RemoteClosure)
- stopRemoteJob :: ByteString -> Cloud ()
- resetRemote :: ByteString -> Cloud ()
- execLog :: StreamData NodeMSG -> TransIO ()
- type Pool = [Connection]
- type SKey = String
- type SValue = String
- type Service = [(SKey, SValue)]
- lookup2 :: Eq a1 => a1 -> [[(a1, a2)]] -> Maybe a2
- filter2 :: Eq a => a -> [[(a, b)]] -> [b]
- readFrom :: Connection -> IO ByteString
- makeWSStreamFromConn :: Connection -> IO Stream
- makeWebsocketConnection :: MonadIO m => Connection -> ByteString -> [(CI ByteString, ByteString)] -> m Connection
- servePages :: (a, ByteString, [(CI ByteString, b)]) -> TransIO ()
- api :: TransIO ByteString -> Cloud ()
- http10 :: ByteString
- isWebSocketsReq :: [(CI ByteString, b)] -> Bool
- data HTTPMethod
- getFirstLine :: TransIO (ByteString, ByteString, ByteString)
- getRawHeaders :: TransIO ByteString
- type PostParams = [(ByteString, String)]
- parsePostUrlEncoded :: TransIO PostParams
- getHeaders :: TransIO [(CI ByteString, ByteString)]
- isBrowserInstance :: Bool
- emptyPool :: MonadIO m => m (MVar Pool)
- createNodeServ :: HostName -> Int -> [Service] -> IO Node
- createNode :: HostName -> Int -> IO Node
- createWebNode :: IO Node
- nodeList :: TVar [Node]
- errorMyNode :: [Char] -> a
- getMyNode :: TransIO Node
- getMyNodeMaybe :: TransIO Node
- getNodes :: MonadIO m => m [Node]
- getEqualNodes :: TransIO [Node]
- getWebNodes :: MonadIO m => m [Node]
- matchNodes :: MonadIO m => (Service -> Bool) -> m [[Service]]
- addNodes :: [Node] -> TransIO ()
- delNodes :: MonadIO m => [Node] -> m ()
- fixNode :: MonadIO m => Node -> m Node
- setNodes :: MonadIO m => [Node] -> m ()
- shuffleNodes :: MonadIO m => m [Node]
- addThisNodeToRemote :: Cloud ()
- setConnectionIn :: Node -> TransIO Node
- connect :: Node -> Node -> Cloud ()
- connect' :: Node -> Cloud ()
- data HTTPHeaders = HTTPHeaders (ByteString, ByteString, ByteString) [(CI ByteString, ByteString)]
- rawHTTP :: Loggable a => Node -> String -> TransIO a
- parseBody :: (Eq a, Loggable b, IsString a) => [(a, ByteString)] -> TransIO b
- getFirstLineResp :: TransIO (ByteString, ByteString, ByteString)
- dechunk :: TransIO (StreamData ByteString)
- foldNet :: Loggable a => (Cloud a -> Cloud a -> Cloud a) -> Cloud a -> Cloud a -> Cloud a
- exploreNet :: (Loggable a, Monoid a) => Cloud a -> Cloud a
- exploreNetUntil :: Loggable a => Cloud a -> Cloud a
- onBrowser :: Cloud a -> Cloud a
- onServer :: Cloud a -> Cloud a
- atBrowser :: Loggable a => Cloud a -> Cloud a
- atServer :: Loggable a => Cloud a -> Cloud a
- delta :: Integer
- connectionTimeouts :: TransIO ()
- cleanConnectionData :: MonadIO m => Connection -> m ()
- loopClosures :: TransIO ()
Documentation
pk :: [Char] -> ByteString Source #
up :: ByteString -> [Char] Source #
Instances
Eq Node Source # | |
Ord Node Source # | |
Read Node Source # | |
Show Node Source # | |
Indexable Suscribed Source # | |
Serializable Suscribed Source # | |
Defined in Transient.Move.PubSub serialize :: Suscribed -> ByteString # deserialize :: ByteString -> Suscribed # deserialKey :: String -> ByteString -> Suscribed # setPersist :: Suscribed -> Maybe Persist # | |
Loggable Node Source # | |
Defined in Transient.Move.Internals serialize :: Node -> Builder # deserializePure :: ByteString -> Maybe (Node, ByteString) # deserialize :: TransIO Node # |
Instances
Monad Cloud Source # | |
Functor Cloud Source # | |
MonadFail Cloud Source # | |
Defined in Transient.Move.Internals | |
Applicative Cloud Source # | |
Alternative Cloud Source # | |
AdditionalOperators Cloud Source # | |
MonadState EventF Cloud Source # | |
(Eq a, Fractional a) => Fractional (Cloud a) Source # | |
(Num a, Eq a) => Num (Cloud a) Source # | |
Monoid a => Semigroup (Cloud a) Source # | |
Monoid a => Monoid (Cloud a) Source # | |
type UPassword = ByteString Source #
type Host = ByteString Source #
runCloud :: Cloud a -> TransIO a Source #
Execute a distributed computation inside a TransIO computation.
All the computations in the TransIO monad that enclose the cloud computation must be logged.
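As a hedged sketch (assuming the enclosing TransIO code is logged as noted above, and a Node value obtained elsewhere, e.g. from getNodes), runCloud can embed a remote call inside a TransIO computation:

```haskell
-- Minimal sketch, not taken from the library docs: embed a Cloud
-- computation in TransIO. `peer` is assumed to be a Node obtained elsewhere.
pingPeer :: Node -> TransIO String
pingPeer peer = runCloud $ runAt peer $ localIO (return "pong")
```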
tlsHooks :: IORef (Bool, SData -> ByteString -> IO (), SData -> IO ByteString, Socket -> ByteString -> TransIO (), String -> Socket -> ByteString -> TransIO (), SData -> IO ()) Source #
isTLSIncluded :: Bool Source #
runCloudIO :: Typeable a => Cloud a -> IO (Maybe a) Source #
Run a distributed computation inside the IO monad. Enables asynchronous console input (see keep).
runCloudIO' :: Typeable a => Cloud a -> IO (Maybe a) Source #
Run a distributed computation inside the IO monad with no console input.
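A minimal, hedged sketch of a complete program using it (the `Just 4` output assumes the computation yields exactly one value):

```haskell
-- Hedged sketch: run a Cloud computation from plain IO, without console input.
main :: IO ()
main = do
  r <- runCloudIO' $ onAll (return (2 + 2 :: Int))
  print r   -- expected to print: Just 4
```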
onAll :: TransIO a -> Cloud a Source #
alternative to local. It means that if the computation is translated to another node, this will be executed again there, provided it has not been executed inside a local computation.

    onAll foo
    local foo'
    local $ do
          bar
          runCloud $ do
                  onAll baz
                  runAt node ....
    runAt node' .....

foo, bar and baz will be executed locally. But foo will also be executed remotely in node', while foo', bar and baz will not.
fixRemote :: TransIO b -> Cloud b Source #
executes a non-serializable action in the remote node, whose result can be used by subsequent remote invocations
fixClosure :: Cloud () Source #
subsequent remote invocations will send logs to this closure, so the logs will be shorter. Also, non-serializable statements before it will not be re-executed.
forkTo :: Node -> Cloud () Source #
execute a process in the remote node with the same execution state
callTo :: Loggable a => Node -> Cloud a -> Cloud a Source #
open a wormhole to another node and execute an action on it. Currently, by default, it keeps the connection open to receive additional requests and responses (streaming)
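A hedged usage sketch, built only from primitives documented on this page:

```haskell
-- Ask a remote node for its own Node record (Node is Loggable, see above).
whoAreYou :: Node -> Cloud Node
whoAreYou node = callTo node (local getMyNode)
```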
callTo' :: (Show a, Read a, Typeable a) => Node -> Cloud a -> Cloud a Source #
A connectionless version of callTo for long-running remote calls
atRemote :: Loggable a => Cloud a -> Cloud a Source #
Within a connection to a node opened by wormhole, it runs the computation in the remote node and returns the result back to the original node.

If atRemote is executed in the remote node, then the computation is executed in the original node.

    wormhole node2 $ do
         t <- atRemote $ do
               r <- foo              -- executed in node2
               s <- atRemote (bar r) -- executed in the original node
               baz s                 -- in node2
         bat t                       -- in the original node
runAt :: Loggable a => Node -> Cloud a -> Cloud a Source #
synonymous of callTo: execute the computation in the given node and return the result to the caller.

For contrast, a computation can also be executed in the node that initiated the connection: if the sequence of connections is n1 -> n2 -> n3, then `atCallingNode $ atCallingNode foo` in n3 would execute foo in n1, while `atRemote $ atRemote foo` would execute it in n3.

    atCallingNode :: Loggable a => Cloud a -> Cloud a
    atCallingNode proc= connectCaller $ atRemote proc
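A hedged sketch of typical usage (the message and helper name are illustrative):

```haskell
-- Print in the remote console and bring the remote host name back.
helloThere :: Node -> Cloud String
helloThere node = runAt node $ local $ do
  liftIO $ putStrLn "hello from a remote caller"
  nodeHost <$> getMyNode
```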
single :: TransIO a -> TransIO a Source #
run a single thread with that action for each connection created. When the same action is re-executed within that connection, all the threads generated by the previous execution are killed.

    box <- foo
    r <- runAt node . local . single $ getMailbox box
    localIO $ print r

If foo returns different mailbox identifiers, the above code would print the messages of the last one only. Without single, it would print the messages of all of them, since each call would install a new getMailbox for each one of them.
unique :: TransIO a -> TransIO a Source #
run a unique continuation for each connection. Only the first thread that executes unique is run for that connection; the rest are ignored.
wormhole :: Loggable b => Node -> Cloud b -> Cloud b Source #
A wormhole opens a connection with another node anywhere in a computation. teleport uses this connection to translate the computation back and forth between the two connected nodes. If the connection fails, it searches the network for suitable relay nodes to reach the destination node.
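A hedged sketch of the wormhole/teleport round trip described above (the messages are illustrative; atRemote packages this same pattern):

```haskell
-- The computation moves to `node` at the first teleport and returns to the
-- caller at the second one, using the single connection opened by wormhole.
backAndForth :: Node -> Cloud ()
backAndForth node = wormhole node $ do
  teleport                                  -- continue in the remote node
  localIO $ putStrLn "running in the remote node"
  teleport                                  -- translate back to the caller
  localIO $ putStrLn "back in the calling node"
```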
wormhole' :: Loggable a => Node -> Cloud a -> Cloud a Source #
wormhole without searching for relay nodes.
data CloudException Source #
Instances
Read CloudException Source # | |
Defined in Transient.Move.Internals readsPrec :: Int -> ReadS CloudException # readList :: ReadS [CloudException] # | |
Show CloudException Source # | |
Defined in Transient.Move.Internals showsPrec :: Int -> CloudException -> ShowS # show :: CloudException -> String # showList :: [CloudException] -> ShowS # | |
Exception CloudException Source # | |
Defined in Transient.Move.Internals |
setSynchronous :: Bool -> TransIO () Source #
set remote invocations to be synchronous. This is necessary when data is transferred very fast from node to node in a non-deterministic stream, in order to keep the continuation of the calling node unchanged until the response arrives, since all the calls share a single continuation in the calling node.

If there is no response from the remote node, the streaming is interrupted.

    main= keep $ initNode $ onBrowser $ do
        local $ setSynchronous True
        line <- local $ threads 0 $ choose [1..10::Int]
        localIO $ print ("1", line)
        atRemote $ localIO $ print line
        localIO $ print ("2", line)
syncStream :: Cloud a -> Cloud a Source #
One problem of forwarding closures for streaming is that they could transport not only the data but also the extra information needed to reconstruct the closure in the destination node. In a single-in, single-out interaction this may not be a problem, but think, for example, of synchronizing N editors by forwarding small modifications, or, worst of all, of transmitting packets of audio or video. The size of the closure, that is, the number of variables that has to be transported, grows as the code becomes more complex. But Transient builds closures upon closures, so it only has to send what has changed since the last interaction.

In one-to-one interactions within a wormhole this is automatic, but when different wormholes are involved it is necessary to tell explicitly which closure will continue the execution. This is what localFix does; otherwise closure 0 is used.

    main= do
        filename <- local input
        source <- atServer $ local $ readFile filename
        local $ render source inEditor
        -- send up to here one single time please, so I only stream the deltas
        localFix
        delta <- react onEachChange
        forallNodes $ update delta

If forwardChanges sent to all the nodes editing the document, the data necessary to reconstruct the closure would include even the source code of the file on EACH change. Fortunately it is possible to fix a closure that will not change in all the remote nodes, so that afterwards only the one necessary variable, the delta, has to be sent. This is as efficient as a hand-made socket write / forkThread / readSocket loop for each node.
type ConnectionId = Int Source #
globalFix :: IORef (Map ConnectionId (HasClosed, [(IdClosure, IORef [ConnectionId])])) Source #
data LocalFixData Source #
LocalFixData | |
|
Instances
Show LocalFixData Source # | |
Defined in Transient.Move.Internals showsPrec :: Int -> LocalFixData -> ShowS # show :: LocalFixData -> String # showList :: [LocalFixData] -> ShowS # |
reportBack :: TransIO () Source #
forward exceptions back to the calling node
copyData :: Loggable b => b -> Cloud b Source #
copy a session data variable from the local to the remote node. If there is none set in the local node, the parameter is the default value. In this case, the default value is also set in the local node.
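A hedged sketch of the intent; the Int session value, the setData/getSData calls from Transient.Base, and the surrounding runAt are illustrative assumptions rather than part of this documentation:

```haskell
-- Session data is keyed by type; here an Int plays that role.
shareCount :: Node -> Cloud ()
shareCount node = do
  local $ setData (42 :: Int)
  _ <- copyData (0 :: Int)        -- 0 would be used (and set) if nothing was set
  runAt node $ local $ do
    n <- getSData <|> return (0 :: Int)
    liftIO $ putStrLn ("remote sees: " ++ show n)  -- expected: 42
```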
clustered :: Loggable a => Cloud a -> Cloud a Source #
execute a Transient action in each of the connected nodes.

The response of each node is received by the invoking node and processed by the rest of the procedure. By default, each response is processed in a new thread. To restrict the number of threads use the thread control primitives.

This snippet receives a message from each of the simulated nodes:

    main = keep $ do
        let nodes= map createLocalNode [2000..2005]
        addNodes nodes
        (foldl (<|>) empty $ map listen nodes) <|> return ()

        r <- clustered $ do
                  Connection (Just(PortNumber port, _, _, _)) _ <- getSData
                  return $ "hi from " ++ show port ++ "\n"

        liftIO $ putStrLn r
        where createLocalNode n= createNode "localhost" (PortNumber n)
callNodes :: (Loggable a1, Loggable a2) => (Cloud a2 -> Cloud a1 -> Cloud a1) -> Cloud a1 -> Cloud a2 -> Cloud a1 Source #
callNodes' :: (Loggable a1, Loggable a2) => [Node] -> (Cloud a2 -> Cloud a1 -> Cloud a1) -> Cloud a1 -> Cloud a2 -> Cloud a1 Source #
sendRawRecover :: Connection -> ByteString -> TransIO () Source #
sendRaw :: MonadIO m => Connection -> ByteString -> m () Source #
msend :: Connection -> StreamData NodeMSG -> TransIO () Source #
mread :: Loggable a => Connection -> TransIO (StreamData a) Source #
receiveData' :: WebSocketsData a => p -> Connection -> IO a Source #
many' :: Alternative f => f a -> f a Source #
parallelReadHandler :: Loggable a => Connection -> TransIO (StreamData a) Source #
mclose :: MonadIO m => Connection -> m () Source #
conSection :: MVar () Source #
exclusiveCon :: MonadIO m => m b -> m b Source #
makeParseContext :: MonadIO m => IO ByteString -> m ParseContext Source #
data ConnectionError Source #
Instances
Read ConnectionError Source # | |
Defined in Transient.Move.Internals | |
Show ConnectionError Source # | |
Defined in Transient.Move.Internals showsPrec :: Int -> ConnectionError -> ShowS # show :: ConnectionError -> String # showList :: [ConnectionError] -> ShowS # | |
Exception ConnectionError Source # | |
Defined in Transient.Move.Internals |
data ConnectionData Source #
data Connection Source #
Connection | |
|
connectionList :: IORef [Connection] Source #
setBuffSize :: Int -> TransIO () Source #
listenNew :: PortNumber -> Connection -> TransIO (StreamData NodeMSG) Source #
listenResponses :: Loggable a => TransIO (StreamData a) Source #
type RemoteClosure = (Node, IdClosure) Source #
stopRemoteJob :: ByteString -> Cloud () Source #
if there is a remote job identified by the string identifier, it stops that job and sets the current remote operation (if any) as the current remote job for this identifier. The purpose is to have a single remote job per identifier.

To identify the remote job, it should be used after the wormhole and before the remote call:

    r <- wormhole node $ do
            stopRemoteJob "streamlog"
            atRemote myRemotejob

So:

    runAtUnique ident node job= wormhole node $ do stopRemoteJob ident; atRemote job
resetRemote :: ByteString -> Cloud () Source #
kill the remote job; usually done before starting a new one.
type Pool = [Connection] Source #
readFrom :: Connection -> IO ByteString Source #
makeWebsocketConnection :: MonadIO m => Connection -> ByteString -> [(CI ByteString, ByteString)] -> m Connection Source #
servePages :: (a, ByteString, [(CI ByteString, b)]) -> TransIO () Source #
api :: TransIO ByteString -> Cloud () Source #
forward all the results of the Transient computation to the opened connection
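A hedged sketch of a minimal endpoint (assumes OverloadedStrings for the ByteString literal and initNode from Transient.Move.Utils; the hand-written HTTP response is illustrative, since api simply writes the produced ByteStrings to the connection):

```haskell
main = keep $ initNode $ api $ do
  (method, uri, _version) <- getFirstLine     -- request line, as parsed above
  liftIO $ print (method, uri)
  return "HTTP/1.0 200 OK\r\nContent-Length: 2\r\n\r\nok"
```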
http10 :: ByteString Source #
isWebSocketsReq :: [(CI ByteString, b)] -> Bool Source #
data HTTPMethod Source #
Instances
Eq HTTPMethod Source # | |
Defined in Transient.Move.Internals (==) :: HTTPMethod -> HTTPMethod -> Bool # (/=) :: HTTPMethod -> HTTPMethod -> Bool # | |
Read HTTPMethod Source # | |
Defined in Transient.Move.Internals readsPrec :: Int -> ReadS HTTPMethod # readList :: ReadS [HTTPMethod] # readPrec :: ReadPrec HTTPMethod # readListPrec :: ReadPrec [HTTPMethod] # | |
Show HTTPMethod Source # | |
Defined in Transient.Move.Internals showsPrec :: Int -> HTTPMethod -> ShowS # show :: HTTPMethod -> String # showList :: [HTTPMethod] -> ShowS # | |
Loggable HTTPMethod Source # | |
Defined in Transient.Move.Internals serialize :: HTTPMethod -> Builder # deserializePure :: ByteString -> Maybe (HTTPMethod, ByteString) # |
type PostParams = [(ByteString, String)] Source #
getHeaders :: TransIO [(CI ByteString, ByteString)] Source #
isBrowserInstance :: Bool Source #
Returns True if we are running in the browser.
createNodeServ :: HostName -> Int -> [Service] -> IO Node Source #
Create a node from a hostname (or IP address), port number and a list of services.
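A hedged sketch (the host names and the "name" service key are illustrative):

```haskell
-- One node record for this process and one for a peer that advertises a
-- named service (a Service is a list of (SKey, SValue) pairs, see below).
setupNodes :: IO (Node, Node)
setupNodes = do
  me   <- createNode "localhost" 8000
  peer <- createNodeServ "peer.example.com" 8001 [[("name", "worker")]]
  return (me, peer)
```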
createWebNode :: IO Node Source #
errorMyNode :: [Char] -> a Source #
getMyNode :: TransIO Node Source #
Return the local node i.e. the node where this computation is running.
getMyNodeMaybe :: TransIO Node Source #
Returns empty if the node is not set.
getEqualNodes :: TransIO [Node] Source #
get the nodes that have the same service definition as the calling node
getWebNodes :: MonadIO m => m [Node] Source #
addNodes :: [Node] -> TransIO () Source #
Add a list of nodes to the list of existing nodes known locally. If a node is already present, its services are added to those of the already present node; services whose first element (usually the "name" field) matches are substituted.
shuffleNodes :: MonadIO m => m [Node] Source #
Shuffle the list of cluster nodes and return the shuffled list.
addThisNodeToRemote :: Cloud () Source #
add this node to the list of known nodes in the remote node connected by a wormhole. This is useful when the node is called back by the remote node. In the case of web nodes with WebSocket connections, this is the way to add them to the list of known nodes in the server.
connect :: Node -> Node -> Cloud () Source #
Add a node (first parameter) to the cluster using a node that is already part of the cluster (second parameter). The added node starts listening for incoming connections and the rest of the computation is executed on this newly added node.
connect' :: Node -> Cloud () Source #
Reconcile the list of nodes in the cluster using a remote node that is already part of the cluster. Reconciliation ends with every node in the cluster having the same list of nodes.
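A hedged sketch combining it with listen, getNodes and the Alternative instance of Cloud (the seed node is assumed to be already listening):

```haskell
-- Start listening on myNode, reconcile the node list through seedNode,
-- then print the resulting cluster view.
joinCluster :: Node -> Node -> Cloud ()
joinCluster myNode seedNode = do
  listen myNode <|> return ()
  connect' seedNode
  onAll $ getNodes >>= liftIO . print
```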
data HTTPHeaders Source #
Instances
Show HTTPHeaders Source # | |
Defined in Transient.Move.Internals showsPrec :: Int -> HTTPHeaders -> ShowS # show :: HTTPHeaders -> String # showList :: [HTTPHeaders] -> ShowS # |
foldNet :: Loggable a => (Cloud a -> Cloud a -> Cloud a) -> Cloud a -> Cloud a -> Cloud a Source #
crawl the nodes executing the same action in each node and accumulate the results using a binary operator
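A hedged sketch using exploreNet from the synopsis, which appears to be the Monoid-accumulating form of this crawl:

```haskell
-- Gather the host name of every reachable node, one per line.
allHosts :: Cloud String
allHosts = exploreNet $ local $ do
  node <- getMyNode
  return (nodeHost node ++ "\n")
```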
onBrowser :: Cloud a -> Cloud a Source #
only execute if the program is running in the browser. The code inside can contain calls to the server. Otherwise it returns empty (so it stops the computation and may execute alternative computations).
onServer :: Cloud a -> Cloud a Source #
only executes the computation if it is running in the server, but the computation can call the browser. Otherwise it returns empty.
atBrowser :: Loggable a => Cloud a -> Cloud a Source #
If the computation is running in the server, translates it to the browser and returns the result back. If it is already in the browser, it just executes it.
atServer :: Loggable a => Cloud a -> Cloud a Source #
If the computation is running in the browser, translates it to the server and returns the result back. If it is already in the server, it just executes it.
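A hedged sketch of a fragment for a browser/server program (the messages are illustrative):

```haskell
-- Runs in the browser, hops to the server console and comes back.
browserToServer :: Cloud ()
browserToServer = onBrowser $ do
  localIO $ putStrLn "running in the browser"
  atServer $ localIO $ putStrLn "now in the server console"
  localIO $ putStrLn "back in the browser"
```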
connectionTimeouts :: TransIO () Source #
cleanConnectionData :: MonadIO m => Connection -> m () Source #
loopClosures :: TransIO () Source #
Orphan instances
Ord PortID Source # | |
Read Builder Source # | |
Show Builder Source # | |
Loggable Value Source # | |
serialize :: Value -> Builder # deserializePure :: ByteString -> Maybe (Value, ByteString) # deserialize :: TransIO Value # | |
Show a => Show (IORef a) Source # | |
Loggable a => Loggable (StreamData a) Source # | |
serialize :: StreamData a -> Builder # deserializePure :: ByteString -> Maybe (StreamData a, ByteString) # deserialize :: TransIO (StreamData a) # |