From 1e862c867420a8e347bb7eef0a56e623dc5e0cd3 Mon Sep 17 00:00:00 2001 From: Tim Watson Date: Mon, 25 Nov 2013 17:11:21 +0000 Subject: [PATCH 001/108] documentation updates --- Gemfile | 2 + Makefile | 2 +- _config.yml | 1 - _layouts/{tutorial1.html => tutorial.html} | 8 +- _layouts/tutorial3.html | 42 ---- tutorials/ch-tutorial1.md | 221 ++++++++++++++++++++ tutorials/ch-tutorial2.md | 111 ++++++++++ tutorials/ch-tutorial3.md | 189 +++++++++++++++++ tutorials/{tutorial3.md => ch-tutorial4.md} | 155 ++++++++------ tutorials/ch-tutorial5.md | 15 ++ tutorials/{tutorial2.md => tutorial-NT2.md} | 0 tutorials/tutorial1.md | 205 ------------------ 12 files changed, 634 insertions(+), 317 deletions(-) create mode 100644 Gemfile rename _layouts/{tutorial1.html => tutorial.html} (75%) delete mode 100644 _layouts/tutorial3.html create mode 100644 tutorials/ch-tutorial1.md create mode 100644 tutorials/ch-tutorial2.md create mode 100644 tutorials/ch-tutorial3.md rename tutorials/{tutorial3.md => ch-tutorial4.md} (67%) create mode 100644 tutorials/ch-tutorial5.md rename tutorials/{tutorial2.md => tutorial-NT2.md} (100%) delete mode 100644 tutorials/tutorial1.md diff --git a/Gemfile b/Gemfile new file mode 100644 index 0000000..053c27d --- /dev/null +++ b/Gemfile @@ -0,0 +1,2 @@ +source 'https://rubygems.org' +gem 'github-pages' diff --git a/Makefile b/Makefile index 128d222..97e0cd3 100644 --- a/Makefile +++ b/Makefile @@ -21,4 +21,4 @@ endif .PHONY: serve serve: - jekyll --pygments --no-lsi --safe --server + jekyll serve -w diff --git a/_config.yml b/_config.yml index ca44606..9d01b37 100644 --- a/_config.yml +++ b/_config.yml @@ -1,5 +1,4 @@ exclude: [".rvmrc", ".rbenv-version", "README.md", "Rakefile", "changelog.md"] -auto: true lsi: false pygments: true safe: true diff --git a/_layouts/tutorial1.html b/_layouts/tutorial.html similarity index 75% rename from _layouts/tutorial1.html rename to _layouts/tutorial.html index a72f85a..b44874c 100644 --- a/_layouts/tutorial1.html 
+++ b/_layouts/tutorial.html @@ -21,9 +21,11 @@ diff --git a/_layouts/tutorial3.html b/_layouts/tutorial3.html deleted file mode 100644 index 9786a38..0000000 --- a/_layouts/tutorial3.html +++ /dev/null @@ -1,42 +0,0 @@ - - - - {% include head.html %} - - - - - {% include nav.html %} -
- -
-
- -
- {% include footer.html %} - {% include js.html %} - - diff --git a/tutorials/ch-tutorial1.md b/tutorials/ch-tutorial1.md new file mode 100644 index 0000000..9d370a2 --- /dev/null +++ b/tutorials/ch-tutorial1.md @@ -0,0 +1,221 @@ +--- +layout: tutorial +categories: tutorial +sections: ['Getting Started', 'Create a node', 'Sending messages', 'Spawning Remote Processes'] +title: Getting Started +--- + +### Getting Started + +----- + +In order to go through this tutorial, you will need a Haskell development +environment and we recommend installing the latest version of the +[Haskell Platform](http://www.haskell.org/platform/) if you've not done +so already. + +Once you're up and running, you'll want to get hold of the distributed-process +library and a choice of network transport backend. This guide will use +the network-transport-tcp backend, but other backends may be available +on github. + +### Installing from source + +If you're installing from source, the simplest method is to checkout the +[Umbrella Project](https://github.com/haskell-distributed/cloud-haskell) and +run `make` to obtain the complete set of source repositories for building +Cloud Haskell. The additional makefiles bundled with the umbrella assume +that you have a recent version of cabal-dev installed. + +### Create a node + +Cloud Haskell's *lightweight processes* reside on a "node", which must +be initialised with a network transport implementation and a remote table. +The latter is required so that physically separate nodes can identify known +objects in the system (such as types and functions) when receiving messages +from other nodes. We will look at inter-node communication later, for now +it will suffice to pass the default remote table, which defines the built-in +types that Cloud Haskell needs at a minimum in order to run. 
+ +We start with our imports: + +{% highlight haskell %} +import Network.Transport.TCP (createTransport, defaultTCPParameters) +import Control.Distributed.Process +import Control.Distributed.Process.Node +{% endhighlight %} + +Our TCP network transport backend needs an IP address and port to get started +with: + +{% highlight haskell %} +main :: IO () +main = do + Right t <- createTransport "127.0.0.1" "10501" defaultTCPParameters + node <- newLocalNode t initRemoteTable + .... +{% endhighlight %} + +And now we have a running node. + +### Sending messages + +We start a new process by evaluating `forkProcess`, which takes a node, +a `Process` action - because our concurrent code will run in the `Process` +monad - and returns an address for the process in the form of a `ProcessId`. +The process id can be used to send messages to the running process - here we +will send one to ourselves! + +{% highlight haskell %} +-- in main + _ <- forkProcess node $ do + -- get our own process id + self <- getSelfPid + send self "hello" + hello <- expect :: Process String + liftIO $ putStrLn hello + return () +{% endhighlight %} + +Lightweight processes are implemented as `forkIO` threads. In general we will +try to forget about this implementation detail, but let us note that we +haven't deadlocked our own thread by sending to and receiving from its mailbox +in this fashion. Sending messages is a completely asynchronous operation - even +if the recipient doesn't exist, no error will be raised and evaluating `send` +will not block the caller, not even if the caller is sending messages to itself! + +Receiving works quite the other way around, blocking the caller until a message +matching the expected type arrives in our (conceptual) mailbox. If multiple +messages of that type are in the queue, they will be returned in FIFO +order, otherwise the caller will be blocked until a message arrives that can be +decoded to the correct type. 
+ +Let's spawn two processes on the same node and have them talk to each other. + +{% highlight haskell %} +import Control.Concurrent (threadDelay) +import Control.Monad (forever) +import Control.Distributed.Process +import Control.Distributed.Process.Node +import Network.Transport.TCP (createTransport, defaultTCPParameters) + +replyBack :: (ProcessId, String) -> Process () +replyBack (sender, msg) = send sender msg + +logMessage :: String -> Process () +logMessage msg = say $ "handling " ++ msg + +main :: IO () +main = do + Right t <- createTransport "127.0.0.1" "10501" defaultTCPParameters + node <- newLocalNode t initRemoteTable + forkProcess node $ do + -- Spawn another worker on the local node + echoPid <- spawnLocal $ forever $ do + -- Test our matches in order against each message in the queue + receiveWait [match logMessage, match replyBack] + + -- The `say` function sends a message to a process registered as "logger". + -- By default, this process simply loops through its mailbox and sends + -- any received log message strings it finds to stderr. + + say "send some messages!" + send echoPid "hello" + self <- getSelfPid + send echoPid (self, "hello") + + -- `expectTimeout` waits for a message or times out after "delay" + m <- expectTimeout 1000000 + case m of + -- Die immediately - throws a ProcessExitException with the given reason. + Nothing -> die "nothing came back!" + (Just s) -> say $ "got " ++ s ++ " back!" + return () + + -- A 1 second wait. Otherwise the main thread can terminate before + -- our messages reach the logging process or get flushed to stdio + liftIO $ threadDelay (1*1000000) + return () +{% endhighlight %} + +Note that we've used the `receive` class of functions this time around. +These can be used with the [`Match`][5] data type to provide a range of +advanced message processing capabilities. The `match` primitive allows you +to construct a "potential message handler" and have it evaluated +against received (or incoming) messages. 
As with `expect`, if the mailbox does
+not contain a message that can be matched, the evaluating process will be
+blocked until a message arrives which _can_ be matched.
+
+In the _echo server_ above, our first match prints out whatever string it
+receives. If the first message in our mailbox is not a `String`, then our second
+match is evaluated. This, given a tuple `t :: (ProcessId, String)`, will send
+the `String` component back to the sender's `ProcessId`. If neither match
+succeeds, the echo server process blocks until another message arrives and
+tries again.
+
+### Serializable Data
+
+Processes may send any datum whose type implements the `Serializable` typeclass,
+which is done indirectly by implementing `Binary` and deriving `Typeable`.
+Implementations are already provided for all of Cloud Haskell's primitives
+and the most commonly used data structures.
+
+### Spawning Remote Processes
+
+In order to spawn processes on a remote node without additional compiler
+infrastructure, we make use of "static values": values that are known at
+compile time. Closures in functional programming arise when we partially
+apply a function. In Cloud Haskell, a closure is a code pointer, together
+with requisite runtime data structures representing the value of any free
+variables of the function. A remote spawn therefore, takes a closure around
+an action running in the `Process` monad: `Closure (Process ())`.
+
+In distributed-process if `f : T1 -> T2` then
+
+{% highlight haskell %}
+ $(mkClosure 'f) :: T1 -> Closure T2
+{% endhighlight %}
+
+That is, the first argument to the function we pass to mkClosure will act
+as the closure environment for that process. If you want multiple values
+in the closure environment, you must "tuple them up".
+
+We need to configure our remote table (see the documentation for more details)
+and the easiest way to do this, is to let the library generate the relevant
+code for us. 
For example (taken from the distributed-process-platform test suites): + +{% highlight haskell %} +sampleTask :: (TimeInterval, String) -> Process String +sampleTask (t, s) = sleep t >> return s + +$(remotable ['sampleTask]) +{% endhighlight %} + +We can now create a closure environment for `sampleTask` like so: + +{% highlight haskell %} +($(mkClosure 'sampleTask) (seconds 2, "foobar")) +{% endhighlight %} + +The call to `remotable` generates a remote table and a definition +`__remoteTable :: RemoteTable -> RemoteTable` in our module for us. +We compose this with other remote tables in order to come up with a +final, merged remote table for use in our program: + +{% highlight haskell %} +myRemoteTable :: RemoteTable +myRemoteTable = Main.__remoteTable initRemoteTable + +main :: IO () +main = do + localNode <- newLocalNode transport myRemoteTable + -- etc +{% endhighlight %} + +------ + +[1]: /static/doc/distributed-process/Control-Distributed-Process.html#v:Message +[2]: http://hackage.haskell.org/package/distributed-process +[3]: /static/doc/distributed-process-platform/Control-Distributed-Process-Platform-Async.html +[4]: /static/doc/distributed-process-platform/Control-Distributed-Process-Platform-ManagedProcess.htmlv:callAsync +[5]: http://hackage.haskell.org/packages/archive/distributed-process/latest/doc/html/Control-Distributed-Process-Internal-Primitives.html#t:Match diff --git a/tutorials/ch-tutorial2.md b/tutorials/ch-tutorial2.md new file mode 100644 index 0000000..10db1d0 --- /dev/null +++ b/tutorials/ch-tutorial2.md @@ -0,0 +1,111 @@ +--- +layout: tutorial +categories: tutorial +sections: ['Overview', 'A Simple Example', 'Master Slave Configurations', 'Other Topologies and Backends'] +title: Managing Topologies +--- + +### Overview + +In Cloud Haskell, the system topology is determined by your choice of _Cloud Haskell Backend_. 
+The basic topology that Cloud Haskell currently ships with is determined by the +[`simplelocalnet`][1] backend, which provides for a fully connected grid of nodes with optional +master-slave configuration. This backend allows nodes to discover one another using UDP multicast. +It is a zero-configuration backend designed to get you going with Cloud Haskell quickly without +imposing any particular structure on your application. + +Other backends might work in a completely different manner, offering different types of (and +relationships between) nodes, or simply by handling discovery differently (e.g., pre-defined +node names/addresses, or by using some form of registrar such as DNS-SD/Bonjour). + +### A Simple Example + +Here is an example program built against the [`simplelocalnet`][1] backend, that periodically +searches for a list of peer nodes, and sends a message to a registered (named) process on each. + +{% highlight haskell %} +import System.Environment (getArgs) +import Control.Distributed.Process +import Control.Distributed.Process.Node (initRemoteTable) +import Control.Distributed.Process.Backend.SimpleLocalnet +import Control.Monad (forever, mapM_) + +main = do + [host, port] <- getArgs + + backend <- initializeBackend host port initRemoteTable + node <- newLocalNode backend + runProcess node $ forever $ do + findPeers backend >>= mapM_ $ \peer -> nsendRemote peer "echo-server" "hello!" + +{% endhighlight %} + +Clearly the program isn't very useful, but it illustrates the two key concepts that +`simplelocalnet` relies on. Firstly, that we `initializeBackend` in order to get +connected to an underlying communications infrastructure and secondly, that we can +evaluate `findPeers` at any time to obtain the set of other nodes that have broadcast +their presence. + +### Master Slave Configurations + +Here we simply rehash the master/slave example from the `simplelocalnet` documentation. 
+With the same imports as the example above, we add a no-op slave and a master that +takes a list of its (known) slaves, which it prints out before terminating them all. + +{% highlight haskell %} +main :: IO () +main = do + args <- getArgs + + case args of + ["master", host, port] -> do + backend <- initializeBackend host port initRemoteTable + startMaster backend (master backend) + ["slave", host, port] -> do + backend <- initializeBackend host port initRemoteTable + startSlave backend + +{% endhighlight %} + +And the master node is defined thus: + +{% highlight haskell %} +master :: Backend -> [NodeId] -> Process () +master backend slaves = do + -- Do something interesting with the slaves + liftIO . putStrLn $ "Slaves: " ++ show slaves + -- Terminate the slaves when the master terminates (this is optional) + terminateAllSlaves backend +{% endhighlight %} + +### Other Topologies and Backends + +Many other topologies are in development, including one that runs on Windows Azure, +which is available [here][2]. Some third party backends have also been developed, +such as the [`distributed-process-p2p`][3] backend, which given a known node address, +discovers and maintains knowledge of it's peers. + +Here is an example of node discovery using the [`distributed-process-p2p`][3] +backend: + +{% highlight haskell %} +import System.Environment (getArgs) +import Control.Distributed.Process +import Control.Distributed.Process.Node (initRemoteTable) +import Control.Distributed.Process.Backend.P2P +import Control.Monad (forever, mapM_) + +main = do + [host, port] <- getArgs + + backend <- initializeBackend host port initRemoteTable + node <- newLocalNode backend + runProcess node $ forever $ do + findPeers >>= mapM_ $ \peer -> nsend peer "echo-server" "hello!" 
+
+{% endhighlight %}
+
+[1]: http://hackage.haskell.org/package/distributed-process-simplelocalnet
+[2]: http://hackage.haskell.org/package/distributed-process-azure
+[3]: https://bitbucket.org/dpwiz/distributed-process-p2p
+
diff --git a/tutorials/ch-tutorial3.md b/tutorials/ch-tutorial3.md
new file mode 100644
index 0000000..dfc4bb0
--- /dev/null
+++ b/tutorials/ch-tutorial3.md
@@ -0,0 +1,189 @@
+---
+layout: tutorial
+categories: tutorial
+sections: ['Message Ordering', 'Selective Receive', 'Advanced Mailbox Processing']
+title: Getting to know Processes
+---
+
+### Message Ordering
+
+We have already met the `send` primitive, which is used to deliver
+a message to another process. Here's a review of what we've learned
+about `send` thus far:
+
+1. sending is asynchronous (i.e., it does not block the caller)
+2. sending _never_ fails, regardless of the state of the recipient process
+3. even if a message is received, there is **no** guarantee *when* it will arrive
+4. there are **no** guarantees that the message will be received at all
+
+Asynchronous sending buys us several benefits. Improved concurrency is
+possible, because processes do not need to block and wait for acknowledgements
+and error handling need not be implemented each time a message is sent.
+Consider a stream of messages sent from one process to another. If the
+stream consists of messages `a, b, c` and we have seen `c`, then we know for
+certain that we will have already seen `a, b` (in that order), so long as the
+messages were sent to us by the same process.
+
+When two concurrent processes exchange messages, Cloud Haskell guarantees that
+messages will be delivered in FIFO order, if at all. 
No such guarantee exists
+between N processes where N > 1, so if processes _A_ and _B_ are both
+communicating (concurrently) with process _C_, the ordering guarantee will
+only hold for each pair of interactions, i.e., between _A_ and _C_ and/or
+_B_ and _C_ the ordering will be guaranteed, but not between _A_ and _B_
+with regards messages sent to _C_.
+
+Of course, we may not want to process received messages in the
+precise order in which they arrived. When this need arises, the platform
+supplies a set of primitives to allow the caller to _selectively_ process
+their mailbox.
+
+### Selective Receive
+
+Processes dequeue messages (from their mailbox) using the [`expect`][1]
+and [`receive`][2] family of primitives. Both take an optional timeout,
+which leads to the expression evaluating to `Nothing` if no matching input
+is found.
+
+The [`expect`][1] primitive blocks until a message matching the expected type
+(of the expression) is found in the process' mailbox. If such a message can be
+found by scanning the mailbox, it is dequeued and given to the caller. If no
+message (matching the expected type) can be found, the caller (i.e., the
+calling thread) is blocked until a matching message is delivered to the mailbox.
+Let's take a look at this in action:
+
+{% highlight haskell %}
+demo :: Process ()
+demo = do
+    listener <- spawnLocal listen
+    send listener "hello"
+    getSelfPid >>= send listener
+    () <- expect
+  where
+    listen = do
+      third <- expect :: Process ProcessId
+      first <- expect :: Process String
+      Nothing <- expectTimeout 100000 :: Process String
+      say first
+      send third ()
+{% endhighlight %}
+
+This program will print `"hello"`, then `Nothing` and finally `pid://...`.
+The first `expect` - labelled "third" because of the order in which it is
+due to be received - **will** succeed, since the parent process sends its
+`ProcessId` after the string "hello", yet the listener blocks until it can dequeue
+the `ProcessId` before "expecting" a string. 
The second `expect` (labelled "first")
+also succeeds, demonstrating that the listener has selectively removed messages
+from its mailbox based on their type rather than the order in which they arrived.
+The third `expect` will timeout and evaluate to `Nothing`, because only one string
+is ever sent to the listener and that has already been removed from the mailbox.
+The removal of messages from the process' mailbox based on type is what makes this
+program viable - without this "selective receiving", the program would block and
+never complete.
+
+By contrast, the [`receive`][2] family of primitives takes a list of `Match`
+objects, each derived from evaluating a [`match`][3] style primitive. This
+subject was covered briefly in the first tutorial. Matching on messages allows
+us to separate the type(s) of messages we can handle from the type that the
+whole `receive` expression evaluates to.
+
+The behaviour of `receiveWait` differs from `receiveTimeout` in that it
+blocks forever (until a match is found in the process' mailbox), whereas the
+variant taking a timeout will return `Nothing` unless a match is found within
+the specified time interval. Note that as with `System.Timeout`, the only
+guarantee we have about a timeout based function is that it will not
+expire _before_ the given interval. Both functions scan the mailbox in FIFO
+order, evaluating the list of `match` expressions in declarative
+(i.e., insertion) order until one of the matches succeeds or the operation
+times out.
+
+### Advanced Mailbox Processing
+
+There are times when it is desirable to take a message from our mailbox without
+explicitly specifying its type. For example, let's consider the `relay` primitive
+that ships with distributed-process. This utility function starts a process that
+simply dequeues _any_ messages it receives and forwards them to some other process. 
+In order to dequeue messages regardless of their type, this code relies on the +`matchAny` primitive, which has the following type: + +{% highlight haskell %} +matchAny :: forall b. (Message -> Process b) -> Match b +{% endhighlight %} + +Since forwarding _raw messages_ (without decoding them first) is a common pattern +in Cloud Haskell programs, there is also a primitive to do that for us: + +{% highlight haskell %} +forward :: Message -> ProcessId -> Process () +{% endhighlight %} + +Given these types, we can see that in order to combine `matchAny` with `forward` +we need to either _flip_ `forward` and apply the `ProcessId` (leaving us with +the required type `Message -> Process b`) or use a lambda - the actual implementation +does the latter and looks like this: + +{% highlight haskell %} +relay :: ProcessId -> Process () +relay !pid = receiveWait [ matchAny (\m -> forward m pid) ] >> relay pid +{% endhighlight %} + +This is pretty useful, but since `matchAny` operates on the raw `Message` type, +we're limited in what we can do with the messages we receive. In order to delve +_inside_ a message, we have to know its type. If we have an expression that operates +on a specific type, we can _attempt_ to decode the message to that type and examine +the result to see whether the decoding succeeds or not. There are two primitives +we can use to that effect: `unwrapMessage` and `handleMessage`. Their types look like +this: + +{% highlight haskell %} +unwrapMessage :: forall m a. (Monad m, Serializable a) => Message -> m (Maybe a) + +handleMessage :: forall m a b. (Monad m, Serializable a) => Message -> (a -> m b) -> m (Maybe b) +{% endhighlight %} + +Both primitives are generalised to any `Monad m`, so we're not limited to operating in +the `Process` monad. Of the two, `unwrapMessage` is the simpler, taking a raw `Message` +and evaluating to `Maybe a` before returning that value in the monad `m`. 
If the type +of the raw `Message` does not match our expectation, the result will be `Nothing`, otherwise +`Just a`. The approach `handleMessage` takes is a bit more flexible, taking a function +from `a -> m b` and returning `Just b` if the underlying message is of type `a` (hence the +operation can be executed and evaluate to `Maybe b`) or `Nothing` if the message's type +is incompatible with the handler function. + +Let's look at `handleMessage` in action. Earlier on we looked at `relay` from +distributed-process and now we'll consider its sibling `proxy` - this takes a predicate, +evaluates some input of type `a` and returns `Process Bool`, allowing us to run arbitrary +`Process` code in order to decide whether or not the `a` is eligible to be forwarded to +the relay `ProcessId`. The type of `proxy` is thus: + +{% highlight haskell %} +proxy :: Serializable a => ProcessId -> (a -> Process Bool) -> Process () +{% endhighlight %} + +Since `matchAny` operates on `(Message -> Process b)` and `handleMessage` operates on +`a -> Process b` we can compose the two and make our proxy server quite simply. We must +not forward messages for which the predicate function evaluates to `Just False`, nor +can we sensibly forward messages which the predicate function is unable to evaluate due +to type incompatibility. This leaves us with the definition found in distributed-process: + +{% highlight haskell %} +proxy pid proc = do + receiveWait [ + matchAny (\m -> do + next <- handleMessage m proc + case next of + Just True -> forward m pid + Just False -> return () -- explicitly ignored + Nothing -> return ()) -- un-routable + ] + proxy pid proc +{% endhighlight %} + +Beyond simple relays and proxies, the raw message handling capabilities available in +distributed-process can be utilised to develop highly generic message processing code. 
+All the richness of the distributed-process-platform APIs (such as `ManagedProcess`) which +will be discussed in later tutorials are, in fact, built upon these families of primitives. + +[1]: hackage.haskell.org/package/distributed-process/docs/Control-Distributed-Process.html#v:receiveWait +[2]: hackage.haskell.org/package/distributed-process/docs/Control-Distributed-Process.html#v:expect +[3]: http://hackage.haskell.org/package/distributed-process-0.4.2/docs/Control-Distributed-Process.html#v:match +[4]: /static/semantics.pdf diff --git a/tutorials/tutorial3.md b/tutorials/ch-tutorial4.md similarity index 67% rename from tutorials/tutorial3.md rename to tutorials/ch-tutorial4.md index ca2572e..9b60324 100644 --- a/tutorials/tutorial3.md +++ b/tutorials/ch-tutorial4.md @@ -1,27 +1,30 @@ --- -layout: tutorial3 +layout: tutorial +sections: ['Introduction', 'Implementing the Client', 'Implementing the Server', 'Making use of Async', 'Wiring up Handlers', 'Putting it all together', 'Performance Considerations'] categories: tutorial title: Managed Process Tutorial --- ### Introduction -The source code on which this tutorial is based is kept on github, +The source code on which this tutorial is (loosely) based is kept on github, and can be accessed [here][1]. Please note that this tutorial is based on the stable (master) branch of distributed-process-platform. -The main idea behind `ManagedProcess` is to separate the functional -and non-functional aspects of a process. By functional, we mean whatever -application specific task the process performs, and by non-functional +### Managed Processes + +The main idea behind a `ManagedProcess` is to separate the functional +and non-functional aspects of an actor. By functional, we mean whatever +application specific task the actor performs, and by non-functional we mean the *concurrency* or, more precisely, handling of the process' -mailbox. +mailbox and its interaction with other actors (i.e., clients). 
-Another effect that `ManagedProcess` has is to provide client code -with a typed, specific API for interacting with the process, much as -a TypedChannel does. We achieve this by writing and exporting functions -that operate on the types we want clients to see, and using the API -from `Control.Distributed.Process.Platform.ManagedProcess.Client` to -interact with the server. +Another effect of the `ManagedProcess` API is to provide client code +with a typed (i.e., type specific) API for interacting with the process, +much as a `TypedChannel` does. We achieve this by writing and exporting +functions that operate on the types we want clients to see, and using +the API from `Control.Distributed.Process.Platform.ManagedProcess.Client` +to interact with the server. Let's imagine we want to execute tasks on an arbitrary node, using a mechanism much as we would with the `call` API from distributed-process. @@ -31,22 +34,25 @@ concurrent tasks. We will use `ManagedProcess` to implement a generic task server with the following characteristics * requests to enqueue a task are handled immediately -* callers will block until the task completes (or fails) +* callers however, are blocked until the task completes (or fails) * an upper bound is placed on the number of concurrent running tasks Once the upper bound is reached, tasks will be queued up for later -execution, and only when we drop below the limit will tasks be taken +execution. Only when we drop below this limit will tasks be taken from the backlog and executed. `ManagedProcess` provides a basic protocol for *server-like* processes such as this, based on the synchronous `call` and asynchronous `cast` -functions used by code we provide to client clients and matching -*handler* functions in the process itself, for which there is a similar -API on the *server*. Although `call` is a synchronous protocol, -communication with the *server process* is out of band, both from the -client and the server's point of view. 
The server implementation chooses -whether to reply to a call request immediately, or defer its reply until -a later stage and go back to receiving other messages in the meanwhile. +functions. We use these to determine client behaviour, and matching +*handler* functions are set up in the process itself, to process the +requests and (if required) replies. This style of programming will +already be familiar if you've used some combination of `send` in your +clients and the `receive [ match ... ]` family of functions to write +your servers. The primary difference here, is that the choice of when +to return to (potentially blocking on) the server's mailbox is taken +out of the programmer's hands, leaving the implementor to worry only +about the logic to be applied once a message of one type or another +is received. ### Implementing the client @@ -74,10 +80,19 @@ executeTask :: forall s a . (Addressable s, Serializable a) executeTask sid t = call sid t {% endhighlight %} -That's it for the client! Note that the type signature we expose to -our consumers is specific, and that we do not expose them to either -arbitrary messages arriving in their mailbox or to exceptions being -thrown in their thread. Instead we return an `Either`. +Although `call` is a synchronous protocol, communication with the +*server process* is out of band, both from the client and the server's +point of view. The server implementation chooses whether to reply to a +call request immediately, or defer its reply until a later stage (and +thus go back to receiving other messages in the meanwhile). + +In terms of code, that's all there is to it for our client! Note that +the type signature we expose to our consumers is specific, and that +we do not expose them to either arbitrary messages arriving in their +mailbox or to exceptions being thrown in their thread. Instead we +return an `Either`. 
One very important thing about this approach is +that if the server replies with some other type (i.e., a type other +than `Either String a`) then our client will be blocked indefinitely! There are several varieties of the `call` API that deal with error handling in different ways. Consult the haddocks for more info about @@ -96,7 +111,7 @@ data Pool a = Pool a I've called the state type `Pool` as we're providing a fixed size resource pool from the consumer's perspective. We could think of this as a bounded -size latch or barrier of sorts, but that conflates the example a bit too +queue, latch or barrier of sorts, but that conflates the example a bit too much. We parameterise the state by the type of data that can be returned by submitted tasks. @@ -124,11 +139,14 @@ data Pool a = Pool { } deriving (Typeable) {% endhighlight %} +Given a pool of closures, we must now work out how to execute them +on the caller's behalf. + ### Making use of Async So **how** can we execute this `Closure (Process a)` without blocking the server process itself? We will use the `Control.Distributed.Process.Platform.Async` API -to execute the task asynchronously and provide a means for waiting on the result. +to execute each task asynchronously and provide a means for waiting on the result. In order to use the `Async` handle to get the result of the computation once it's complete, we'll have to hang on to a reference. We also need a way to associate the @@ -152,11 +170,12 @@ proc <- unClosure task' asyncHandle <- async proc {% endhighlight %} -Of course, we decided that we wouldn't block on each `Async` handle, and we're not -able to sit in a *loop* polling all the handles representing tasks we're running, -because no submissions would be handled whilst spinning and waiting for results. -We're relying on monitors instead, so we need to store the `MonitorRef` so we know -which monitor signal relates to which async task (and recipient). 
+
+Of course, we decided not to block on each `Async` handle, and we can't sit
+in a *loop* polling all the handles representing tasks we're running
+(since no submissions would be handled whilst we're spinning waiting
+for results). We rely on monitors instead, so we must store a
+`MonitorRef` in order to know which monitor signal relates to which
+async task (and recipient).
 
 {% highlight haskell %}
 data Pool a = Pool {
@@ -193,17 +212,16 @@ ref, caller ref and the async handle together in the `active` field.
 
 Prepending to the list of active/running tasks is a somewhat arbitrary choice.
 One might argue that heuristically, the younger a task is the less likely it is
 that it will run for a long time. Either way, I've done this to avoid cluttering the
-example other data structures, so we can focus on the `ManagedProcess` APIs
-only.
-
-Now we will write a function that handles the results. When the monitor signal
-arrives, we use the async handle to obtain the result and send it back to the caller.
-Because, even if we were running at capacity, we've now seen a task complete (and
-therefore reduce the number of active tasks by one), we will also pull off a pending
-task from the backlog (i.e., accepted), if any exists, and execute it. As with the
-active task list, we're going to take from the backlog in FIFO order, which is
-almost certainly not what you'd want in a real application, but that's not the
-point of the example either.
+example with data structures, so we can focus on the `ManagedProcess` APIs.
+
+Now we will write a function that handles the results. When a monitor signal
+arrives, we look up an async handle that we can use to obtain the result
+and send it back to the caller. Because, even if we were running at capacity,
+we've now seen a task complete (and therefore reduced the number of active tasks
+by one), we will also pull off a pending task from the backlog (i.e., accepted),
+if any exists, and execute it. 
As with the active task list, we're going to +take from the backlog in FIFO order, which is almost certainly not what you'd want +in a real application, but that's not the point of the example either. The steps then, are @@ -216,9 +234,9 @@ The steps then, are This chain then, looks like `wait h >>= respond c >> bump s t >>= continue`. Item (3) requires special API support from `ManagedProcess`, because we're not -just sending *any* message back to the caller. We're replying to a `call` -that has already taken place and is, in fact, still running. The API call for -this is `replyTo`. +just sending *any* message back to the caller. We're replying to a specific `call` +that has taken place and is, from the client's perspective, still running. +The `ManagedProcess` API call for this is `replyTo`. {% highlight haskell %} taskComplete :: forall a . Serializable a @@ -258,23 +276,24 @@ deleteFromRunQueue :: (MonitorRef, Recipient, Async a) deleteFromRunQueue c@(p, _, _) runQ = deleteBy (\_ (b, _, _) -> b == p) c runQ {% endhighlight %} -That was pretty simple. We've deal with mapping the `AsyncResult` to `Either` values, +That was pretty simple. We've dealt with mapping the `AsyncResult` to `Either` values, which we *could* have left to the caller, but this makes the client facing API much simpler to work with. ### Wiring up handlers The `ProcessDefinition` takes a number of different kinds of handler. The only ones -we care about are the call handler for submission handling, and the handler that +_we_ care about are the call handler for submissions, and the handler that deals with monitor signals. Call and cast handlers live in the `apiHandlers` list of a `ProcessDefinition` and must have the type `Dispatcher s` where `s` is the state type for the process. We cannot construct a `Dispatcher` ourselves, but a range of functions in the `ManagedProcess.Server` module exist to lift functions like the ones we've just -defined. 
The particular function we need is `handleCallFrom`, which works with -functions over the state, `Recipient` and the call data/message. All the varieties -of `handleCall` need to return a `ProcessReply`, which has the following type +defined, to the correct type. The particular function we need is `handleCallFrom`, +which works with functions over the state, `Recipient` and call data/message. +All varieties of `handleCall` need to return a `ProcessReply`, which has the +following type: {% highlight haskell %} data ProcessReply s a = @@ -282,11 +301,11 @@ data ProcessReply s a = | NoReply (ProcessAction s) {% endhighlight %} -There are also various utility function in the API to construct a `ProcessAction` -and we will make use of `noReply_` here, which constructs `NoReply` for us and +There are also various utility functions in the API to construct a `ProcessAction` +and we make use of `noReply_` here, which constructs `NoReply` for us and presets the `ProcessAction` to `ProcessContinue`, which goes back to receiving -messages without further action. We already have a function over the right input -domain which evaluates to a new state so we end up with: +messages from clients. We already have a function over our input domain, which +evaluates to a new state, so we end up with: {% highlight haskell %} storeTask :: Serializable a @@ -321,6 +340,9 @@ poolServer = {% endhighlight %} Starting the pool is fairly simple and `ManagedProcess` has some utilities to help. +The `start` function takes an _initialising_ thunk, which must generate the initial +state and per-call timeout setting, and the process definition which we've already +encountered. {% highlight haskell %} simplePool :: forall a . (Serializable a) @@ -335,8 +357,8 @@ simplePool sz server = start sz init' server ### Putting it all together Starting up a pool locally or on a remote node is just a matter of using `spawn` -or `spawnLocal` with `simplePool`. 
The second argument should specify the type of -results, e.g., +or `spawnLocal` with `simplePool`. The second argument should add specificity to +the type of results the process definition operates on, e.g., {% highlight haskell %} let s' = poolServer :: ProcessDefinition (Pool String) @@ -356,22 +378,25 @@ And executing them is just as simple too. Given a pool which has been registered locally as "mypool", we can simply call it directly: {% highlight haskell %} -job <- return $ ($(mkClosure 'sampleTask) (seconds 2, "foobar")) -call "mypool" job >>= wait >>= stash result +tsk <- return $ ($(mkClosure 'sampleTask) (seconds 2, "foobar")) +executeTask "mypool" tsk {% endhighlight %} -Hopefully this has demonstrated a few benefits of the `ManagedProcess` API, although -it's really just scratching the surface. We have focussed on the code that matters - -state transitions and decision making, without getting bogged down (much) with receiving -or sending messages, apart from using some simple APIs when we needed to. +In this tutorial, we've really just scratched the surface of the `ManagedProcess` +API. By handing over control of the client/server protocol to the framework, we +are able to focus on the code that matters, such as state transitions and decision +making, without getting bogged down (much) with the business of sending and +receiving messages, handling client/server failures and such like. ### Performance Considerations We did not take much care over our choice of data structures. Might this have profound consequences for clients? The LIFO nature of the pending backlog is surprising, but -we can change that quite easily by changing data structures. +we can change that quite easily by changing data structures. In fact, the code on which +this example is based uses `Data.Sequence` to provide both strictness and FIFO +execution ordering. 
-What's perhaps more of a concern is the cost of using `Async` everywhere - remember +Perhaps more of a concern is the cost of using `Async` everywhere - remember we used this in the *server* to handle concurrently executing tasks and obtaining their results. The `Async` module is also used by `ManagedProcess` to handle the `call` mechanism, and there *are* some overheads to using it. An invocation of diff --git a/tutorials/ch-tutorial5.md b/tutorials/ch-tutorial5.md new file mode 100644 index 0000000..91fd483 --- /dev/null +++ b/tutorials/ch-tutorial5.md @@ -0,0 +1,15 @@ +--- +layout: tutorial +categories: tutorial +sections: ['Introduction'] +title: Supervision Principles +--- + +### Introduction + +In the previous tutorial, we looked at utilities for linking processes together +and monitoring their lifecycle as it changes. The ability to link and monitor are +foundational tools for building _reliable_ systems, and are the bedrock principles +on which Cloud Haskell's supervision capabilities are built. + + diff --git a/tutorials/tutorial2.md b/tutorials/tutorial-NT2.md similarity index 100% rename from tutorials/tutorial2.md rename to tutorials/tutorial-NT2.md diff --git a/tutorials/tutorial1.md b/tutorials/tutorial1.md deleted file mode 100644 index 0575704..0000000 --- a/tutorials/tutorial1.md +++ /dev/null @@ -1,205 +0,0 @@ ---- -layout: tutorial1 -categories: tutorial -title: Getting Started ---- - -### Getting Started - ------ - -Please note that this tutorial is a work in progress. We highly recommend -reading the haddock documentation and reading the Well-Typed blog, which -are offer the best quality sources of information at this time. - -In order to go through this tutorial you will need a Haskell development -environment and we recommend installing the latest version of the -[Haskell Platform](http://www.haskell.org/platform/) if you've not done -so already. 
- -Once you're up and running, you'll want to get hold of the distributed-process -library and a choice of network transport backend. This guide will use -the network-transport-tcp backend, but the simplelocalnet or inmemory -backends are also available on github, along with some other experimental -options. - -### Create a node - -Cloud Haskell's *lightweight processes* reside on a 'node', which must -be initialised with a network transport implementation and a remote table. -The latter is required so that physically separate nodes can identify known -objects in the system (such as types and functions) when receiving messages -from other nodes. We'll look at inter-node communication later, so for now -it will suffice to pass the default remote table, which defines the built-in -stuff Cloud Haskell needs at a minimum. - -Let's start with imports first: - -{% highlight haskell %} -import Network.Transport.TCP (createTransport, defaultTCPParameters) -import Control.Distributed.Process -import Control.Distributed.Process.Node -{% endhighlight %} - -Our TCP network transport backend needs an IP address and port to get started -with, and we're good to go... - -{% highlight haskell %} -main :: IO () -main = do - Right t <- createTransport "127.0.0.1" "10501" defaultTCPParameters - node <- newLocalNode t initRemoteTable - .... -{% endhighlight %} - -And now we have a running node. - -### Send messages - -We can start a new lightweight process with `forkProcess`, which takes a node, -a `Process` action - because our concurrent code will run in the `Process` -monad - and returns an address for the process in the form of a `ProcessId`. -The process id can be used to send messages to the running process - here we -will send one to ourselves! 
- -{% highlight haskell %} --- in main - _ <- forkProcess node $ do - -- get our own process id - self <- getSelfPid - send self "hello" - hello <- expect :: Process String - liftIO $ putStrLn hello - return () -{% endhighlight %} - -Lightweight processes are implemented as `forkIO` threads. In general we will -try to forget about this implementation detail, but for now just note that we -haven't deadlocked ourself by sending to and receiving from our own mailbox -in this fashion. Sending a message is a completely asynchronous operation - even -if the recipient doesn't exist, no error will be raised and evaluating `send` -will not block the caller. - -Receiving messages works the other way around, blocking the caller until a message -matching the expected type arrives in the process (conceptual) mailbox. -If multiple messages of that type are in the queue, they will be returned in FIFO -order, otherwise the caller will be blocked until a message arrives that can be -decoded to the correct type. - -Let's spawn another process on the same node and make the two talk to each other. - -{% highlight haskell %} -import Control.Concurrent (threadDelay) -import Control.Monad (forever) -import Control.Distributed.Process -import Control.Distributed.Process.Node -import Network.Transport.TCP (createTransport, defaultTCPParameters) - -replyBack :: (ProcessId, String) -> Process () -replyBack (sender, msg) = send sender msg - -logMessage :: String -> Process () -logMessage msg = say $ "handling " ++ msg - -main :: IO () -main = do - Right t <- createTransport "127.0.0.1" "10501" defaultTCPParameters - node <- newLocalNode t initRemoteTable - -- Spawn a new process on a local node - forkProcess node $ do - -- Spawn worker inside one more process on the local node - echoPid <- spawnLocal $ forever $ do - -- Test the matches in order against each message in the queue - receiveWait [match logMessage, match replyBack] - - -- `say` sends a message to the process registered as logger. 
- -- By default, this process simply sends the string to stderr. - say "send some messages!" - send echoPid "hello" - self <- getSelfPid - send echoPid (self, "hello") - -- like `expect` (waits for a message), but with timeout - m <- expectTimeout 1000000 - case m of - -- Die immediately - throws a ProcessExitException with the given reason. - Nothing -> die "nothing came back!" - (Just s) -> say $ "got back " ++ s - return () - - -- A 1 second wait. Otherwise the main thread can terminate before - -- our messages reach the logging process or get flushed to stdio - liftIO $ threadDelay (1*1000000) - return () -{% endhighlight %} - -Note that we've used a `receive` class of function this time around. These -functions work with the [`Match`][Match] data type, and provide a range of -advanced dispatching options. The `match` construct allows you to construct a -list of potential message handlers and have them evaluated against incoming -messages. Our first match indicates that, given a tuple `t :: (ProcessId, -String)` we will send the `String` component back to the sender's -`ProcessId`. Our second match prints out whatever string it receives. - -Also note the use of a 'timeout' (given in microseconds), which is available for -both the `expect` and `receive` variants. This returns `Nothing` unless a message -can be dequeued from the mailbox within the specified time interval. - -### Serializable - -Processes can send data if the type implements the `Serializable` typeclass, which is -done indirectly by implementing `Binary` and deriving `Typeable`. Implementations are -already provided for primitives and some commonly used data structures. - -### Spawning Remote Processes - -In order to spawn a process on a node we need something of type `Closure (Process ())`. 
-In distributed-process if `f : T1 -> T2` then - -{% highlight haskell %} - $(mkClosure 'f) :: T1 -> Closure T2 -{% endhighlight %} - -That is, the first argument the function we pass to mkClosure will act as the closure -environment for that process; if you want multiple values in the closure environment, -you must tuple them up. - -In order to spawn a process remotely we will need to configure the remote table -(see the documentation for more details) and the easiest way to do this, is to -let the library generate the relevant code for us. For example (taken from the -distributed-process-platform test suites): - -{% highlight haskell %} -sampleTask :: (TimeInterval, String) -> Process String -sampleTask (t, s) = sleep t >> return s - -$(remotable ['sampleTask]) -{% endhighlight %} - -We can now create a closure environment for `sampleTask` like so: - -{% highlight haskell %} -($(mkClosure 'sampleTask) (seconds 2, "foobar")) -{% endhighlight %} - -The call to `remotable` generates a remote table and generates a definition -`__remoteTable :: RemoteTable -> RemoteTable` in our module for us. 
We can -compose this with other remote tables in order to come up with a final, merged -remote table for use in our program: - -{% highlight haskell %} -myRemoteTable :: RemoteTable -myRemoteTable = Main.__remoteTable initRemoteTable - -main :: IO () -main = do - localNode <- newLocalNode transport myRemoteTable - -- etc -{% endhighlight %} - ------- - -[1]: /static/doc/distributed-process/Control-Distributed-Process.html#v:Message -[2]: http://hackage.haskell.org/package/distributed-process -[3]: /static/doc/distributed-process-platform/Control-Distributed-Process-Platform-Async.html -[4]: /static/doc/distributed-process-platform/Control-Distributed-Process-Platform-ManagedProcess.htmlv:callAsync -[Match]: http://hackage.haskell.org/packages/archive/distributed-process/latest/doc/html/Control-Distributed-Process-Internal-Primitives.html#t:Match From e3b7172f10baf830ad993b9bae1ed7f7f0aa5d30 Mon Sep 17 00:00:00 2001 From: Tim Watson Date: Thu, 28 Nov 2013 16:43:55 +0000 Subject: [PATCH 002/108] Tutorial 3 improvements --- documentation.md | 2 +- tutorials/ch-tutorial3.md | 126 +++++++++++++++++++++++++++++++++++++- 2 files changed, 126 insertions(+), 2 deletions(-) diff --git a/documentation.md b/documentation.md index 949430f..36409fc 100644 --- a/documentation.md +++ b/documentation.md @@ -373,7 +373,7 @@ The [distributed-process-platform][18] library implements parts of the in the original paper and implemented by the [remote][14] package. In particular, we diverge from the original design and defer to many of the principles defined by Erlang's [Open Telecom Platform][13], taking in some well established -Haskell concurrency design patterns alongside. +Haskell concurrency design patterns along the way. In fact, [distributed-process-platform][18] does not really consider the *task layer* in great detail. 
We provide an API comparable to remote's
diff --git a/tutorials/ch-tutorial3.md b/tutorials/ch-tutorial3.md
index dfc4bb0..6b317f1 100644
--- a/tutorials/ch-tutorial3.md
+++ b/tutorials/ch-tutorial3.md
@@ -1,10 +1,27 @@
 ---
 layout: tutorial
 categories: tutorial
-sections: ['Message Ordering', 'Selective Receive', 'Advanced Mailbox Processing']
+sections: ['The Thing About Nodes', 'Message Ordering', 'Selective Receive', 'Advanced Mailbox Processing', 'Process Lifetime', 'Monitoring And Linking', 'Getting Process Info']
 title: Getting to know Processes
 ---
 
+### The Thing About Nodes
+
+Before we can really get to know _processes_, we need to consider the role of
+the _Node Controller_ in Cloud Haskell. As per the [_semantics_][4], Cloud
+Haskell makes the role of _Node Controller_ (occasionally referred to by the original
+"Unified Semantics for Future Erlang" paper on which our semantics are modelled
+as the "ether") explicit.
+
+Architecturally, Cloud Haskell's _Node Controller_ consists of a pair of message
+bus processes, one of which listens for network-transport level events whilst the
+other is busy processing _signal events_ (most of which pertain to either message
+delivery or process lifecycle notification). Both these _event loops_ run sequentially
+in the system at all times.
+
+With this in mind, let's consider Cloud Haskell's lightweight processes in a bit
+more detail...
+
 ### Message Ordering
 
 We have already met the `send` primitive, which is used to deliver
@@ -86,6 +103,19 @@ subject was covered briefly in the first tutorial.
 
 Matching on messages allows us to separate the type(s) of messages we can
 handle from the type that the whole `receive` expression evaluates to. 
+
+Consider the following snippet:
+
+{% highlight haskell %}
+usingReceive =
+  receiveWait [
+      match (\(s :: String) -> say s)
+    , match (\(i :: Int) -> say $ show i)
+    ]
+{% endhighlight %}
+
+Note that each of the matches in the list must evaluate to the same type,
+as the type signature indicates: `receiveWait :: [Match b] -> Process b`.
+
 The behaviour of `receiveWait` differs from `receiveTimeout` in that it blocks
 forever (until a match is found in the process' mailbox), whereas the variant
 taking a timeout will return `Nothing` unless a match is found within
@@ -183,7 +213,101 @@ distributed-process can be utilised to develop highly generic message processing
 All the richness of the distributed-process-platform APIs (such as `ManagedProcess`)
 which will be discussed in later tutorials are, in fact, built upon these families of
 primitives.
 
+### Process Lifetime
+
+A process will continue executing until it has evaluated to some value, or is abruptly
+terminated either by crashing (with an un-handled exception) or being instructed to
+stop executing. Instructions to stop take one of two forms: a `ProcessExitException`
+or `ProcessKillException`. As the names suggest, these _signals_ are delivered in the form
+of asynchronous exceptions, however you should not rely on that fact! After all,
+we cannot throw an exception to a thread that is executing in some other operating
+system process or on a remote host! Instead, you should use the [`exit`][5] and [`kill`][6]
+primitives from distributed-process, which not only ensure that remote target processes
+are handled seamlessly, but also maintain a guarantee that if you send a message and
+*then* an exit signal, the message will be delivered to the destination process (via its
+local node controller) before the exception is thrown - note that this does not guarantee
+that the destination process will have time to _do anything_ with the message before it
+is terminated. 
+
+The `ProcessExitException` signal is sent from one process to another, indicating that the
+receiver is being asked to terminate. A process can choose to tell itself to exit, and since
+this is a useful way for processes to terminate _abnormally_, distributed-process provides
+the [`die`][7] primitive to simplify doing so. In fact, [`die`][7] has slightly different
+semantics from [`exit`][5], since the latter involves sending an internal signal to the
+local node controller. A direct consequence of this is that the _exit signal_ may not
+arrive immediately, since the _Node Controller_ could be busy processing other events.
+On the other hand, the [`die`][7] primitive throws a `ProcessExitException` directly
+in the calling thread, thus terminating it without delay.
+
+The `ProcessExitException` type holds a _reason_ field, which is serialised as a raw `Message`.
+This exception type is exported, so it is possible to catch these _exit signals_ and decide how
+to respond to them. Catching _exit signals_ is done via a set of primitives in
+distributed-process, and the use of them forms a key component of the various fault tolerance
+strategies provided by distributed-process-platform. For example, most of the utility
+code found in distributed-process-platform relies on processes terminating with a
+`ProcessKillException` or `ProcessExitException` where the _reason_ has the type
+`ExitReason` - processes which fail with other exception types are routinely converted to
+`ProcessExitException $ ExitOther reason {- reason :: String -}` automatically. This pattern
+is most prominently found in supervisors and supervised _managed processes_, which will be
+covered in subsequent tutorials.
+
+A `ProcessKillException` is intended to be an _untrappable_ exit signal, so its type is
+not exported and therefore you can __only__ handle it by catching all exceptions, which
+as we all know is very bad practice. 
The [`kill`][6] primitive is intended to be a
+_brutal_ means for terminating a process - e.g., it is used to terminate supervised child
+processes that haven't shut down on request, or to terminate processes that don't require
+any special cleanup code to run when exiting - although it does behave like [`exit`][5]
+in so much as it is dispatched (to the target process) via the _Node Controller_.
+
+### Monitoring and Linking
+
+Processes can be linked to other processes (or nodes or channels). A link, which is
+unidirectional, guarantees that once any object we have linked to *dies*, we will also
+be terminated. A simple way to test this is to spawn a child process, link to it and then
+terminate it, noting that we will subsequently die ourselves. Here's a simple example,
+in which we link to a child process and then cause it to terminate (by sending it a message
+of the type it is waiting for). Even though the child terminates "normally", our process
+is also terminated since `link` will _link the lifetime of two processes together_ regardless
+of exit reasons.
+
+{% highlight haskell %}
+demo = do
+  pid <- spawnLocal (expect :: Process ())
+  link pid
+  send pid ()
+  expect :: Process ()
+{% endhighlight %}
+
+The medium that link failures use to signal exit conditions is the same as exit and kill
+signals - asynchronous exceptions. Once again, it is a bad idea to rely on this (not least
+because it might fail in some future release) and the exception type (`ProcessLinkException`)
+is not exported so as to prevent developers from abusing exception handling code in this
+special case.
+
+Whilst the built-in `link` primitive terminates the link-ee regardless of exit reason,
+distributed-process-platform provides an alternate function `linkOnFailure`, which only
+dispatches the `ProcessLinkException` if the link-ed process dies abnormally (i.e., with
+some `DiedReason` other than `DiedNormal`). 
+ +Monitors on the other hand, do not cause the *listening* process to exit at all, instead +putting a `ProcessMonitorNotification` into the process' mailbox. This signal and its +constituent fields can be introspected in order to decide what action (if any) the receiver +can/should take in response to the monitored processes death. + +Linking and monitoring are foundational tools for *supervising* processes, where a top level +process manages a set of children, starting, stopping and restarting them as necessary. + +### Getting Process Info + +The `getProcessInfo` function provides a means for us to obtain information about a running +process. The `ProcessInfo` type it returns contains the local node id and a list of +registered names, monitors and links for the process. The call returns `Nothing` if the +process in question is not alive. + [1]: hackage.haskell.org/package/distributed-process/docs/Control-Distributed-Process.html#v:receiveWait [2]: hackage.haskell.org/package/distributed-process/docs/Control-Distributed-Process.html#v:expect [3]: http://hackage.haskell.org/package/distributed-process-0.4.2/docs/Control-Distributed-Process.html#v:match [4]: /static/semantics.pdf +[5]: http://hackage.haskell.org/package/distributed-process-0.4.2/docs/Control-Distributed-Process.html#v:exit +[6]: http://hackage.haskell.org/package/distributed-process-0.4.2/docs/Control-Distributed-Process.html#v:kill +[7]: http://hackage.haskell.org/package/distributed-process-0.4.2/docs/Control-Distributed-Process.html#v:die From 34dcf398b15debc5126e6f256e75c55c314dbef8 Mon Sep 17 00:00:00 2001 From: Tim Watson Date: Fri, 29 Nov 2013 15:46:09 +0000 Subject: [PATCH 003/108] more documentation/tutorial improvements --- documentation.md | 28 +++++----- tutorials/ch-tutorial3.md | 105 +++++++++++++++++++++++++++----------- tutorials/ch-tutorial4.md | 2 +- tutorials/ch-tutorial5.md | 2 +- 4 files changed, 89 insertions(+), 48 deletions(-) diff --git a/documentation.md b/documentation.md 
index 36409fc..3a1afa9 100644 --- a/documentation.md +++ b/documentation.md @@ -238,14 +238,14 @@ types such as `TMVar` just as normal Haskell threads would. ### Typed Channels Channels provides an alternative to message transmission with `send` and `expect`. -While `send` and `expect` allow transmission of messages of any `Serializable` +While `send` and `expect` allow us to transmit messages of any `Serializable` type, channels require a uniform type. Channels work like a distributed equivalent of Haskell's `Control.Concurrent.Chan`, however they have distinct ends: a single receiving port and a corollary send port. Channels provide a nice alternative to *bare send and receive*, which is a bit -*unHaskellish*, because the processes message queue has messages of multiple -types, and we have to do dynamic type checking. +*un-Haskell-ish*, since our process' message queue can contain messages of multiple +types, forcing us to undertake dynamic type checking at runtime. We create channels with a call to `newChan`, and send/receive on them using the `{send,receive}Chan` primitives: @@ -264,19 +264,17 @@ channelsDemo = do {% endhighlight %} Channels are particularly useful when you are sending a message that needs a -response, because the code that receives the response knows exactly where it -came from - i.e., it knows that it came from the `SendPort` connected to -the `ReceivePort` on which it just received a response. +response, because we know exactly where to look for the reply. -Channels can sometimes allows message types to be simplified, as passing a -`ProcessId` to reply to isn't required. Channels are not so useful when you -need to spawn a process and then send a bunch a messages to it and wait for -replies, because we can’t send the `ReceivePort`. +Channels can also allow message types to be simplified, as passing a +`ProcessId` for the reply isn't required. 
Channels aren't so useful when we
+need to spawn a process and send a bunch of messages to it, then wait for
+replies however; we can’t send a `ReceivePort` since it is not `Serializable`.
 
-ReceivePorts can be merged, so you can listen on several simultaneously. In the
-latest version of [distributed-process][2], you can listen for *regular* messages
-and on multiple channels at the same time, using `matchChan` in the list of
-allowed matches passed `receive`.
+`ReceivePort`s can be merged, so we can listen on several simultaneously. In the
+latest version of [distributed-process][2], we can listen for *regular* messages
+and multiple channels at the same time, using `matchChan` in the list of
+allowed matches passed to `receiveWait` and `receiveTimeout`.
 
 ### Linking and monitoring
 
@@ -318,7 +316,7 @@ function, which sends an exit signal that cannot be handled.
 #### __An important note about exit signals__
 
 Exit signals in Cloud Haskell are unlike asynchronous exceptions in regular
-haskell code. Whilst processes *can* use asynchronous exceptions - there's
+haskell code. Whilst a process *can* use asynchronous exceptions - there's
 nothing stoping this since the `Process` monad is an instance of `MonadIO` -
 exceptions thrown are not bound by the same ordering guarantees as messages
 delivered to a process. Link failures and exit signals *might* be implemented
diff --git a/tutorials/ch-tutorial3.md b/tutorials/ch-tutorial3.md
index 6b317f1..3bb08ba 100644
--- a/tutorials/ch-tutorial3.md
+++ b/tutorials/ch-tutorial3.md
@@ -8,25 +8,29 @@ title: Getting to know Processes
 ### The Thing About Nodes
 
 Before we can really get to know _processes_, we need to consider the role of
-the _Node Controller_ in Cloud Haskell. As per the [_semantics_][4], Cloud
-Haskell makes the role of _Node Controller_ (occasionally referred to by the original
-"Unified Semantics for Future Erlang" paper on which our semantics are modelled
-as the "ether") explicit. 
+the _Node Controller_ in Cloud Haskell. In our formal [_semantics_][4], Cloud +Haskell hides the role of _Node Controller_ (explicitly defined in the original +"Unified Semantics for Future Erlang" paper on which our semantics are modelled). +Nonetheless, each Cloud Haskell _node_ is serviced and managed by a +conceptual _Node Controller_. Architecturally, Cloud Haskell's _Node Controller_ consists of a pair of message buss processes, one of which listens for network-transport level events whilst the other is busy processing _signal events_ (most of which pertain to either message delivery or process lifecycle notification). Both these _event loops_ runs sequentially -in the system at all times. +in the system at all times. Messages are delivered via the _Node Controller's_ +_event loops_, which broadly correspond to the _system queue (or "ether")_ mentioned +in the [_semantics_][4]. The _system queue_ delivers messages to individual process +mailboxes in a completely transparent fashion, leaving us with the illusion that +processes exist in a unidimensional space. -With this in mind, let's consider Cloud Haskell's lightweight processes in a bit +With all this in mind, let's consider Cloud Haskell's lightweight processes in a bit more detail... ### Message Ordering -We have already met the `send` primitive, which is used to deliver -a message to another process. Here's a review of what we've learned -about `send` thus far: +We have already met the `send` primitive, used to deliver messages from one +process to another. Here's a review of what we've learned about `send` thus far: 1. sending is asynchronous (i.e., it does not block the caller) 2. sending _never_ fails, regardless of the state of the recipient process @@ -34,8 +38,8 @@ about `send` thus far: 4. there are **no** guarantees that the message will be received at all Asynchronous sending buys us several benefits. 
Improved concurrency is -possible, because processes do not need to block and wait for acknowledgements -and error handling need not be implemented each time a message is sent. +possible, because processes need not block or wait for acknowledgements, +nor does error handling need to be implemented each time a message is sent. Consider a stream of messages sent from one process to another. If the stream consists of messages `a, b, c` and we have seen `c`, then we know for certain that we will have already seen `a, b` (in that order), so long as the @@ -58,15 +62,14 @@ their mailbox. Processes dequeue messages (from their mailbox) using the [`expect`][1] and [`recieve`][2] family of primitives. Both take an optional timeout, -which leads to the expression evaluating to `Nothing` if no matching input +allowing the expression to evaluate to `Nothing` if no matching input is found. The [`expect`][1] primitive blocks until a message matching the expected type -(of the expression) is found in the process' mailbox. If such a message can be -found by scanning the mailbox, it is dequeued and given to the caller. If no -message (matching the expected type) can be found, the caller (i.e., the -calling thread) is blocked until a matching message is delivered to the mailbox. -Let's take a look at this in action: +(of the expression) is found in the process' mailbox. If a match is found by +scanning the mailbox, it is dequeued and given to the caller, otherwise the +caller (i.e., the calling thread) is blocked until a message of the expected +type is delivered to the mailbox. Let's take a look at this in action: {% highlight haskell %} demo :: Process () @@ -79,23 +82,23 @@ demo = do listen = do third <- expect :: Process ProcessId first <- expect :: Process String - Nothing <- expectTimeout 100000 :: Process String - say first + second <- expectTimeout 100000 :: Process String + mapM_ (say . 
show) [first, second, third] send third () {% endhighlight %} This program will print `"hello"`, then `Nothing` and finally `pid://...`. -The first `expect` - labelled "third" because of the order in which it is -due to be received - **will** succeed, since the parent process sends its -`ProcessId` after the string "hello", yet the listener blocks until it can dequeue -the `ProcessId` before "expecting" a string. The second `expect` (labelled "first") -also succeeds, demonstrating that the listener has selectively removed messages -from its mailbox based on their type rather than the order in which they arrived. -The third `expect` will timeout and evaluate to `Nothing`, because only one string -is ever sent to the listener and that has already been removed from the mailbox. -The removal of messages from the process' mailbox based on type is what makes this -program viable - without this "selective receiving", the program would block and -never complete. +The first `expect` - labelled "third" because of the order in which we +know it will arrive in our mailbox - **will** succeed, since the parent process +sends its `ProcessId` after the string "hello", yet the listener blocks until it +can dequeue the `ProcessId` before "expecting" a string. The second `expect` +(labelled "first") also succeeds, demonstrating that the listener has selectively +removed messages from its mailbox based on their type rather than the order in +which they arrived. The third `expect` will timeout and evaluate to `Nothing`, +because only one string is ever sent to the listener and that has already been +removed from the mailbox. The removal of messages from the process' mailbox based +on type is what makes this program viable - without this "selective receiving", +the program would block and never complete. By contrast, the [`recieve`][2] family of primitives take a list of `Match` objects, each derived from evaluating a [`match`][3] style primitive. 
This @@ -292,7 +295,47 @@ some `DiedReason` other than `DiedNormal`). Monitors on the other hand, do not cause the *listening* process to exit at all, instead putting a `ProcessMonitorNotification` into the process' mailbox. This signal and its constituent fields can be introspected in order to decide what action (if any) the receiver -can/should take in response to the monitored processes death. +can/should take in response to the monitored processes death. Let's take a look at how +monitors can be used to determine both when and _how_ a process has terminated. Tucked +away in distributed-process-platform, the `linkOnFailure` primitive works just like our +built-in `link` except that it only terminates the process which evaluated it (the +_linker_), if the process it is linking with (the _linkee_) terminates abnormally. +Let's take a look... + +{% highlight haskell %} +linkOnFailure them = do + us <- getSelfPid + tid <- liftIO $ myThreadId + void $ spawnLocal $ do + callerRef <- P.monitor us + calleeRef <- P.monitor them + reason <- receiveWait [ + matchIf (\(ProcessMonitorNotification mRef _ _) -> + mRef == callerRef) -- nothing left to do + (\_ -> return DiedNormal) + , matchIf (\(ProcessMonitorNotification mRef' _ _) -> + mRef' == calleeRef) + (\(ProcessMonitorNotification _ _ r') -> return r') + ] + case reason of + DiedNormal -> return () + _ -> liftIO $ throwTo tid (ProcessLinkException us reason) +{% endhighlight %} + +As we can see, this code makes use of monitors to track both processes involved in the +link. In order to track _both_ processes and react to changes in their status, it is +necessary to spawn a third process which will do the monitoring. This doesn't happen +with the built-in link primitive, but is necessary in this case since the link handling +code resides outside the _Node Controller_. 
+ +The two matches passed to `receiveWait` both handle a `ProcessMonitorNotification`, and +the predicate passed to `matchIf` is used to determine whether the notification we're +receiving is for the _linker_ or the _linkee_. If the _linker_ dies, we've nothing more +to do, since links are unidirectional. If the _linkee_ dies however, we must examine +the `DiedReason` the `ProcessMonitorNotification` provides us with, to determine whether +the _linkee_ exited normally (i.e., with `DiedNormal`) or otherwise. In the latter case, +we throw a `ProcessLinkException` to the _linker_, which is exactly how an ordinary link +would behave. Linking and monitoring are foundational tools for *supervising* processes, where a top level process manages a set of children, starting, stopping and restarting them as necessary. diff --git a/tutorials/ch-tutorial4.md b/tutorials/ch-tutorial4.md index 9b60324..6bb486b 100644 --- a/tutorials/ch-tutorial4.md +++ b/tutorials/ch-tutorial4.md @@ -406,7 +406,7 @@ cheap, but not free as each process is a haskell thread, plus some additional bo keeping data. The cost of spawning two processes for each computation/task might represent just that -bit too much overhead for some applications. In our next tutorial, we'll look at the +bit too much overhead for some applications. In a forthcoming tutorial, we'll look at the `Control.Distributed.Process.Platform.Task` API, which looks a lot like `Async` but manages exit signals in a single thread and makes configurable task pools and task supervision strategy part of its API. diff --git a/tutorials/ch-tutorial5.md b/tutorials/ch-tutorial5.md index 91fd483..3f268b4 100644 --- a/tutorials/ch-tutorial5.md +++ b/tutorials/ch-tutorial5.md @@ -7,7 +7,7 @@ title: Supervision Principles ### Introduction -In the previous tutorial, we looked at utilities for linking processes together +In previous tutorial, we've looked at utilities for linking processes together and monitoring their lifecycle as it changes. 
The ability to link and monitor are foundational tools for building _reliable_ systems, and are the bedrock principles on which Cloud Haskell's supervision capabilities are built. From b4d984e9989ce0f51ad3b4d8253314f4029f3303 Mon Sep 17 00:00:00 2001 From: Tim Watson Date: Wed, 5 Feb 2014 20:48:51 +0000 Subject: [PATCH 004/108] further tutorial improvements --- documentation.md | 106 +++++------- tutorials/ch-tutorial1.md | 26 +-- tutorials/ch-tutorial3.md | 165 ++++++++++-------- tutorials/ch-tutorial4.md | 348 +++++++++++++++++++++++++++++--------- 4 files changed, 416 insertions(+), 229 deletions(-) diff --git a/documentation.md b/documentation.md index 3a1afa9..6b75977 100644 --- a/documentation.md +++ b/documentation.md @@ -287,71 +287,27 @@ a set of children, starting, stopping and restarting them as necessary. ### Stopping Processes -Some processes, like the *outer* process in the previous example, will run until -they've completed and then return their value. This is just as we find with IO action, -and there is an instance of `MonadIO` for the `Process` monad, so you can `liftIO` if -you need to evaluate IO actions. - Because processes are implemented with `forkIO` we might be tempted to stop them by throwing an asynchronous exception to the process, but this is almost -certainly the wrong thing to do. Instead we might send a kind of poison pill, -which the process *ought* to handle by shutting down gracefully. Unfortunately -because of the asynchronous nature of sending, this is no good because `send` -will not fail under any circumstances. In fact, because `send` doesn't block, -we therefore have no way to know if the recipient existed at the time we sent the -poison pill. Even if the recipient did exist, we still have no guarantee that -the message we sent actually arrived - the network connection between the nodes -could have broken, for example. Making this *shutdown* protocol synchronous is -no good either - how long would we wait for a reply? 
Indefinitely? - -Exit signals come in two flavours - those that can -be caught and those that cannot. A call to -`exit :: (Serializable a) => ProcessId -> a -> Process ()` will dispatch an -exit signal to the specified process. These *signals* can be intercepted and -handled by the destination process however, so if you need to terminate the -process in a brutal way, you can use the `kill :: ProcessId -> String -> Process ()` -function, which sends an exit signal that cannot be handled. - ------- -#### __An important note about exit signals__ - -Exit signals in Cloud Haskell are unlike asynchronous exceptions in regular -haskell code. Whilst a process *can* use asynchronous exceptions - there's -nothing stoping this since the `Process` monad is an instance of `MonadIO` - -exceptions thrown are not bound by the same ordering guarantees as messages -delivered to a process. Link failures and exit signals *might* be implemented -using asynchronous exceptions - that is the case in the current -implementation - but these are implemented in such a fashion that if you -send a message and *then* an exit signal, the message is guaranteed to arrive -first. - -You should avoid throwing your own exceptions in code where possible. Instead, -you should terminate yourself, or another process, using the built-in primitives -`exit`, `kill` and `die`. - -{% highlight haskell %} -exit pid reason -- force `pid` to exit - reason can be any `Serializable` message -kill pid reason -- reason is a string - the *kill* signal cannot be caught -die reason -- as 'exit' but kills *us* -{% endhighlight %} - -The `exit` and `kill` primitives do essentially the same thing, but catching -the specific exception thrown by `kill` is impossible, making `kill` an -*untrappable exit signal*. Of course you could trap **all** exceptions, but -you already know that's a very bad idea right!? - -The `exit` primitive is a little different. 
This provides support for trapping -exit signals in a generic way, so long as your *exit handler* is able to -recognise the underlying type of the 'exit reason'. This (reason for exiting) -is stored as a raw `Message`, so if your handler takes the appropriate type -as an input (and therefore the `Message` can be decoded and passed to the -handler) then the handler will run. This is pretty much the same approach as -exception handling using `Typeable`, except that we decide whether or not the -exception can be handled based on the type of `reason` instead of the type of -the exception itself. - -Calling `die` will immediately raise an exit signal (i.e., `ProcessExitException`) -in the calling process. +certainly the wrong thing to do. Firstly, processes might reside on a remote +node, in which case throwing an exception is impossible. Secondly, if we send +some messages to a process' mailbox and then dispatch an exception to kill it, +there is no guarantee that the subject will receive our message before being +terminated by the asynchronous exception. + +To terminate a process unconditionally, we use the `kill` primitive, which +dispatches an asynchronous exception (killing the subject) safely, respecting +remote calls to processes on disparate nodes and observing message ordering +guarantees such that `send pid "hello" >> kill pid "goodbye"` behaves quite +unsurprisingly, delivering the message before the kill signal. + +Exit signals come in two flavours however - those that can be caught and those +that cannot. Whilst a call to `kill` results in an _un-trappable_ exception, +a call to `exit :: (Serializable a) => ProcessId -> a -> Process ()` will dispatch +an exit signal to the specified process that can be caught. These *signals* are +intercepted and handled by the destination process using `catchExit`, allowing +the receiver to match on the `Serializable` datum tucked away in the *exit signal* +and decide whether to oblige or not. 
---- @@ -463,6 +419,12 @@ The API for `Async` is fairly rich, so reading the haddocks is suggested. #### Managed Processes +The main idea behind a `ManagedProcess` is to separate the functional +and non-functional aspects of an actor. By functional, we mean whatever +application specific task the actor performs, and by non-functional +we mean the *concurrency* or, more precisely, handling of the process' +mailbox and its interaction with other actors (i.e., clients). + Looking at *typed channels*, we noted that their insistence on a specific input domain was more *haskell-ish* than working with bare send and receive primitives. The `Async` sub-package also provides a type safe interface for receiving data, @@ -471,12 +433,12 @@ although it is limited to running a computation and waiting for its result. The [Control.Distributed.Processes.Platform.ManagedProcess][21] API provides a number of different abstractions that can be used to achieve similar benefits in your code. It works by introducing a standard protocol between your process -and the *world around*, which governs how to handle request/reply processing, -exit signals, timeouts, sleep/hibernation with `threadDelay` and even provides +and the *world outside*, which governs how to handle request/reply processing, +exit signals, timeouts, sleeping/hibernation with `threadDelay` and even provides hooks that terminating processes can use to clean up residual state. The [API documentation][21] is quite extensive, so here we will simply point -out the obvious differences. A implemented implemented with `ManagedProcess` +out the obvious differences. A process implemented with `ManagedProcess` can present a type safe API to its callers (and the server side code too!), although that's not its primary benefit. For a very simplified example: @@ -514,6 +476,18 @@ just provides callback functions which take some state and either return a new state and a reply, or just a new state. 
The process is *managed* in the sense that its mailbox is under someone else's control. +A NOTE ABOUT THE CALL API AND THAT IT WILL FAIL (WITH UNHANDLED MESSAGE) IF +THE CALLER IS EXPECTING A TYPE THAT DIFFERS FROM THE ONE THE SERVER PLANS +TO RETURN, SINCE THE RETURN TYPE IS ENCODED IN THE CALL-MESSAGE TYPE ITSELF. + +TODO: WRITE A TEST TO PROVE THE ABOVE + +TODO: ADD AN API BASED ON SESSION TYPES AS A KIND OF MANAGED PROCESS..... + +In a forthcoming tutorial, we'll look at the `Control.Distributed.Process.Platform.Task` +API, which looks a lot like `Async` but manages exit signals in a single thread and makes +configurable task pools and task supervision strategy part of its API. + More complex examples of the `ManagedProcess` API can be seen in the [Managed Processes tutorial][22]. API documentation for HEAD is available [here][21]. diff --git a/tutorials/ch-tutorial1.md b/tutorials/ch-tutorial1.md index 9d370a2..65a0c1f 100644 --- a/tutorials/ch-tutorial1.md +++ b/tutorials/ch-tutorial1.md @@ -1,7 +1,7 @@ --- layout: tutorial categories: tutorial -sections: ['Getting Started', 'Create a node', 'Sending messages', 'Spawning Remote Processes'] +sections: ['Getting Started', 'Installing from source', 'Creating a node', 'Sending messages', 'Spawning Remote Processes'] title: Getting Started --- @@ -27,7 +27,7 @@ run `make` to obtain the complete set of source repositories for building Cloud Haskell. The additional makefiles bundled with the umbrella assume that you have a recent version of cabal-dev installed. -### Create a node +### Creating a node Cloud Haskell's *lightweight processes* reside on a "node", which must be initialised with a network transport implementation and a remote table. @@ -78,16 +78,16 @@ will send one to ourselves! {% endhighlight %} Lightweight processes are implemented as `forkIO` threads. 
In general we will -try to forget about this implementation detail, but let us note that we +try to forget about this implementation detail, but let's note that we haven't deadlocked our own thread by sending to and receiving from its mailbox in this fashion. Sending messages is a completely asynchronous operation - even if the recipient doesn't exist, no error will be raised and evaluating `send` -will not block the caller, not even if the caller is sending messages to itself! +will not block the caller, even if the caller is sending messages to itself! -Receiving works quite the other way around, blocking the caller until a message +Receiving works the opposite way, blocking the caller until a message matching the expected type arrives in our (conceptual) mailbox. If multiple -messages of that type are in the queue, they will be returned in FIFO -order, otherwise the caller will be blocked until a message arrives that can be +messages of that type are present in the mailbox, they're be returned in FIFO +order, if not, the caller is blocked until a message arrives that can be decoded to the correct type. Let's spawn two processes on the same node and have them talk to each other. @@ -150,15 +150,14 @@ In the _echo server_ above, our first match prints out whatever string it receives. If first message in out mailbox is not a `String`, then our second match is evaluated. This, given a tuple `t :: (ProcessId, String)`, will send the `String` component back to the sender's `ProcessId`. If neither match -succeeds, the echo server process blocks until another message arrives and +succeeds, the echo server blocks until another message arrives and tries again. ### Serializable Data Processes may send any datum whose type implements the `Serializable` typeclass, -which is done indirectly by implementing `Binary` and deriving `Typeable`. -Implementations are already provided for off of Cloud Haskell's primitives -and the most commonly used data structures. 
+which is done indirectly by deriving `Binary` and `Typeable`. Implementations are +provided for most of Cloud Haskell's primitives and various common data types. ### Spawning Remote Processes @@ -212,6 +211,11 @@ main = do -- etc {% endhighlight %} +Note that we're not limited to sending `Closure`s - it is possible to send data +without having static values, and assuming the receiving code is able to decode +this data and operate on it, we can easily put together a simple AST that maps +to operations we wish to execute remotely. + ------ [1]: /static/doc/distributed-process/Control-Distributed-Process.html#v:Message diff --git a/tutorials/ch-tutorial3.md b/tutorials/ch-tutorial3.md index 3bb08ba..bd1a2ab 100644 --- a/tutorials/ch-tutorial3.md +++ b/tutorials/ch-tutorial3.md @@ -1,32 +1,10 @@ --- layout: tutorial categories: tutorial -sections: ['The Thing About Nodes', 'Message Ordering', 'Selective Receive', 'Advanced Mailbox Processing', 'Process Lifetime', 'Monitoring And Linking', 'Getting Process Info'] +sections: ['Message Ordering', 'Selective Receive', 'Advanced Mailbox Processing', 'Typed Channels', 'Process Lifetime', 'Monitoring And Linking', 'Getting Process Info'] title: Getting to know Processes --- -### The Thing About Nodes - -Before we can really get to know _processes_, we need to consider the role of -the _Node Controller_ in Cloud Haskell. In our formal [_semantics_][4], Cloud -Haskell hides the role of _Node Controller_ (explicitly defined in the original -"Unified Semantics for Future Erlang" paper on which our semantics are modelled). -Nonetheless, each Cloud Haskell _node_ is serviced and managed by a -conceptual _Node Controller_. - -Architecturally, Cloud Haskell's _Node Controller_ consists of a pair of message -buss processes, one of which listens for network-transport level events whilst the -other is busy processing _signal events_ (most of which pertain to either message -delivery or process lifecycle notification). 
Both these _event loops_ runs sequentially -in the system at all times. Messages are delivered via the _Node Controller's_ -_event loops_, which broadly correspond to the _system queue (or "ether")_ mentioned -in the [_semantics_][4]. The _system queue_ delivers messages to individual process -mailboxes in a completely transparent fashion, leaving us with the illusion that -processes exist in a unidimensional space. - -With all this in mind, let's consider Cloud Haskell's lightweight processes in a bit -more detail... - ### Message Ordering We have already met the `send` primitive, used to deliver messages from one @@ -43,7 +21,7 @@ nor does error handling need to be implemented each time a message is sent. Consider a stream of messages sent from one process to another. If the stream consists of messages `a, b, c` and we have seen `c`, then we know for certain that we will have already seen `a, b` (in that order), so long as the -messages were sent to us by the same process. +messages were sent to us by the same peer process. When two concurrent process exchange messages, Cloud Haskell guarantees that messages will be delivered in FIFO order, if at all. No such guarantee exists @@ -53,10 +31,13 @@ only hold for each pair of interactions, i.e., between _A_ and _C_ and/or _B_ and _C_ the ordering will be guaranteed, but not between _A_ and _B_ with regards messages sent to _C_. -Of course, we may not want to process received messages in the -precise order which they arrived. When this need arises, the platform -supplies a set of primitives of allow the caller to _selectively_ process -their mailbox. +Because the mailbox contains messages of varying types, when we `expect` +a message, we eschew the ordering because we're searching for a message +whose contents can be decoded to a specific type. Of course, we may _want_ +to process messages in the precise order which they arrived. 
To achieve +this, we must defer the type checking that would normally cause a traversal +of the mailbox and extract the _raw_ message ourselves. This can be achieved +using `recieve` and `matchAny`, as we will demonstrate later. ### Selective Receive @@ -67,8 +48,8 @@ is found. The [`expect`][1] primitive blocks until a message matching the expected type (of the expression) is found in the process' mailbox. If a match is found by -scanning the mailbox, it is dequeued and given to the caller, otherwise the -caller (i.e., the calling thread) is blocked until a message of the expected +scanning the mailbox, it is dequeued and returned, otherwise the caller +(i.e., the calling thread/process) is blocked until a message of the expected type is delivered to the mailbox. Let's take a look at this in action: {% highlight haskell %} @@ -132,8 +113,11 @@ times out. ### Advanced Mailbox Processing There are times when it is desirable to take a message from our mailbox without -explicitly specifying its type. For example, let's consider the `relay` primitive -that ships with distributed-process. This utility function starts a process that +explicitly specifying its type. Not only is this a useful capability, it is the +_only_ way to process messages in the precise order they were received. + +To see how this works in practise, let's consider the `relay` primitive that +ships with distributed-process. This utility function starts a process that simply dequeues _any_ messages it receives and forwards them to some other process. 
In order to dequeue messages regardless of their type, this code relies on the `matchAny` primitive, which has the following type: @@ -156,7 +140,7 @@ does the latter and looks like this: {% highlight haskell %} relay :: ProcessId -> Process () -relay !pid = receiveWait [ matchAny (\m -> forward m pid) ] >> relay pid +relay !pid = forever' $ receiveWait [ matchAny (\m -> forward m pid) ] {% endhighlight %} This is pretty useful, but since `matchAny` operates on the raw `Message` type, @@ -173,11 +157,11 @@ unwrapMessage :: forall m a. (Monad m, Serializable a) => Message -> m (Maybe a) handleMessage :: forall m a b. (Monad m, Serializable a) => Message -> (a -> m b) -> m (Maybe b) {% endhighlight %} -Both primitives are generalised to any `Monad m`, so we're not limited to operating in -the `Process` monad. Of the two, `unwrapMessage` is the simpler, taking a raw `Message` -and evaluating to `Maybe a` before returning that value in the monad `m`. If the type -of the raw `Message` does not match our expectation, the result will be `Nothing`, otherwise -`Just a`. The approach `handleMessage` takes is a bit more flexible, taking a function +Of the two, `unwrapMessage` is the simpler, taking a raw `Message` and evaluating to +`Maybe a` before returning that value in the monad `m`. If the type of the raw `Message` +does not match our expectation, the result will be `Nothing`, otherwise `Just a`. + +The approach `handleMessage` takes is a bit more flexible, taking a function from `a -> m b` and returning `Just b` if the underlying message is of type `a` (hence the operation can be executed and evaluate to `Maybe b`) or `Nothing` if the message's type is incompatible with the handler function. @@ -193,10 +177,10 @@ proxy :: Serializable a => ProcessId -> (a -> Process Bool) -> Process () {% endhighlight %} Since `matchAny` operates on `(Message -> Process b)` and `handleMessage` operates on -`a -> Process b` we can compose the two and make our proxy server quite simply. 
We must -not forward messages for which the predicate function evaluates to `Just False`, nor -can we sensibly forward messages which the predicate function is unable to evaluate due -to type incompatibility. This leaves us with the definition found in distributed-process: +`a -> Process b` we can compose these to make our proxy server. We must not forward +messages for which the predicate function evaluates to `Just False`, nor can we sensibly +forward messages which the predicate function is unable to evaluate due to type +incompatibility. This leaves us with the definition found in distributed-process: {% highlight haskell %} proxy pid proc = do @@ -206,7 +190,7 @@ proxy pid proc = do case next of Just True -> forward m pid Just False -> return () -- explicitly ignored - Nothing -> return ()) -- un-routable + Nothing -> return ()) -- un-routable / cannot decode ] proxy pid proc {% endhighlight %} @@ -216,11 +200,27 @@ distributed-process can be utilised to develop highly generic message processing All the richness of the distributed-process-platform APIs (such as `ManagedProcess`) which will be discussed in later tutorials are, in fact, built upon these families of primitives. +### Typed Channels + +While being able to send and receive any `Serializable` datum is very powerful, the burden +of decoding types correctly at runtime is levied on the programmer and there are runtime +overheads to be aware of (which will be covered in later tutorials). Fortunately, +distributed-provides provides a type safe alternative to `send` and `receive`, in the form +of _Typed Channels_. Represented by distinct ends, a `SendPort a` (which is `Serializable`) +and `ReceivePort a` (which is not), channels are a lightweight and useful abstraction that +provides a type safe interface for interacting with processes separately from their primary +mailbox. 
+ +Channels are created with `newChan :: Process (SendPort a, ReceivePort a)`, with +messages sent via `sendChan :: SendPort a -> a -> Process ()`. The `ReceivePort` can be +passed directly to `receiveChan`, or used in a `receive{Wait, Timeout}` call via the +`matchChan` primitive, so as to combine mailbox scans with channel reads. + ### Process Lifetime A process will continue executing until it has evaluated to some value, or is abruptly terminated either by crashing (with an un-handled exception) or being instructed to -stop executing. Stop instructions to stop take one of two forms: a `ProcessExitException` +stop executing. Deliberate stop instructions take one of two forms: a `ProcessExitException` or `ProcessKillException`. As the names suggest, these _signals_ are delivered in the form of asynchronous exceptions, however you should not to rely on that fact! After all, we cannot throw an exception to a thread that is executing in some other operating @@ -233,26 +233,33 @@ that the destination process will have time to _do anything_ with the message be is terminated. The `ProcessExitException` signal is sent from one process to another, indicating that the -receiver is being asked to terminate. A process can choose to tell itself to exit, and since -this is a useful way for processes to terminate _abnormally_, distributed-processes provides -the [`die`][7] primitive to simplify doing so. In fact, [`die`][7] has slightly different -semantics from [`exit`][5], since the latter involves sending an internal signal to the -local node controller. A direct consequence of this is that the _exit signal_ may not -arrive immediately, since the _Node Controller_ could be busy processing other events. -On the other hand, the [`die`][7] primitive throws a `ProcessExitException` directly -in the calling thread, thus terminating it without delay. +receiver is being asked to terminate. 
A process can choose to tell itself to exit, and the +[`die`][7] primitive simplifies doing so without worrying about the expected type for the +action. In fact, [`die`][7] has slightly different semantics from [`exit`][5], since the +latter involves sending an internal signal to the local node controller. A direct consequence +of this is that the _exit signal_ may not arrive immediately, since the _Node Controller_ could +be busy processing other events. On the other hand, the [`die`][7] primitive throws a +`ProcessExitException` directly in the calling thread, thus terminating it without delay. +In practise, this means the following two functions could behave quite differently at +runtime: + +{% highlight haskell %} + +-- this will never print anything... +demo1 = die "Boom" >> expect >>= say + +-- this /might/ print something before it exits +demo2 = do + self <- getSelfPid + exit self "Boom" + expect >>= say +{% endhighlight %} The `ProcessExitException` type holds a _reason_ field, which is serialised as a raw `Message`. This exception type is exported, so it is possible to catch these _exit signals_ and decide how to respond to them. Catching _exit signals_ is done via a set of primitives in distributed-process, and the use of them forms a key component of the various fault tolerance -strategies provided by distributed-process-platform. For example, most of the utility -code found in distributed-process-platform relies on processes terminating with a -`ProcessKillException` or `ProcessExitException` where the _reason_ has the type -`ExitReason` - processes which fail with other exception types are routinely converted to -`ProcessExitException $ ExitOther reason {- reason :: String -}` automatically. This pattern -is most prominently found in supervisors and supervised _managed processes_, which will be -covered in subsequent tutorials. +strategies provided by distributed-process-platform. 
A `ProcessKillException` is intended to be an _untrappable_ exit signal, so its type is not exported and therefore you can __only__ handle it by catching all exceptions, which @@ -265,7 +272,7 @@ in so much as it is dispatched (to the target process) via the _Node Controller_ ### Monitoring and Linking Processes can be linked to other processes (or nodes or channels). A link, which is -unidirectional, guarantees that once any object we have linked to *dies*, we will also +unidirectional, guarantees that once any object we have linked to *exits*, we will also be terminated. A simple way to test this is to spawn a child process, link to it and then terminate it, noting that we will subsequently die ourselves. Here's a simple example, in which we link to a child process and then cause it to terminate (by sending it a message @@ -283,9 +290,10 @@ demo = do The medium that link failures uses to signal exit conditions is the same as exit and kill signals - asynchronous exceptions. Once again, it is a bad idea to rely on this (not least -because it might fail in some future release) and the exception type (`ProcessLinkException`) +because it might change in some future release) and the exception type (`ProcessLinkException`) is not exported so as to prevent developers from abusing exception handling code in this -special case. +special case. Since link exit signals cannot be caught directly, if you find yourself wanting +to _trap_ a link failure, you probably want to use a monitor instead. Whilst the built-in `link` primitive terminates the link-ee regardless of exit reason, distributed-process-platform provides an alternate function `linkOnFailure`, which only @@ -295,12 +303,10 @@ some `DiedReason` other than `DiedNormal`). Monitors on the other hand, do not cause the *listening* process to exit at all, instead putting a `ProcessMonitorNotification` into the process' mailbox. 
This signal and its constituent fields can be introspected in order to decide what action (if any) the receiver -can/should take in response to the monitored processes death. Let's take a look at how +can/should take in response to the monitored process' death. Let's take a look at how monitors can be used to determine both when and _how_ a process has terminated. Tucked -away in distributed-process-platform, the `linkOnFailure` primitive works just like our -built-in `link` except that it only terminates the process which evaluated it (the -_linker_), if the process it is linking with (the _linkee_) terminates abnormally. -Let's take a look... +away in distributed-process-platform, the `linkOnFailure` primitive works in exactly this +way, only terminating the caller if the subject terminates abnormally. Let's take a look... {% highlight haskell %} linkOnFailure them = do @@ -330,16 +336,29 @@ code resides outside the _Node Controller_. The two matches passed to `receiveWait` both handle a `ProcessMonitorNotification`, and the predicate passed to `matchIf` is used to determine whether the notification we're -receiving is for the _linker_ or the _linkee_. If the _linker_ dies, we've nothing more -to do, since links are unidirectional. If the _linkee_ dies however, we must examine -the `DiedReason` the `ProcessMonitorNotification` provides us with, to determine whether -the _linkee_ exited normally (i.e., with `DiedNormal`) or otherwise. In the latter case, -we throw a `ProcessLinkException` to the _linker_, which is exactly how an ordinary link -would behave. +receiving is for the process that called us, or the _linked to_ process. If the former +dies, we've nothing more to do, since links are unidirectional. If the latter dies +however, we must examine the `DiedReason` the `ProcessMonitorNotification` provides us +with, to determine whether the subject exited normally (i.e., with `DiedNormal`). 
+
+If the exit was _abnormal_, we throw a `ProcessLinkException` to the original caller,
+which is exactly how an ordinary link would behave.
 
 Linking and monitoring are foundational tools for *supervising* processes, where a top level
 process manages a set of children, starting, stopping and restarting them as
 necessary.
 
+Exit signals in Cloud Haskell then, are unlike asynchronous exceptions in other
+Haskell code. Whilst a process *can* use asynchronous exceptions - there's
+nothing stopping this since the `Process` monad is an instance of `MonadIO` -
+as we've seen, exceptions thrown are not bound by the same ordering guarantees
+as messages delivered to a process. Link failures and exit signals *might* work
+via asynchronous exceptions - that is the case in the current implementation - but
+these are implemented in such a fashion that if you send a message and *then* an
+exit signal, the message is guaranteed to arrive first.
+
+You should avoid throwing your own exceptions in code where possible. Instead,
+you should terminate yourself, or another process, using the built-in primitives
+`exit`, `kill` and `die`.
+ ### Getting Process Info The `getProcessInfo` function provides a means for us to obtain information about a running diff --git a/tutorials/ch-tutorial4.md b/tutorials/ch-tutorial4.md index 6bb486b..d9b1362 100644 --- a/tutorials/ch-tutorial4.md +++ b/tutorials/ch-tutorial4.md @@ -1,75 +1,269 @@ --- layout: tutorial -sections: ['Introduction', 'Implementing the Client', 'Implementing the Server', 'Making use of Async', 'Wiring up Handlers', 'Putting it all together', 'Performance Considerations'] +sections: ['Introduction', 'Managed Processes', 'A Basic Example', 'Building a Task Queue', 'Implementing the Client', 'Implementing the Server', 'Making use of Async', 'Wiring up Handlers', 'Putting it all together', 'Performance Considerations'] categories: tutorial title: Managed Process Tutorial --- ### Introduction -The source code on which this tutorial is (loosely) based is kept on github, -and can be accessed [here][1]. Please note that this tutorial is -based on the stable (master) branch of distributed-process-platform. +The source code for this tutorial is based on the `BlockingQueue` module +from distributed-process-platform and can be accessed [here][1]. +Please note that this tutorial is based on the stable (master) branch +of distributed-process-platform. ### Managed Processes -The main idea behind a `ManagedProcess` is to separate the functional -and non-functional aspects of an actor. By functional, we mean whatever -application specific task the actor performs, and by non-functional -we mean the *concurrency* or, more precisely, handling of the process' -mailbox and its interaction with other actors (i.e., clients). - -Another effect of the `ManagedProcess` API is to provide client code -with a typed (i.e., type specific) API for interacting with the process, -much as a `TypedChannel` does. 
We achieve this by writing and exporting
-functions that operate on the types we want clients to see, and using
-the API from `Control.Distributed.Process.Platform.ManagedProcess.Client`
-to interact with the server.
-
-Let's imagine we want to execute tasks on an arbitrary node, using a
-mechanism much as we would with the `call` API from distributed-process.
-As with `call`, we want the caller to block whilst the remote task is
-executing, but we also want to put an upper bound on the number of
-concurrent tasks. We will use `ManagedProcess` to implement a generic
-task server with the following characteristics
+There are subtle bugs waiting in code that evaluates `send` and `receive`
+directly. Forgetting to monitor the destination whilst waiting for a reply
+and failing to match on the correct message types are the most common ones,
+but others exist (such as badly formed `Binary` instances for user defined
+data types).
+
+The /Managed Process/ API handles _all_ sending and receiving of messages,
+error handling and decoding problems on your behalf, leaving you to focus
+on writing code that describes _what the server process does_ when it receives
+messages, rather than how it receives them. The API also provides a set of
+pre-defined client interactions, all of which have well defined semantics
+and failure modes.
+
+A managed process server definition is defined using record syntax, with
+a list of `Dispatcher` types that describe how the server should handle
+particular kinds of client interaction, for specific types. The fields
+of the `ProcessDefinition` record also provide for error handling (in case
+of either server code crashing _or_ exit signals dispatched to the server
+process) and _cleanup_ code required to run on terminate/shutdown.
+
+{% highlight haskell %}
+myServer :: ProcessDefinition MyStateType
+myServer =
+  ProcessDefinition {
+    apiHandlers = [
+      -- a list of Dispatcher, derived from calling
+      -- handleCast or handleCall with a suitable function, e.g.,
+      handleCast myFunctionThatDoesNotReply
+    , handleCall myFunctionThatDoesReply
+    , handleRpcChan myFunctionThatRepliesViaTypedChannels
+    ]
+  , infoHandlers = [
+      -- a list of DeferredDispatcher, derived from calling
+      -- handleInfo or handleRaw with a suitable function, e.g.,
+      handleInfo myFunctionThatHandlesOneSpecificNonCastNonCallMessageType
+    , handleRaw myFunctionThatHandlesRawMessages
+    ]
+  , exitHandlers = [
+      -- a list of ExitSignalDispatcher, derived from calling
+      -- handleExit with a suitable function, e.g.,
+      handleExit myExitHandlingFunction
+    ]
+    -- what should I do just before stopping?
+  , terminateHandler = myTerminateFunction
+    -- what should I do about messages that cannot be handled?
+  , unhandledMessagePolicy = Drop -- Terminate | (DeadLetter ProcessId)
+  }
+
+{% endhighlight %}
+
+Client interactions with a managed process come in various flavours. It is
+still possible to send an arbitrary message to a managed process, just as
+you would a regular process. When defining a protocol between client and
+server processes however, it is useful to define a specific set of types
+that the server expects to receive from the client and possibly replies
+that the server may send back. The `cast` and `call` mechanisms in the
+/managed process/ API cater for this requirement specifically, allowing
+the developer tighter control over the domain of input messages from
+clients, whilst ensuring that client code handles errors (such as server
+failures) consistently and those input messages are routed to a suitable
+message handling function in the server process.
+
+---------
+
+### A Basic Example
+
+Let's consider a simple _math server_ like the one in the main documentation
+page. 
We could allow clients to send us `(ProcessId, Double, Double)` and +reply to the first tuple element with the sum of the second and third. But +what happens if our process is killed while the client is waiting for the +reply? (The client would deadlock). The client could always set up a monitor +and wait for the reply _or_ a monitor signal, and could even write that code +generically, but what if the code evaluating the client's utility function +`expect`s the wrong type? We could use a typed channel to alleviate that ill, +but that only helps with the client receiving messages, not the server. How +can we ensure that the server receives the correct type(s) as well? Creating +multiple typed channels (one for each kind of message we're expecting) and +then distributing those to all our clients seems like a kludge. + +The `call` and `cast` APIs help us to avoid precisely this conundrum by +providing a uniform API for both the client _and_ the server to observe. Whilst +there is nothing to stop clients from sending messages directly to a managed +process, it is simple enough to prevent this as well (just by hiding its +`ProcessId`, either behind a newtype or some other opaque structure). The +author of the server is then able to force clients through API calls that +can enforce the required types _and_ ensure that the correct client-server +protocol is used. Here's a better example of that math server that does +just so: + +---- + +{% highlight haskell %} +module MathServer + ( -- client facing API + add + -- starting/spawning the server process + , launchMathServer + ) where + +import .... -- elided + +-- We keep this data-type hidden from the outside world, and we ignore +-- messages sent to us that we do not recognise, so misbehaving clients +-- (who do not use our API) are basically ignored. 
+data Add = Add Double Double
+  deriving (Typeable, Generic)
+instance Binary Add where
+
+-- client facing API
+
+-- This is the only way clients can get a message through to us that
+-- we will respond to, and since we control the type(s), there is no
+-- risk of decoding errors on the server. The /call/ API ensures that
+-- if the server does fail for some other reason however (such as being
+-- killed by another process), the client will get an exit signal also.
+--
+add :: ProcessId -> Double -> Double -> Process Double
+add sid = call sid . Add
+
+-- server side code
+
+launchMathServer :: Process ProcessId
+launchMathServer =
+  let server = statelessProcess {
+      apiHandlers = [ handleCall_ (\(Add x y) -> return (x + y)) ]
+    , unhandledMessagePolicy = Drop
+    }
+  in spawnLocal $ start () (statelessInit Infinity) server >> return ()
+{% endhighlight %}
+
+
+This style of programming will already be familiar if you've used some
+combination of `send` in your clients and the `receive [ match ... ]`
+family of functions to write your servers. The primary difference here,
+is that the choice of when to return to (potentially blocking on) the
+server's mailbox is taken out of the programmer's hands, leaving the
+implementor to worry only about the logic to be applied once a message
+of one type or another is received.
+
+----
+
+Of course, it would still be possible to write the server and client code
+and encounter a type resolution failure, since `call` still takes an
+arbitrary `Serializable` datum just like `send`. We can solve that for
+the return type of the _remote_ call by sending a typed channel and
+replying explicitly to it in our server side code. Whilst this doesn't
+make the server code any prettier (since it has to reply to the channel
+explicitly, rather than just evaluating to a result), it does reduce the
+likelihood of runtime errors somewhat. 
+ +{% highlight haskell %} +-- This is the only way clients can get a message through to us that +-- we will respond to, and since we control the type(s), there is no +-- risk of decoding errors on the server. The /call/ API ensures that +-- if the server does fail for some other reason however (such as being +-- killed by another process), the client will get an exit signal also. +-- +add :: ProcessId -> Double -> Double -> Process Double +add sid = syncCallChan sid . Add + +launchMathServer :: Process ProcessId +launchMathServer = + let server = statelessProcess { + apiHandlers = [ handleRpcChan_ (\chan (Add x y) -> sendChan chan (x + y)) ] + , unhandledMessagePolicy = Drop + } + in spawnLocal $ start () (statelessInit Infinity) server >> return () +{% endhighlight %} + +Ensuring that only valid types are sent to the server is relatively simple, +given that we do not expose the client directly to `call` and write our own +wrapper functions. An additional level of isolation and safety is available +when using /control channels/, which will be covered in a subsequent tutorial. + +Before we leave the math server behind, let's take a brief look at the `cast` +side of the client-server protocol. Unlike its synchronous cousin, `cast` does +not expect a reply at all - it is a fire and forget call, much like `send`, +but carries the same additional type information that a `call` does (about its +inputs) and is also routed to a `Dispatcher` in the `apiHandlers` field of the +process definition. + +We will use cast with the existing `Add` type, to implement a function that +takes an /add request/ and prints the result instead of returning it. If we +were implementing this with `call` we would be a bit stuck, because there is +nothing to differentiate between two `Add` instances and the server would +choose the first valid (i.e., type safe) handler and ignore the others. 
+ +Note that because the client doesn't wait for a reply, if you execute this +function in a test/demo application, you'll need to block the main thread +for a while to wait for the server to receive the message and print out +the result. + +{% highlight haskell %} + +printSum :: ProcessId -> Double -> Double -> Process () +printSum sid = cast sid . Add + +launchMathServer :: Process ProcessId +launchMathServer = + let server = statelessProcess { + apiHandlers = [ handleRpcChan_ (\chan (Add x y) -> sendChan chan (x + y)) + , handleCast_ (\(Add x y) -> liftIO $ putStrLn $ show (x + y) >> continue_) ] + , unhandledMessagePolicy = Drop + } + in spawnLocal $ start () (statelessInit Infinity) server >> return () +{% endhighlight %} + + +Of course this is a toy example - why defer simple computations like addition +and/or printing results to a separate process? Next, we'll build something a bit +more interesting and useful. + +### Building a Task Queue + +This section of the tutorial is based on a real module from the +distributed-process-platform library, called `BlockingQueue`. + +Let's imagine we want to execute tasks on an arbitrary node, but want +the caller to block whilst the remote task is executing. We also want +to put an upper bound on the number of concurrent tasks/callers that +the server will accept. Let's use `ManagedProcess` to implement a generic +task server like this, with the following characteristics * requests to enqueue a task are handled immediately * callers however, are blocked until the task completes (or fails) * an upper bound is placed on the number of concurrent running tasks -Once the upper bound is reached, tasks will be queued up for later -execution. Only when we drop below this limit will tasks be taken -from the backlog and executed. - -`ManagedProcess` provides a basic protocol for *server-like* processes -such as this, based on the synchronous `call` and asynchronous `cast` -functions. 
We use these to determine client behaviour, and matching -*handler* functions are set up in the process itself, to process the -requests and (if required) replies. This style of programming will -already be familiar if you've used some combination of `send` in your -clients and the `receive [ match ... ]` family of functions to write -your servers. The primary difference here, is that the choice of when -to return to (potentially blocking on) the server's mailbox is taken -out of the programmer's hands, leaving the implementor to worry only -about the logic to be applied once a message of one type or another -is received. +Once the upper bound is reached, tasks will be queued up for execution. +Only when we drop below this limit will tasks be taken from the backlog +and executed. -### Implementing the client +Since we want the server to proceed with its work whilst the client is +blocked, the asynchronous `cast` API may sound like the ideal approach, +or we might use the asynchronous cousin of our typed-channel +handling API `callChan`. The `call` API however, offers exactly the +tools we need to keep the client blocked (waiting for a reply) whilst +the server is allowed to proceed with its work. -Before we figure out the shape of our state, let's think about the types -we'll need to consume in the server process: the tasks we perform and the -maximum pool size. +### Implementing the client -{% highlight haskell %} -type PoolSize = Int -type SimpleTask a = Closure (Process a) -{% endhighlight %} +We'll start by thinking about the types we need to consume in the server +and client processes: the tasks we're being asked to perform. To submit a task, our clients will submit an action in the process monad, wrapped in a `Closure` environment. 
We will use the `Addressable` typeclass to allow clients to specify the server's location in whatever -manner suits them: - +manner suits them: The type of a task will be `Closure (Process a)` and +the server will explicitly return an /either/ value with `Left String` +for errors and `Right a` for successful results. + {% highlight haskell %} -- enqueues the task in the pool and blocks -- the caller until the task is complete @@ -231,7 +425,7 @@ The steps then, are 4. bump another task from the backlog (if there is one) 5. carry on -This chain then, looks like `wait h >>= respond c >> bump s t >>= continue`. +This chain then, looks like `wait >>= respond >> bump-next-task >>= continue`. Item (3) requires special API support from `ManagedProcess`, because we're not just sending *any* message back to the caller. We're replying to a specific `call` @@ -284,10 +478,10 @@ simpler to work with. The `ProcessDefinition` takes a number of different kinds of handler. The only ones _we_ care about are the call handler for submissions, and the handler that -deals with monitor signals. +deals with monitor signals. TODO: THIS DOES NOT READ WELL -Call and cast handlers live in the `apiHandlers` list of a `ProcessDefinition` and -must have the type `Dispatcher s` where `s` is the state type for the process. We +Call and cast handlers live in the `apiHandlers` list of our `ProcessDefinition` +and have the type `Dispatcher s` where `s` is the state type for the process. We cannot construct a `Dispatcher` ourselves, but a range of functions in the `ManagedProcess.Server` module exist to lift functions like the ones we've just defined, to the correct type. 
The particular function we need is `handleCallFrom`, @@ -301,11 +495,11 @@ data ProcessReply s a = | NoReply (ProcessAction s) {% endhighlight %} -There are also various utility functions in the API to construct a `ProcessAction` -and we make use of `noReply_` here, which constructs `NoReply` for us and -presets the `ProcessAction` to `ProcessContinue`, which goes back to receiving -messages from clients. We already have a function over our input domain, which -evaluates to a new state, so we end up with: +Again, various utility functions are defined by the API for constructing a +`ProcessAction` and we make use of `noReply_` here, which constructs `NoReply` +for us and presets the `ProcessAction` to `continue`, which goes back to +receiving messages from clients. We already have a function over our input domain, +which evaluates to a new state, so we end up with: {% highlight haskell %} storeTask :: Serializable a @@ -324,7 +518,7 @@ handleCallFrom (\s f (p :: Closure (Process a)) -> storeTask s f p) {% endhighlight %} No such thing is required for `taskComplete`, as there's no ambiguity about its -type. Our process definition is finished, and here it is: +type. Our process definition is now finished, and here it is: {% highlight haskell %} poolServer :: forall a . (Serializable a) => ProcessDefinition (Pool a) @@ -339,9 +533,10 @@ poolServer = } :: ProcessDefinition (Pool a) {% endhighlight %} -Starting the pool is fairly simple and `ManagedProcess` has some utilities to help. +Starting the pool is simple: `ManagedProcess` provides several utility functions +to help with spawning and running processes. The `start` function takes an _initialising_ thunk, which must generate the initial -state and per-call timeout setting, and the process definition which we've already +state and per-call timeout settings, then the process definition which we've already encountered. {% highlight haskell %} @@ -351,7 +546,10 @@ simplePool :: forall a . 
(Serializable a) -> Process (Either (InitResult (Pool a)) TerminateReason) simplePool sz server = start sz init' server where init' :: PoolSize -> Process (InitResult (Pool a)) - init' sz' = return $ InitOk (Pool sz' [] []) Infinity + init' sz' = return $ InitOk (emptyPool sz') Infinity + + emptyPool :: Int -> Pool a + emptyPool s = Pool s [] [] {% endhighlight %} ### Putting it all together @@ -361,8 +559,8 @@ or `spawnLocal` with `simplePool`. The second argument should add specificity to the type of results the process definition operates on, e.g., {% highlight haskell %} -let s' = poolServer :: ProcessDefinition (Pool String) -in simplePool s s' +let svr' = poolServer :: ProcessDefinition (Pool String) +in simplePool s svr' {% endhighlight %} Defining tasks is as simple as making them remote-worthy: @@ -375,7 +573,7 @@ $(remotable ['sampleTask]) {% endhighlight %} And executing them is just as simple too. Given a pool which has been registered -locally as "mypool", we can simply call it directly: +locally as "mypool": {% highlight haskell %} tsk <- return $ ($(mkClosure 'sampleTask) (seconds 2, "foobar")) @@ -398,17 +596,9 @@ execution ordering. Perhaps more of a concern is the cost of using `Async` everywhere - remember we used this in the *server* to handle concurrently executing tasks and obtaining -their results. The `Async` module is also used by `ManagedProcess` to handle the -`call` mechanism, and there *are* some overheads to using it. An invocation of -`async` will create two new processes: one to perform the calculation and another -to monitor the first and handle failure and/or cancellation. Spawning processes is -cheap, but not free as each process is a haskell thread, plus some additional book -keeping data. - -The cost of spawning two processes for each computation/task might represent just that -bit too much overhead for some applications. 
In a forthcoming tutorial, we'll look at the -`Control.Distributed.Process.Platform.Task` API, which looks a lot like `Async` but -manages exit signals in a single thread and makes configurable task pools and task -supervision strategy part of its API. - -[1]: https://github.com/haskell-distributed/distributed-process-platform/blob/master/tests/SimplePool.hs +their results. An invocation of `async` will create two new processes: one to perform +the calculation and another to monitor the first and handle failures and/or cancellation. +Spawning processes is cheap, but not free as each process is a haskell thread, plus +some additional book keeping data. + +[1]: https://github.com/haskell-distributed/distributed-process-platform/blob/master/src/Control/Distributed/Process/Platform/Task/Queue/BlockingQueue.hs From 6b847526984aad63ad22dc662394fc665f297878 Mon Sep 17 00:00:00 2001 From: Tim Watson Date: Wed, 5 Feb 2014 21:25:16 +0000 Subject: [PATCH 005/108] Update documentation.md --- documentation.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/documentation.md b/documentation.md index 949430f..986417b 100644 --- a/documentation.md +++ b/documentation.md @@ -3,9 +3,9 @@ layout: documentation title: Documentation --- -### Cloud Haskell Platform +### Cloud Haskell -This is the [*Cloud Haskell Platform*][1]. Cloud Haskell is a set of libraries +This is [*Cloud Haskell*][1]. Cloud Haskell is a set of libraries that bring Erlang-style concurrency and distribution to Haskell programs. 
This project is an implementation of that distributed computing interface, where processes communicate with one another through explicit message passing rather From ef0b0716fd3e61db9f24082930c21ad8264ea003 Mon Sep 17 00:00:00 2001 From: Tim Watson Date: Thu, 6 Feb 2014 10:54:15 +0000 Subject: [PATCH 006/108] Merge haskell-distributed.gitub.com/development --- Gemfile | 2 + Makefile | 2 +- _config.yml | 4 +- _layouts/{tutorial1.html => tutorial.html} | 8 +- _layouts/tutorial3.html | 42 -- documentation.md | 138 ++--- tutorials/ch-tutorial1.md | 225 +++++++ tutorials/ch-tutorial2.md | 111 ++++ tutorials/ch-tutorial3.md | 375 ++++++++++++ tutorials/ch-tutorial4.md | 624 ++++++++++++++++++++ tutorials/ch-tutorial5.md | 15 + tutorials/{tutorial2.md => tutorial-NT2.md} | 0 tutorials/tutorial1.md | 205 ------- tutorials/tutorial3.md | 389 ------------ 14 files changed, 1415 insertions(+), 725 deletions(-) create mode 100644 Gemfile rename _layouts/{tutorial1.html => tutorial.html} (75%) delete mode 100644 _layouts/tutorial3.html create mode 100644 tutorials/ch-tutorial1.md create mode 100644 tutorials/ch-tutorial2.md create mode 100644 tutorials/ch-tutorial3.md create mode 100644 tutorials/ch-tutorial4.md create mode 100644 tutorials/ch-tutorial5.md rename tutorials/{tutorial2.md => tutorial-NT2.md} (100%) delete mode 100644 tutorials/tutorial1.md delete mode 100644 tutorials/tutorial3.md diff --git a/Gemfile b/Gemfile new file mode 100644 index 0000000..053c27d --- /dev/null +++ b/Gemfile @@ -0,0 +1,2 @@ +source 'https://rubygems.org' +gem 'github-pages' diff --git a/Makefile b/Makefile index 128d222..97e0cd3 100644 --- a/Makefile +++ b/Makefile @@ -21,4 +21,4 @@ endif .PHONY: serve serve: - jekyll --pygments --no-lsi --safe --server + jekyll serve -w diff --git a/_config.yml b/_config.yml index ca44606..d4f89de 100644 --- a/_config.yml +++ b/_config.yml @@ -1,6 +1,6 @@ exclude: [".rvmrc", ".rbenv-version", "README.md", "Rakefile", "changelog.md"] -auto: true lsi: false 
+auto: true pygments: true safe: true @@ -13,4 +13,4 @@ author: github: hyperthunk twitter: hyperthunk -production_url: http://haskell-distributed.github.com/distributed-process-platform +production_url: http://hyperthunk.github.com/website-next-preview diff --git a/_layouts/tutorial1.html b/_layouts/tutorial.html similarity index 75% rename from _layouts/tutorial1.html rename to _layouts/tutorial.html index a72f85a..b44874c 100644 --- a/_layouts/tutorial1.html +++ b/_layouts/tutorial.html @@ -21,9 +21,11 @@ diff --git a/_layouts/tutorial3.html b/_layouts/tutorial3.html deleted file mode 100644 index 9786a38..0000000 --- a/_layouts/tutorial3.html +++ /dev/null @@ -1,42 +0,0 @@ - - - - {% include head.html %} - - - - - {% include nav.html %} -
- -
- - {% include footer.html %} - {% include js.html %} - - diff --git a/documentation.md b/documentation.md index 986417b..6b75977 100644 --- a/documentation.md +++ b/documentation.md @@ -3,9 +3,9 @@ layout: documentation title: Documentation --- -### Cloud Haskell +### Cloud Haskell Platform -This is [*Cloud Haskell*][1]. Cloud Haskell is a set of libraries +This is the [*Cloud Haskell Platform*][1]. Cloud Haskell is a set of libraries that bring Erlang-style concurrency and distribution to Haskell programs. This project is an implementation of that distributed computing interface, where processes communicate with one another through explicit message passing rather @@ -238,14 +238,14 @@ types such as `TMVar` just as normal Haskell threads would. ### Typed Channels Channels provides an alternative to message transmission with `send` and `expect`. -While `send` and `expect` allow transmission of messages of any `Serializable` +While `send` and `expect` allow us to transmit messages of any `Serializable` type, channels require a uniform type. Channels work like a distributed equivalent of Haskell's `Control.Concurrent.Chan`, however they have distinct ends: a single receiving port and a corollary send port. Channels provide a nice alternative to *bare send and receive*, which is a bit -*unHaskellish*, because the processes message queue has messages of multiple -types, and we have to do dynamic type checking. +*un-Haskell-ish*, since our process' message queue can contain messages of multiple +types, forcing us to undertake dynamic type checking at runtime. 
We create channels with a call to `newChan`, and send/receive on them using the
 `{send,receive}Chan` primitives:
@@ -264,19 +264,17 @@ channelsDemo = do
 {% endhighlight %}
 
 Channels are particularly useful when you are sending a message that needs a
-response, because the code that receives the response knows exactly where it
-came from - i.e., it knows that it came from the `SendPort` connected to
-the `ReceivePort` on which it just received a response.
+response, because we know exactly where to look for the reply.
 
-Channels can sometimes allows message types to be simplified, as passing a
-`ProcessId` to reply to isn't required. Channels are not so useful when you
-need to spawn a process and then send a bunch a messages to it and wait for
-replies, because we can’t send the `ReceivePort`.
+Channels can also allow message types to be simplified, as passing a
+`ProcessId` for the reply isn't required. Channels aren't so useful when we
+need to spawn a process and send a bunch of messages to it, then wait for
+replies however; we can’t send a `ReceivePort` since it is not `Serializable`.
 
-ReceivePorts can be merged, so you can listen on several simultaneously. In the
-latest version of [distributed-process][2], you can listen for *regular* messages
-and on multiple channels at the same time, using `matchChan` in the list of
-allowed matches passed `receive`.
+`ReceivePort`s can be merged, so we can listen on several simultaneously. In the
+latest version of [distributed-process][2], we can listen for *regular* messages
+and multiple channels at the same time, using `matchChan` in the list of
+allowed matches passed `receiveWait` and `receiveTimeout`.
 
 ### Linking and monitoring
 
@@ -289,71 +287,27 @@ a set of children, starting, stopping and restarting them as
 necessary.
 
 ### Stopping Processes
 
-Some processes, like the *outer* process in the previous example, will run until
-they've completed and then return their value. 
This is just as we find with IO action, -and there is an instance of `MonadIO` for the `Process` monad, so you can `liftIO` if -you need to evaluate IO actions. - Because processes are implemented with `forkIO` we might be tempted to stop them by throwing an asynchronous exception to the process, but this is almost -certainly the wrong thing to do. Instead we might send a kind of poison pill, -which the process *ought* to handle by shutting down gracefully. Unfortunately -because of the asynchronous nature of sending, this is no good because `send` -will not fail under any circumstances. In fact, because `send` doesn't block, -we therefore have no way to know if the recipient existed at the time we sent the -poison pill. Even if the recipient did exist, we still have no guarantee that -the message we sent actually arrived - the network connection between the nodes -could have broken, for example. Making this *shutdown* protocol synchronous is -no good either - how long would we wait for a reply? Indefinitely? - -Exit signals come in two flavours - those that can -be caught and those that cannot. A call to -`exit :: (Serializable a) => ProcessId -> a -> Process ()` will dispatch an -exit signal to the specified process. These *signals* can be intercepted and -handled by the destination process however, so if you need to terminate the -process in a brutal way, you can use the `kill :: ProcessId -> String -> Process ()` -function, which sends an exit signal that cannot be handled. - ------- -#### __An important note about exit signals__ - -Exit signals in Cloud Haskell are unlike asynchronous exceptions in regular -haskell code. Whilst processes *can* use asynchronous exceptions - there's -nothing stoping this since the `Process` monad is an instance of `MonadIO` - -exceptions thrown are not bound by the same ordering guarantees as messages -delivered to a process. 
Link failures and exit signals *might* be implemented -using asynchronous exceptions - that is the case in the current -implementation - but these are implemented in such a fashion that if you -send a message and *then* an exit signal, the message is guaranteed to arrive -first. - -You should avoid throwing your own exceptions in code where possible. Instead, -you should terminate yourself, or another process, using the built-in primitives -`exit`, `kill` and `die`. - -{% highlight haskell %} -exit pid reason -- force `pid` to exit - reason can be any `Serializable` message -kill pid reason -- reason is a string - the *kill* signal cannot be caught -die reason -- as 'exit' but kills *us* -{% endhighlight %} - -The `exit` and `kill` primitives do essentially the same thing, but catching -the specific exception thrown by `kill` is impossible, making `kill` an -*untrappable exit signal*. Of course you could trap **all** exceptions, but -you already know that's a very bad idea right!? - -The `exit` primitive is a little different. This provides support for trapping -exit signals in a generic way, so long as your *exit handler* is able to -recognise the underlying type of the 'exit reason'. This (reason for exiting) -is stored as a raw `Message`, so if your handler takes the appropriate type -as an input (and therefore the `Message` can be decoded and passed to the -handler) then the handler will run. This is pretty much the same approach as -exception handling using `Typeable`, except that we decide whether or not the -exception can be handled based on the type of `reason` instead of the type of -the exception itself. - -Calling `die` will immediately raise an exit signal (i.e., `ProcessExitException`) -in the calling process. +certainly the wrong thing to do. Firstly, processes might reside on a remote +node, in which case throwing an exception is impossible. 
Secondly, if we send +some messages to a process' mailbox and then dispatch an exception to kill it, +there is no guarantee that the subject will receive our message before being +terminated by the asynchronous exception. + +To terminate a process unconditionally, we use the `kill` primitive, which +dispatches an asynchronous exception (killing the subject) safely, respecting +remote calls to processes on disparate nodes and observing message ordering +guarantees such that `send pid "hello" >> kill pid "goodbye"` behaves quite +unsurprisingly, delivering the message before the kill signal. + +Exit signals come in two flavours however - those that can be caught and those +that cannot. Whilst a call to `kill` results in an _un-trappable_ exception, +a call to `exit :: (Serializable a) => ProcessId -> a -> Process ()` will dispatch +an exit signal to the specified process that can be caught. These *signals* are +intercepted and handled by the destination process using `catchExit`, allowing +the receiver to match on the `Serializable` datum tucked away in the *exit signal* +and decide whether to oblige or not. ---- @@ -373,7 +327,7 @@ The [distributed-process-platform][18] library implements parts of the in the original paper and implemented by the [remote][14] package. In particular, we diverge from the original design and defer to many of the principles defined by Erlang's [Open Telecom Platform][13], taking in some well established -Haskell concurrency design patterns alongside. +Haskell concurrency design patterns along the way. In fact, [distributed-process-platform][18] does not really consider the *task layer* in great detail. We provide an API comparable to remote's @@ -465,6 +419,12 @@ The API for `Async` is fairly rich, so reading the haddocks is suggested. #### Managed Processes +The main idea behind a `ManagedProcess` is to separate the functional +and non-functional aspects of an actor. 
By functional, we mean whatever +application specific task the actor performs, and by non-functional +we mean the *concurrency* or, more precisely, handling of the process' +mailbox and its interaction with other actors (i.e., clients). + Looking at *typed channels*, we noted that their insistence on a specific input domain was more *haskell-ish* than working with bare send and receive primitives. The `Async` sub-package also provides a type safe interface for receiving data, @@ -473,12 +433,12 @@ although it is limited to running a computation and waiting for its result. The [Control.Distributed.Processes.Platform.ManagedProcess][21] API provides a number of different abstractions that can be used to achieve similar benefits in your code. It works by introducing a standard protocol between your process -and the *world around*, which governs how to handle request/reply processing, -exit signals, timeouts, sleep/hibernation with `threadDelay` and even provides +and the *world outside*, which governs how to handle request/reply processing, +exit signals, timeouts, sleeping/hibernation with `threadDelay` and even provides hooks that terminating processes can use to clean up residual state. The [API documentation][21] is quite extensive, so here we will simply point -out the obvious differences. A implemented implemented with `ManagedProcess` +out the obvious differences. A process implemented with `ManagedProcess` can present a type safe API to its callers (and the server side code too!), although that's not its primary benefit. For a very simplified example: @@ -516,6 +476,18 @@ just provides callback functions which take some state and either return a new state and a reply, or just a new state. The process is *managed* in the sense that its mailbox is under someone else's control. 
+A NOTE ABOUT THE CALL API AND THAT IT WILL FAIL (WITH UNHANDLED MESSAGE) IF +THE CALLER IS EXPECTING A TYPE THAT DIFFERS FROM THE ONE THE SERVER PLANS +TO RETURN, SINCE THE RETURN TYPE IS ENCODED IN THE CALL-MESSAGE TYPE ITSELF. + +TODO: WRITE A TEST TO PROVE THE ABOVE + +TODO: ADD AN API BASED ON SESSION TYPES AS A KIND OF MANAGED PROCESS..... + +In a forthcoming tutorial, we'll look at the `Control.Distributed.Process.Platform.Task` +API, which looks a lot like `Async` but manages exit signals in a single thread and makes +configurable task pools and task supervision strategy part of its API. + More complex examples of the `ManagedProcess` API can be seen in the [Managed Processes tutorial][22]. API documentation for HEAD is available [here][21]. diff --git a/tutorials/ch-tutorial1.md b/tutorials/ch-tutorial1.md new file mode 100644 index 0000000..65a0c1f --- /dev/null +++ b/tutorials/ch-tutorial1.md @@ -0,0 +1,225 @@ +--- +layout: tutorial +categories: tutorial +sections: ['Getting Started', 'Installing from source', 'Creating a node', 'Sending messages', 'Spawning Remote Processes'] +title: Getting Started +--- + +### Getting Started + +----- + +In order to go through this tutorial, you will need a Haskell development +environment and we recommend installing the latest version of the +[Haskell Platform](http://www.haskell.org/platform/) if you've not done +so already. + +Once you're up and running, you'll want to get hold of the distributed-process +library and a choice of network transport backend. This guide will use +the network-transport-tcp backend, but other backends may be available +on github. + +### Installing from source + +If you're installing from source, the simplest method is to checkout the +[Umbrella Project](https://github.com/haskell-distributed/cloud-haskell) and +run `make` to obtain the complete set of source repositories for building +Cloud Haskell. 
The additional makefiles bundled with the umbrella assume +that you have a recent version of cabal-dev installed. + +### Creating a node + +Cloud Haskell's *lightweight processes* reside on a "node", which must +be initialised with a network transport implementation and a remote table. +The latter is required so that physically separate nodes can identify known +objects in the system (such as types and functions) when receiving messages +from other nodes. We will look at inter-node communication later, for now +it will suffice to pass the default remote table, which defines the built-in +types that Cloud Haskell needs at a minimum in order to run. + +We start with our imports: + +{% highlight haskell %} +import Network.Transport.TCP (createTransport, defaultTCPParameters) +import Control.Distributed.Process +import Control.Distributed.Process.Node +{% endhighlight %} + +Our TCP network transport backend needs an IP address and port to get started +with: + +{% highlight haskell %} +main :: IO () +main = do + Right t <- createTransport "127.0.0.1" "10501" defaultTCPParameters + node <- newLocalNode t initRemoteTable + .... +{% endhighlight %} + +And now we have a running node. + +### Sending messages + +We start a new process by evaluating `forkProcess`, which takes a node, +a `Process` action - because our concurrent code will run in the `Process` +monad - and returns an address for the process in the form of a `ProcessId`. +The process id can be used to send messages to the running process - here we +will send one to ourselves! + +{% highlight haskell %} +-- in main + _ <- forkProcess node $ do + -- get our own process id + self <- getSelfPid + send self "hello" + hello <- expect :: Process String + liftIO $ putStrLn hello + return () +{% endhighlight %} + +Lightweight processes are implemented as `forkIO` threads. 
In general we will +try to forget about this implementation detail, but let's note that we +haven't deadlocked our own thread by sending to and receiving from its mailbox +in this fashion. Sending messages is a completely asynchronous operation - even +if the recipient doesn't exist, no error will be raised and evaluating `send` +will not block the caller, even if the caller is sending messages to itself! + +Receiving works the opposite way, blocking the caller until a message +matching the expected type arrives in our (conceptual) mailbox. If multiple +messages of that type are present in the mailbox, they're be returned in FIFO +order, if not, the caller is blocked until a message arrives that can be +decoded to the correct type. + +Let's spawn two processes on the same node and have them talk to each other. + +{% highlight haskell %} +import Control.Concurrent (threadDelay) +import Control.Monad (forever) +import Control.Distributed.Process +import Control.Distributed.Process.Node +import Network.Transport.TCP (createTransport, defaultTCPParameters) + +replyBack :: (ProcessId, String) -> Process () +replyBack (sender, msg) = send sender msg + +logMessage :: String -> Process () +logMessage msg = say $ "handling " ++ msg + +main :: IO () +main = do + Right t <- createTransport "127.0.0.1" "10501" defaultTCPParameters + node <- newLocalNode t initRemoteTable + forkProcess node $ do + -- Spawn another worker on the local node + echoPid <- spawnLocal $ forever $ do + -- Test our matches in order against each message in the queue + receiveWait [match logMessage, match replyBack] + + -- The `say` function sends a message to a process registered as "logger". + -- By default, this process simply loops through its mailbox and sends + -- any received log message strings it finds to stderr. + + say "send some messages!" 
+
+  send echoPid "hello"
+  self <- getSelfPid
+  send echoPid (self, "hello")
+
+  -- `expectTimeout` waits for a message or times out after "delay"
+  m <- expectTimeout 1000000
+  case m of
+    -- Die immediately - throws a ProcessExitException with the given reason.
+    Nothing  -> die "nothing came back!"
+    (Just s) -> say $ "got " ++ s ++ " back!"
+  return ()
+
+  -- A 1 second wait. Otherwise the main thread can terminate before
+  -- our messages reach the logging process or get flushed to stdio
+  liftIO $ threadDelay (1*1000000)
+  return ()
+{% endhighlight %}
+
+Note that we've used the `receive` class of functions this time around.
+These can be used with the [`Match`][5] data type to provide a range of
+advanced message processing capabilities. The `match` primitive allows you
+to construct a "potential message handler" and have it evaluated
+against received (or incoming) messages. As with `expect`, if the mailbox does
+not contain a message that can be matched, the evaluating process will be
+blocked until a message arrives which _can_ be matched.
+
+In the _echo server_ above, our first match prints out whatever string it
+receives. If the first message in our mailbox is not a `String`, then our second
+match is evaluated. This, given a tuple `t :: (ProcessId, String)`, will send
+the `String` component back to the sender's `ProcessId`. If neither match
+succeeds, the echo server blocks until another message arrives and
+tries again.
+
+### Serializable Data
+
+Processes may send any datum whose type implements the `Serializable` typeclass,
+which is done indirectly by deriving `Binary` and `Typeable`. Implementations are
+provided for most of Cloud Haskell's primitives and various common data types.
+
+### Spawning Remote Processes
+
+In order to spawn processes on a remote node without additional compiler
+infrastructure, we make use of "static values": values that are known at
+compile time.
Closures in functional programming arise when we partially +apply a function. In Cloud Haskell, a closure is a code pointer, together +with requisite runtime data structures representing the value of any free +variables of the function. A remote spawn therefore, takes a closure around +an action running in the `Process` monad: `Closure (Process ())`. + +In distributed-process if `f : T1 -> T2` then + +{% highlight haskell %} + $(mkClosure 'f) :: T1 -> Closure T2 +{% endhighlight %} + +That is, the first argument to the function we pass to mkClosure will act +as the closure environment for that process. If you want multiple values +in the closure environment, you must "tuple them up". + +We need to configure our remote table (see the documentation for more details) +and the easiest way to do this, is to let the library generate the relevant +code for us. For example (taken from the distributed-process-platform test suites): + +{% highlight haskell %} +sampleTask :: (TimeInterval, String) -> Process String +sampleTask (t, s) = sleep t >> return s + +$(remotable ['sampleTask]) +{% endhighlight %} + +We can now create a closure environment for `sampleTask` like so: + +{% highlight haskell %} +($(mkClosure 'sampleTask) (seconds 2, "foobar")) +{% endhighlight %} + +The call to `remotable` generates a remote table and a definition +`__remoteTable :: RemoteTable -> RemoteTable` in our module for us. 
+We compose this with other remote tables in order to come up with a +final, merged remote table for use in our program: + +{% highlight haskell %} +myRemoteTable :: RemoteTable +myRemoteTable = Main.__remoteTable initRemoteTable + +main :: IO () +main = do + localNode <- newLocalNode transport myRemoteTable + -- etc +{% endhighlight %} + +Note that we're not limited to sending `Closure`s - it is possible to send data +without having static values, and assuming the receiving code is able to decode +this data and operate on it, we can easily put together a simple AST that maps +to operations we wish to execute remotely. + +------ + +[1]: /static/doc/distributed-process/Control-Distributed-Process.html#v:Message +[2]: http://hackage.haskell.org/package/distributed-process +[3]: /static/doc/distributed-process-platform/Control-Distributed-Process-Platform-Async.html +[4]: /static/doc/distributed-process-platform/Control-Distributed-Process-Platform-ManagedProcess.htmlv:callAsync +[5]: http://hackage.haskell.org/packages/archive/distributed-process/latest/doc/html/Control-Distributed-Process-Internal-Primitives.html#t:Match diff --git a/tutorials/ch-tutorial2.md b/tutorials/ch-tutorial2.md new file mode 100644 index 0000000..10db1d0 --- /dev/null +++ b/tutorials/ch-tutorial2.md @@ -0,0 +1,111 @@ +--- +layout: tutorial +categories: tutorial +sections: ['Overview', 'A Simple Example', 'Master Slave Configurations', 'Other Topologies and Backends'] +title: Managing Topologies +--- + +### Overview + +In Cloud Haskell, the system topology is determined by your choice of _Cloud Haskell Backend_. +The basic topology that Cloud Haskell currently ships with is determined by the +[`simplelocalnet`][1] backend, which provides for a fully connected grid of nodes with optional +master-slave configuration. This backend allows nodes to discover one another using UDP multicast. 
+It is a zero-configuration backend designed to get you going with Cloud Haskell quickly without +imposing any particular structure on your application. + +Other backends might work in a completely different manner, offering different types of (and +relationships between) nodes, or simply by handling discovery differently (e.g., pre-defined +node names/addresses, or by using some form of registrar such as DNS-SD/Bonjour). + +### A Simple Example + +Here is an example program built against the [`simplelocalnet`][1] backend, that periodically +searches for a list of peer nodes, and sends a message to a registered (named) process on each. + +{% highlight haskell %} +import System.Environment (getArgs) +import Control.Distributed.Process +import Control.Distributed.Process.Node (initRemoteTable) +import Control.Distributed.Process.Backend.SimpleLocalnet +import Control.Monad (forever, mapM_) + +main = do + [host, port] <- getArgs + + backend <- initializeBackend host port initRemoteTable + node <- newLocalNode backend + runProcess node $ forever $ do + findPeers backend >>= mapM_ $ \peer -> nsendRemote peer "echo-server" "hello!" + +{% endhighlight %} + +Clearly the program isn't very useful, but it illustrates the two key concepts that +`simplelocalnet` relies on. Firstly, that we `initializeBackend` in order to get +connected to an underlying communications infrastructure and secondly, that we can +evaluate `findPeers` at any time to obtain the set of other nodes that have broadcast +their presence. + +### Master Slave Configurations + +Here we simply rehash the master/slave example from the `simplelocalnet` documentation. +With the same imports as the example above, we add a no-op slave and a master that +takes a list of its (known) slaves, which it prints out before terminating them all. 
+
+{% highlight haskell %}
+main :: IO ()
+main = do
+  args <- getArgs
+
+  case args of
+    ["master", host, port] -> do
+      backend <- initializeBackend host port initRemoteTable
+      startMaster backend (master backend)
+    ["slave", host, port] -> do
+      backend <- initializeBackend host port initRemoteTable
+      startSlave backend
+
+{% endhighlight %}
+
+And the master node is defined thus:
+
+{% highlight haskell %}
+master :: Backend -> [NodeId] -> Process ()
+master backend slaves = do
+  -- Do something interesting with the slaves
+  liftIO . putStrLn $ "Slaves: " ++ show slaves
+  -- Terminate the slaves when the master terminates (this is optional)
+  terminateAllSlaves backend
+{% endhighlight %}
+
+### Other Topologies and Backends
+
+Many other topologies are in development, including one that runs on Windows Azure,
+which is available [here][2]. Some third party backends have also been developed,
+such as the [`distributed-process-p2p`][3] backend, which, given a known node address,
+discovers and maintains knowledge of its peers.
+
+Here is an example of node discovery using the [`distributed-process-p2p`][3]
+backend:
+
+{% highlight haskell %}
+import System.Environment (getArgs)
+import Control.Distributed.Process
+import Control.Distributed.Process.Node (initRemoteTable)
+import Control.Distributed.Process.Backend.P2P
+import Control.Monad (forever, mapM_)
+
+main = do
+  [host, port] <- getArgs
+
+  backend <- initializeBackend host port initRemoteTable
+  node <- newLocalNode backend
+  runProcess node $ forever $ do
+    findPeers >>= mapM_ $ \peer -> nsend peer "echo-server" "hello!"
+ +{% endhighlight %} + +[1]: http://hackage.haskell.org/package/distributed-process-simplelocalnet +[2]: http://hackage.haskell.org/package/distributed-process-azure +[3]: https://bitbucket.org/dpwiz/distributed-process-p2p + diff --git a/tutorials/ch-tutorial3.md b/tutorials/ch-tutorial3.md new file mode 100644 index 0000000..bd1a2ab --- /dev/null +++ b/tutorials/ch-tutorial3.md @@ -0,0 +1,375 @@ +--- +layout: tutorial +categories: tutorial +sections: ['Message Ordering', 'Selective Receive', 'Advanced Mailbox Processing', 'Typed Channels', 'Process Lifetime', 'Monitoring And Linking', 'Getting Process Info'] +title: Getting to know Processes +--- + +### Message Ordering + +We have already met the `send` primitive, used to deliver messages from one +process to another. Here's a review of what we've learned about `send` thus far: + +1. sending is asynchronous (i.e., it does not block the caller) +2. sending _never_ fails, regardless of the state of the recipient process +3. even if a message is received, there is **no** guarantee *when* it will arrive +4. there are **no** guarantees that the message will be received at all + +Asynchronous sending buys us several benefits. Improved concurrency is +possible, because processes need not block or wait for acknowledgements, +nor does error handling need to be implemented each time a message is sent. +Consider a stream of messages sent from one process to another. If the +stream consists of messages `a, b, c` and we have seen `c`, then we know for +certain that we will have already seen `a, b` (in that order), so long as the +messages were sent to us by the same peer process. + +When two concurrent process exchange messages, Cloud Haskell guarantees that +messages will be delivered in FIFO order, if at all. 
No such guarantee exists
+between N processes where N > 2, so if processes _A_ and _B_ are both
+communicating (concurrently) with process _C_, the ordering guarantee will
+only hold for each pair of interactions, i.e., between _A_ and _C_ and/or
+_B_ and _C_ the ordering will be guaranteed, but not between _A_ and _B_
+with regard to messages sent to _C_.
+
+Because the mailbox contains messages of varying types, when we `expect`
+a message, we eschew the ordering because we're searching for a message
+whose contents can be decoded to a specific type. Of course, we may _want_
+to process messages in the precise order in which they arrived. To achieve
+this, we must defer the type checking that would normally cause a traversal
+of the mailbox and extract the _raw_ message ourselves. This can be achieved
+using `receive` and `matchAny`, as we will demonstrate later.
+
+### Selective Receive
+
+Processes dequeue messages (from their mailbox) using the [`expect`][1]
+and [`receive`][2] family of primitives. Both take an optional timeout,
+allowing the expression to evaluate to `Nothing` if no matching input
+is found.
+
+The [`expect`][1] primitive blocks until a message matching the expected type
+(of the expression) is found in the process' mailbox. If a match is found by
+scanning the mailbox, it is dequeued and returned, otherwise the caller
+(i.e., the calling thread/process) is blocked until a message of the expected
+type is delivered to the mailbox. Let's take a look at this in action:
+
+{% highlight haskell %}
+demo :: Process ()
+demo = do
+    listener <- spawnLocal listen
+    send listener "hello"
+    getSelfPid >>= send listener
+    () <- expect
+  where
+    listen = do
+      third <- expect :: Process ProcessId
+      first <- expect :: Process String
+      second <- expectTimeout 100000 :: Process String
+      mapM_ (say . show) [first, second, third]
+      send third ()
+{% endhighlight %}
+
+This program will print `"hello"`, then `Nothing` and finally `pid://...`.
+The first `expect` - labelled "third" because of the order in which we +know it will arrive in our mailbox - **will** succeed, since the parent process +sends its `ProcessId` after the string "hello", yet the listener blocks until it +can dequeue the `ProcessId` before "expecting" a string. The second `expect` +(labelled "first") also succeeds, demonstrating that the listener has selectively +removed messages from its mailbox based on their type rather than the order in +which they arrived. The third `expect` will timeout and evaluate to `Nothing`, +because only one string is ever sent to the listener and that has already been +removed from the mailbox. The removal of messages from the process' mailbox based +on type is what makes this program viable - without this "selective receiving", +the program would block and never complete. + +By contrast, the [`recieve`][2] family of primitives take a list of `Match` +objects, each derived from evaluating a [`match`][3] style primitive. This +subject was covered briefly in the first tutorial. Matching on messages allows +us to separate the type(s) of messages we can handle from the type that the +whole `receive` expression evaluates to. + +Consider the following snippet: + +{% highlight haskell %} +usingReceive = do + () <- receiveWait [ + match (\(s :: String) -> say s) + , match (\(i :: Int) -> say $ show i) + ] +{% endhighlight %} + +Note that each of the matches in the list must evaluate to the same type, +as the type signature indicates: `receiveWait :: [Match b] -> Process b`. + +The behaviour of `receiveWait` differs from `receiveTimeout` in that it +blocks forever (until a match is found in the process' mailbox), whereas the +variant taking a timeout will return `Nothing` unless a match is found within +the specified time interval. Note that as with `System.Timeout`, the only +guarantee we have about a timeout based function is that it will not +expire _before_ the given interval. 
Both functions scan the mailbox in FIFO +order, evaluating the list of `match` expressions in declarative +(i.e., insertion) order until one of the matches succeeds or the operation +times out. + +### Advanced Mailbox Processing + +There are times when it is desirable to take a message from our mailbox without +explicitly specifying its type. Not only is this a useful capability, it is the +_only_ way to process messages in the precise order they were received. + +To see how this works in practise, let's consider the `relay` primitive that +ships with distributed-process. This utility function starts a process that +simply dequeues _any_ messages it receives and forwards them to some other process. +In order to dequeue messages regardless of their type, this code relies on the +`matchAny` primitive, which has the following type: + +{% highlight haskell %} +matchAny :: forall b. (Message -> Process b) -> Match b +{% endhighlight %} + +Since forwarding _raw messages_ (without decoding them first) is a common pattern +in Cloud Haskell programs, there is also a primitive to do that for us: + +{% highlight haskell %} +forward :: Message -> ProcessId -> Process () +{% endhighlight %} + +Given these types, we can see that in order to combine `matchAny` with `forward` +we need to either _flip_ `forward` and apply the `ProcessId` (leaving us with +the required type `Message -> Process b`) or use a lambda - the actual implementation +does the latter and looks like this: + +{% highlight haskell %} +relay :: ProcessId -> Process () +relay !pid = forever' $ receiveWait [ matchAny (\m -> forward m pid) ] +{% endhighlight %} + +This is pretty useful, but since `matchAny` operates on the raw `Message` type, +we're limited in what we can do with the messages we receive. In order to delve +_inside_ a message, we have to know its type. 
If we have an expression that operates +on a specific type, we can _attempt_ to decode the message to that type and examine +the result to see whether the decoding succeeds or not. There are two primitives +we can use to that effect: `unwrapMessage` and `handleMessage`. Their types look like +this: + +{% highlight haskell %} +unwrapMessage :: forall m a. (Monad m, Serializable a) => Message -> m (Maybe a) + +handleMessage :: forall m a b. (Monad m, Serializable a) => Message -> (a -> m b) -> m (Maybe b) +{% endhighlight %} + +Of the two, `unwrapMessage` is the simpler, taking a raw `Message` and evaluating to +`Maybe a` before returning that value in the monad `m`. If the type of the raw `Message` +does not match our expectation, the result will be `Nothing`, otherwise `Just a`. + +The approach `handleMessage` takes is a bit more flexible, taking a function +from `a -> m b` and returning `Just b` if the underlying message is of type `a` (hence the +operation can be executed and evaluate to `Maybe b`) or `Nothing` if the message's type +is incompatible with the handler function. + +Let's look at `handleMessage` in action. Earlier on we looked at `relay` from +distributed-process and now we'll consider its sibling `proxy` - this takes a predicate, +evaluates some input of type `a` and returns `Process Bool`, allowing us to run arbitrary +`Process` code in order to decide whether or not the `a` is eligible to be forwarded to +the relay `ProcessId`. The type of `proxy` is thus: + +{% highlight haskell %} +proxy :: Serializable a => ProcessId -> (a -> Process Bool) -> Process () +{% endhighlight %} + +Since `matchAny` operates on `(Message -> Process b)` and `handleMessage` operates on +`a -> Process b` we can compose these to make our proxy server. We must not forward +messages for which the predicate function evaluates to `Just False`, nor can we sensibly +forward messages which the predicate function is unable to evaluate due to type +incompatibility. 
This leaves us with the definition found in distributed-process:
+
+{% highlight haskell %}
+proxy pid proc = do
+  receiveWait [
+      matchAny (\m -> do
+        next <- handleMessage m proc
+        case next of
+          Just True  -> forward m pid
+          Just False -> return ()  -- explicitly ignored
+          Nothing    -> return ()) -- un-routable / cannot decode
+    ]
+  proxy pid proc
+{% endhighlight %}
+
+Beyond simple relays and proxies, the raw message handling capabilities available in
+distributed-process can be utilised to develop highly generic message processing code.
+All the richness of the distributed-process-platform APIs (such as `ManagedProcess`) which
+will be discussed in later tutorials are, in fact, built upon these families of primitives.
+
+### Typed Channels
+
+While being able to send and receive any `Serializable` datum is very powerful, the burden
+of decoding types correctly at runtime is levied on the programmer and there are runtime
+overheads to be aware of (which will be covered in later tutorials). Fortunately,
+distributed-process provides a type safe alternative to `send` and `receive`, in the form
+of _Typed Channels_. Represented by distinct ends, a `SendPort a` (which is `Serializable`)
+and `ReceivePort a` (which is not), channels are a lightweight and useful abstraction that
+provides a type safe interface for interacting with processes separately from their primary
+mailbox.
+
+Channels are created with `newChan :: Process (SendPort a, ReceivePort a)`, with
+messages sent via `sendChan :: SendPort a -> a -> Process ()`. The `ReceivePort` can be
+passed directly to `receiveChan`, or used in a `receive{Wait, Timeout}` call via the
+`matchChan` primitive, so as to combine mailbox scans with channel reads.
+
+### Process Lifetime
+
+A process will continue executing until it has evaluated to some value, or is abruptly
+terminated either by crashing (with an un-handled exception) or being instructed to
+stop executing.
Deliberate stop instructions take one of two forms: a `ProcessExitException`
+or `ProcessKillException`. As the names suggest, these _signals_ are delivered in the form
+of asynchronous exceptions, however you should not rely on that fact! After all,
+we cannot throw an exception to a thread that is executing in some other operating
+system process or on a remote host! Instead, you should use the [`exit`][5] and [`kill`][6]
+primitives from distributed-process, which not only ensure that remote target processes
+are handled seamlessly, but also maintain a guarantee that if you send a message and
+*then* an exit signal, the message will be delivered to the destination process (via its
+local node controller) before the exception is thrown - note that this does not guarantee
+that the destination process will have time to _do anything_ with the message before it
+is terminated.
+
+The `ProcessExitException` signal is sent from one process to another, indicating that the
+receiver is being asked to terminate. A process can choose to tell itself to exit, and the
+[`die`][7] primitive simplifies doing so without worrying about the expected type for the
+action. In fact, [`die`][7] has slightly different semantics from [`exit`][5], since the
+latter involves sending an internal signal to the local node controller. A direct consequence
+of this is that the _exit signal_ may not arrive immediately, since the _Node Controller_ could
+be busy processing other events. On the other hand, the [`die`][7] primitive throws a
+`ProcessExitException` directly in the calling thread, thus terminating it without delay.
+In practice, this means the following two functions could behave quite differently at
+runtime:
+
+{% highlight haskell %}
+
+-- this will never print anything...
+demo1 = die "Boom" >> expect >>= say + +-- this /might/ print something before it exits +demo2 = do + self <- getSelfPid + exit self "Boom" + expect >>= say +{% endhighlight %} + +The `ProcessExitException` type holds a _reason_ field, which is serialised as a raw `Message`. +This exception type is exported, so it is possible to catch these _exit signals_ and decide how +to respond to them. Catching _exit signals_ is done via a set of primitives in +distributed-process, and the use of them forms a key component of the various fault tolerance +strategies provided by distributed-process-platform. + +A `ProcessKillException` is intended to be an _untrappable_ exit signal, so its type is +not exported and therefore you can __only__ handle it by catching all exceptions, which +as we all know is very bad practise. The [`kill`][6] primitive is intended to be a +_brutal_ means for terminating process - e.g., it is used to terminate supervised child +processes that haven't shutdown on request, or to terminate processes that don't require +any special cleanup code to run when exiting - although it does behave like [`exit`][5] +in so much as it is dispatched (to the target process) via the _Node Controller_. + +### Monitoring and Linking + +Processes can be linked to other processes (or nodes or channels). A link, which is +unidirectional, guarantees that once any object we have linked to *exits*, we will also +be terminated. A simple way to test this is to spawn a child process, link to it and then +terminate it, noting that we will subsequently die ourselves. Here's a simple example, +in which we link to a child process and then cause it to terminate (by sending it a message +of the type it is waiting for). Even though the child terminates "normally", our process +is also terminated since `link` will _link the lifetime of two processes together_ regardless +of exit reasons. 
+
+{% highlight haskell %}
+demo = do
+  pid <- spawnLocal $ expect >>= return
+  link pid
+  send pid ()
+  () <- expect
+{% endhighlight %}
+
+The medium that link failures uses to signal exit conditions is the same as exit and kill
+signals - asynchronous exceptions. Once again, it is a bad idea to rely on this (not least
+because it might change in some future release) and the exception type (`ProcessLinkException`)
+is not exported so as to prevent developers from abusing exception handling code in this
+special case. Since link exit signals cannot be caught directly, if you find yourself wanting
+to _trap_ a link failure, you probably want to use a monitor instead.
+
+Whilst the built-in `link` primitive terminates the link-ee regardless of exit reason,
+distributed-process-platform provides an alternate function `linkOnFailure`, which only
+dispatches the `ProcessLinkException` if the link-ed process dies abnormally (i.e., with
+some `DiedReason` other than `DiedNormal`).
+
+Monitors on the other hand, do not cause the *listening* process to exit at all, instead
+putting a `ProcessMonitorNotification` into the process' mailbox. This signal and its
+constituent fields can be introspected in order to decide what action (if any) the receiver
+can/should take in response to the monitored process' death. Let's take a look at how
+monitors can be used to determine both when and _how_ a process has terminated. Tucked
+away in distributed-process-platform, the `linkOnFailure` primitive works in exactly this
+way, only terminating the caller if the subject terminates abnormally. Let's take a look...
+ +{% highlight haskell %} +linkOnFailure them = do + us <- getSelfPid + tid <- liftIO $ myThreadId + void $ spawnLocal $ do + callerRef <- P.monitor us + calleeRef <- P.monitor them + reason <- receiveWait [ + matchIf (\(ProcessMonitorNotification mRef _ _) -> + mRef == callerRef) -- nothing left to do + (\_ -> return DiedNormal) + , matchIf (\(ProcessMonitorNotification mRef' _ _) -> + mRef' == calleeRef) + (\(ProcessMonitorNotification _ _ r') -> return r') + ] + case reason of + DiedNormal -> return () + _ -> liftIO $ throwTo tid (ProcessLinkException us reason) +{% endhighlight %} + +As we can see, this code makes use of monitors to track both processes involved in the +link. In order to track _both_ processes and react to changes in their status, it is +necessary to spawn a third process which will do the monitoring. This doesn't happen +with the built-in link primitive, but is necessary in this case since the link handling +code resides outside the _Node Controller_. + +The two matches passed to `receiveWait` both handle a `ProcessMonitorNotification`, and +the predicate passed to `matchIf` is used to determine whether the notification we're +receiving is for the process that called us, or the _linked to_ process. If the former +dies, we've nothing more to do, since links are unidirectional. If the latter dies +however, we must examine the `DiedReason` the `ProcessMonitorNotification` provides us +with, to determine whether the subject exited normally (i.e., with `DiedNormal`). +If the exit was _abnormal_, we throw a `ProcessLinkException` to the original caller, +which is exactly how an ordinary link would behave. + +Linking and monitoring are foundational tools for *supervising* processes, where a top level +process manages a set of children, starting, stopping and restarting them as necessary. + +Exit signals in Cloud Haskell then, are unlike asynchronous exceptions in other +haskell code. 
Whilst a process *can* use asynchronous exceptions - there's
+nothing stopping this since the `Process` monad is an instance of `MonadIO` -
+as we've seen, exceptions thrown are not bound by the same ordering guarantees
+as messages delivered to a process. Link failures and exit signals *might* work
+via asynchronous exceptions - that is the case in the current implementation - but
+these are implemented in such a fashion that if you send a message and *then* an
+exit signal, the message is guaranteed to arrive first.
+
+You should avoid throwing your own exceptions in code where possible. Instead,
+you should terminate yourself, or another process, using the built-in primitives
+`exit`, `kill` and `die`.
+
+### Getting Process Info
+
+The `getProcessInfo` function provides a means for us to obtain information about a running
+process. The `ProcessInfo` type it returns contains the local node id and a list of
+registered names, monitors and links for the process. The call returns `Nothing` if the
+process in question is not alive.
+
+[1]: http://hackage.haskell.org/package/distributed-process/docs/Control-Distributed-Process.html#v:receiveWait
+[2]: http://hackage.haskell.org/package/distributed-process/docs/Control-Distributed-Process.html#v:expect
+[3]: http://hackage.haskell.org/package/distributed-process-0.4.2/docs/Control-Distributed-Process.html#v:match
+[4]: /static/semantics.pdf
+[5]: http://hackage.haskell.org/package/distributed-process-0.4.2/docs/Control-Distributed-Process.html#v:exit
+[6]: http://hackage.haskell.org/package/distributed-process-0.4.2/docs/Control-Distributed-Process.html#v:kill
+[7]: http://hackage.haskell.org/package/distributed-process-0.4.2/docs/Control-Distributed-Process.html#v:die
diff --git a/tutorials/ch-tutorial4.md b/tutorials/ch-tutorial4.md
new file mode 100644
index 0000000..6566a59
--- /dev/null
+++ b/tutorials/ch-tutorial4.md
@@ -0,0 +1,624 @@
+---
+layout: tutorial
+sections: ['Introduction', 'Managed Processes', 'A Basic Example', 'Building a Task Queue', 'Implementing the Client', 'Implementing the Server', 'Making use of Async', 'Wiring up Handlers', 'Putting it all together', 'Performance Considerations']
+categories: tutorial
+title: Managed Process Tutorial
+---
+
+### Introduction
+
+The source code for this tutorial is based on the `BlockingQueue` module
+from distributed-process-platform and can be accessed [here][1].
+Please note that this tutorial is based on the stable (master) branch
+of distributed-process-platform.
+
+### Managed Processes
+
+There are subtle bugs waiting in code that evaluates `send` and `receive`
+directly. Forgetting to monitor the destination whilst waiting for a reply
+and failing to match on the correct message types are the most common ones,
+but others exist (such as badly formed `Binary` instances for user defined
+data types).
+ +The /Managed Process/ API handles _all_ sending and receiving of messages, +error handling and decoding problems on your behalf, leaving you to focus +on writing code that describes _what the server process does_ when it receives +messages, rather than how it receives them. The API also provides a set of +pre-defined client interactions, all of which have well defined semantics +and failure modes. + +A managed process server definition is defined using record syntax, with +a list of `Dispatcher` types that describe how the server should handle +particular kinds of client interaction, for specific types. The fields +of the `ProcessDefinition` record also provide for error handling (in case +of either server code crashing _or_ exit signals dispatched to the server +process) and _cleanup_ code required to run on terminate/shutdown. + +{% highlight haskell %} +myServer :: ProcessDefinition MyStateType +myServer = + ProcessDefinition { + apiHandlers = [ + -- a list of Dispatcher, derived from calling + -- handleInfo or handleRaw with a suitable function, e.g., + handleCast myFunctionThatDoesNotReply + , handleCall myFunctionThatDoesReply + , handleRpcChan myFunctionThatRepliesViaTypedChannels + ] + , infoHandlers = [ + -- a list of DeferredDispatcher, derived from calling + -- handleInfo or handleRaw with a suitable function, e.g., + handleInfo myFunctionThatHandlesOneSpecificNonCastNonCallMessageType + , handleRaw myFunctionThatHandlesRawMessages + ] + , exitHandlers = [ + -- a list of ExitSignalDispatcher, derived from calling + -- handleExit with a suitable function, e.g., + handleExit myExitHandlingFunction + ] + -- what should I do just before stopping? + , terminateHandler = myTerminateFunction + -- what should I do about messages that cannot be handled? + , unhandledMessagePolicy = Drop -- Terminate | (DeadLetter ProcessId) + } + +{% endhighlight %} + +Client interactions with a managed process come in various flavours. 
It is +still possible to send an arbitrary message to a managed process, just as +you would a regular process. When defining a protocol between client and +server processes however, it is useful to define a specific set of types +that the server expects to receive from the client and possibly replies +that the server may send back. The `cast` and `call` mechanisms in the +/managed process/ API cater for this requirement specifically, allowing +the developer tighter control over the domain of input messages from +clients, whilst ensuring that client code handles errors (such as server +failures) consistently and those input messages are routed to a suitable +message handling function in the server process. + +--------- + +### A Basic Example + +Let's consider a simple _math server_ like the one in the main documentation +page. We could allow clients to send us `(ProcessId, Double, Double)` and +reply to the first tuple element with the sum of the second and third. But +what happens if our process is killed while the client is waiting for the +reply? (The client would deadlock). The client could always set up a monitor +and wait for the reply _or_ a monitor signal, and could even write that code +generically, but what if the code evaluating the client's utility function +`expect`s the wrong type? We could use a typed channel to alleviate that ill, +but that only helps with the client receiving messages, not the server. How +can we ensure that the server receives the correct type(s) as well? Creating +multiple typed channels (one for each kind of message we're expecting) and +then distributing those to all our clients seems like a kludge. + +The `call` and `cast` APIs help us to avoid precisely this conundrum by +providing a uniform API for both the client _and_ the server to observe. 
Whilst +there is nothing to stop clients from sending messages directly to a managed +process, it is simple enough to prevent this as well (just by hiding its +`ProcessId`, either behind a newtype or some other opaque structure). The +author of the server is then able to force clients through API calls that +can enforce the required types _and_ ensure that the correct client-server +protocol is used. Here's a better example of that math server that does +just so: + +---- + +{% highlight haskell %} +module MathServer + ( -- client facing API + add + -- starting/spawning the server process + , launchMathServer + ) where + +import .... -- elided + +-- We keep this data-type hidden from the outside world, and we ignore +-- messages sent to us that we do not recognise, so misbehaving clients +-- (who do not use our API) are basically ignored. +data Add = Add Double Double + deriving (Typeable, Generic) +instance Binary Add where + +-- client facing API + +-- This is the only way clients can get a message through to us that +-- we will respond to, and since we control the type(s), there is no +-- risk of decoding errors on the server. The /call/ API ensures that +-- if the server does fail for some other reason however (such as being +-- killed by another process), the client will get an exit signal also. +-- +add :: ProcessId -> Double -> Double -> Process Double +add sid = call sid . Add + +-- server side code + +launchMathServer :: Process ProcessId +launchMathServer = + let server = statelessProcess { + apiHandlers = [ handleCall_ (\(Add x y) -> return (x + y)) ] + , unhandledMessagePolicy = Drop + } + in spawnLocal $ start () (statelessInit Infinity) server >> return () +{% endhighlight %} + + +This style of programming will already be familiar if you've used some +combination of `send` in your clients and the `receive [ match ... ]` +family of functions to write your servers. 
The primary difference here,
+is that the choice of when to return to (potentially blocking on) the
+server's mailbox is taken out of the programmer's hands, leaving the
+implementor to worry only about the logic to be applied once a message
+of one type or another is received.
+
+----
+
+Of course, it would still be possible to write the server and client code
+and encounter a type resolution failure, since `call` still takes an
+arbitrary `Serializable` datum just like `send`. We can solve that for
+the return type of the _remote_ call by sending a typed channel and
+replying explicitly to it in our server side code. Whilst this doesn't
+make the server code any prettier (since it has to reply to the channel
+explicitly, rather than just evaluating to a result), it does reduce the
+likelihood of runtime errors somewhat.
+
+{% highlight haskell %}
+-- This is the only way clients can get a message through to us that
+-- we will respond to, and since we control the type(s), there is no
+-- risk of decoding errors on the server. The /call/ API ensures that
+-- if the server does fail for some other reason however (such as being
+-- killed by another process), the client will get an exit signal also.
+--
+add :: ProcessId -> Double -> Double -> Process Double
+add sid = syncCallChan sid . Add
+
+launchMathServer :: Process ProcessId
+launchMathServer =
+  let server = statelessProcess {
+      apiHandlers = [ handleRpcChan_ (\chan (Add x y) -> sendChan chan (x + y)) ]
+    , unhandledMessagePolicy = Drop
+    }
+  in spawnLocal $ start () (statelessInit Infinity) server >> return ()
+{% endhighlight %}
+
+Ensuring that only valid types are sent to the server is relatively simple,
+given that we do not expose the client directly to `call` and write our own
+wrapper functions. An additional level of isolation and safety is available
+when using /control channels/, which will be covered in a subsequent tutorial.
+
+Before we leave the math server behind, let's take a brief look at the `cast`
+side of the client-server protocol. Unlike its synchronous cousin, `cast` does
+not expect a reply at all - it is a fire and forget call, much like `send`,
+but carries the same additional type information that a `call` does (about its
+inputs) and is also routed to a `Dispatcher` in the `apiHandlers` field of the
+process definition.
+
+We will use cast with the existing `Add` type, to implement a function that
+takes an /add request/ and prints the result instead of returning it. If we
+were implementing this with `call` we would be a bit stuck, because there is
+nothing to differentiate between two `Add` instances and the server would
+choose the first valid (i.e., type safe) handler and ignore the others.
+
+Note that because the client doesn't wait for a reply, if you execute this
+function in a test/demo application, you'll need to block the main thread
+for a while to wait for the server to receive the message and print out
+the result.
+
+{% highlight haskell %}
+
+printSum :: ProcessId -> Double -> Double -> Process ()
+printSum sid = cast sid . Add
+
+launchMathServer :: Process ProcessId
+launchMathServer =
+  let server = statelessProcess {
+      apiHandlers = [ handleRpcChan_ (\chan (Add x y) -> sendChan chan (x + y))
+                    , handleCast_ (\(Add x y) -> (liftIO $ putStrLn $ show (x + y)) >> continue_) ]
+    , unhandledMessagePolicy = Drop
+    }
+  in spawnLocal $ start () (statelessInit Infinity) server >> return ()
+{% endhighlight %}
+
+
+Of course this is a toy example - why defer simple computations like addition
+and/or printing results to a separate process? Next, we'll build something a bit
+more interesting and useful.
+
+### Building a Task Queue
+
+This section of the tutorial is based on a real module from the
+distributed-process-platform library, called `BlockingQueue`.
+ +Let's imagine we want to execute tasks on an arbitrary node, but want +the caller to block whilst the remote task is executing. We also want +to put an upper bound on the number of concurrent tasks/callers that +the server will accept. Let's use `ManagedProcess` to implement a generic +task server like this, with the following characteristics + +* requests to enqueue a task are handled immediately +* callers however, are blocked until the task completes (or fails) +* an upper bound is placed on the number of concurrent running tasks + +Once the upper bound is reached, tasks will be queued up for execution. +Only when we drop below this limit will tasks be taken from the backlog +and executed. + +Since we want the server to proceed with its work whilst the client is +blocked, the asynchronous `cast` API may sound like the ideal approach, +or we might use the asynchronous cousin of our typed-channel +handling API `callChan`. The `call` API however, offers exactly the +tools we need to keep the client blocked (waiting for a reply) whilst +the server is allowed to proceed with its work. + +### Implementing the client + +We'll start by thinking about the types we need to consume in the server +and client processes: the tasks we're being asked to perform. + +To submit a task, our clients will submit an action in the process +monad, wrapped in a `Closure` environment. We will use the `Addressable` +typeclass to allow clients to specify the server's location in whatever +manner suits them: The type of a task will be `Closure (Process a)` and +the server will explicitly return an /either/ value with `Left String` +for errors and `Right a` for successful results. + +{% highlight haskell %} +-- enqueues the task in the pool and blocks +-- the caller until the task is complete +executeTask :: forall s a . 
(Addressable s, Serializable a) + => s + -> Closure (Process a) + -> Process (Either String a) +executeTask sid t = call sid t +{% endhighlight %} + +Remember that in Cloud Haskell, the only way to communicate with a process +(apart from introducing scoped concurrency primitives like `MVar` or using +stm) is via its mailbox and typed channels. Also, all communication with +the process is asynchronous from the sender's perspective and synchronous +from the receiver's. Although `call` is a synchronous (RPC-like) protocol, +communication with the *server process* has to take place out of band. + +The server implementation chooses to reply to each request and when handling +a `call`, can defer its reply until a later stage, thus going back to +receiving and processing other messages in the meantime. As far as the client +is concerned, it is simply waiting for a reply. Note that the `call` primitive +is implemented so that messages from other processes cannot interleave with +the server's response. This is very important, since another message of type +`Either String a` could theoretically arrive in our mailbox from somewhere +else whilst we're receiving, therefore `call` transparently tags the call +message and awaits a specific reply from the server (containing the same +tag). These tags are guaranteed to be unique across multiple nodes, since +they're based on a `MonitorRef`, which holds a `Identifier ProcessId` and +a node local monitor ref counter. All monitor creation is coordinated by +the caller's node controller (guaranteeing the uniqueness of the ref +counter for the lifetime of the node) and the references are not easily +forged (i.e., sent by mistake - this is not a security feature of any sort) +since the type is opaque. + +In terms of code for the client then, that's all there is to it! 
+
+Note that the type signature we expose to our consumers is specific, and that
+we do not expose them to either arbitrary messages arriving in their mailbox
+or to exceptions being thrown in their thread. Instead we return an `Either`.
+One very important thing about this approach is that if the server replies
+with some other type (i.e., a type other than `Either String a`) then our
+client will be blocked indefinitely! We could alleviate this by using a
+typed channel as we saw previously with our math server, but there's little
+point since we're in total charge of both client and server.
+
+There are several varieties of the `call` API that deal with error
+handling in different ways. Consult the haddocks for more info about
+these.
+
+### Implementing the server
+
+Back on the server, we write a function that takes our state and an
+input message - in this case, the `Closure` we've been sent - and
+have that update the process' state and possibly launch the task
+if we have enough spare capacity.
+
+{% highlight haskell %}
+data Pool a = Pool a
+{% endhighlight %}
+
+I've called the state type `Pool` as we're providing a fixed size resource
+pool from the consumer's perspective. We could think of this as a bounded
+queue, latch or barrier of sorts, but that conflates the example a bit too
+much. We parameterise the state by the type of data that can be returned
+by submitted tasks.
+
+The updated pool must store the task **and** the caller (so we can reply
+once the task is complete). The `ManagedProcess.Server` API will provide us
+with a `Recipient` value which can be used to reply to the caller at a later
+time, so we'll make use of that here.
+
+{% highlight haskell %}
+acceptTask :: Serializable a
+           => Pool a
+           -> Recipient
+           -> Closure (Process a)
+           -> Process (Pool a)
+{% endhighlight %}
+
+For our example we will avoid using even vaguely exotic types to manage our
+process' internal state, and stick to simple property lists. 
This is hardly +efficient, but that's fine for a test/demo. + +{% highlight haskell %} +data Pool a = Pool { + poolSize :: PoolSize + , accepted :: [(Recipient, Closure (Process a))] + } deriving (Typeable) +{% endhighlight %} + +Given a pool of closures, we must now work out how to execute them +on the caller's behalf. + +### Making use of Async + +So **how** can we execute this `Closure (Process a)` without blocking the server +process itself? We will use the `Control.Distributed.Process.Platform.Async` API +to execute each task asynchronously and provide a means for waiting on the result. + +In order to use the `Async` handle to get the result of the computation once it's +complete, we'll have to hang on to a reference. We also need a way to associate the +submitter with the handle, so we end up with one field for the active (running) +tasks and another for the queue of accepted (but inactive) ones, like so... + +{% highlight haskell %} +data Pool a = Pool { + poolSize :: PoolSize + , active :: [(Recipient, Async a)] + , accepted :: [(Recipient, Closure (Process a))] + } deriving (Typeable) +{% endhighlight %} + +To turn that `Closure` environment into a thunk we can evaluate, we'll use the +built in `unClosure` function, and we'll pass the thunk to `async` and get back +a handle to the async task. + +{% highlight haskell %} +proc <- unClosure task' +asyncHandle <- async proc +{% endhighlight %} + +Of course, we decided not to block on each `Async` handle, and we can't sit +in a *loop* polling all the handles representing tasks we're running +(since no submissions would be handled whilst we're spinning waiting +for results). Instead we rely on monitors instead, so we must store a +`MonitorRef` in order to know which monitor signal relates to which +async task (and recipient). 
+
+{% highlight haskell %}
+data Pool a = Pool {
+    poolSize :: PoolSize
+  , active :: [(MonitorRef, Recipient, Async a)]
+  , accepted :: [(Recipient, Closure (Process a))]
+  } deriving (Typeable)
+{% endhighlight %}
+
+Finally we can implement the `acceptTask` function.
+
+{% highlight haskell %}
+acceptTask :: Serializable a
+           => Pool a
+           -> Recipient
+           -> Closure (Process a)
+           -> Process (Pool a)
+acceptTask s@(Pool sz' runQueue taskQueue) from task' =
+  let currentSz = length runQueue
+  in case currentSz >= sz' of
+    True -> do
+      return $ s { accepted = ((from, task'):taskQueue) }
+    False -> do
+      proc <- unClosure task'
+      asyncHandle <- async proc
+      ref <- monitorAsync asyncHandle
+      taskEntry <- return (ref, from, asyncHandle)
+      return s { active = (taskEntry:runQueue) }
+{% endhighlight %}
+
+If we're at capacity, we add the task (and caller) to the `accepted` queue,
+otherwise we launch and monitor the task using `async` and stash the monitor
+ref, caller ref and the async handle together in the `active` field. Prepending
+to the list of active/running tasks is a somewhat arbitrary choice. One might
+argue that heuristically, the younger a task is the less likely it is that it
+will run for a long time. Either way, I've done this to avoid cluttering the
+example with data structures, so we can focus on the `ManagedProcess` APIs.
+
+Now we will write a function that handles the results. When a monitor signal
+arrives, we lookup an async handle that we can use to obtain the result
+and send it back to the caller. Because, even if we were running at capacity,
+we've now seen a task complete (and therefore reduced the number of active tasks
+by one), we will also pull off a pending task from the backlog (i.e., accepted),
+if any exists, and execute it. As with the active task list, we're going to
+take from the backlog in LIFO order, which is almost certainly not what you'd want
+in a real application, but that's not the point of the example either.
+ +The steps then, are + +1. find the async handle for the monitor ref +2. pull the result out of it +3. send the result to the client +4. bump another task from the backlog (if there is one) +5. carry on + +This chain then, looks like `wait >>= respond >> bump-next-task >>= continue`. + +Item (3) requires special API support from `ManagedProcess`, because we're not +just sending *any* message back to the caller. We're replying to a specific `call` +that has taken place and is, from the client's perspective, still running. +The `ManagedProcess` API call for this is `replyTo`. + +{% highlight haskell %} +taskComplete :: forall a . Serializable a + => Pool a + -> ProcessMonitorNotification + -> Process (ProcessAction (Pool a)) +taskComplete s@(Pool _ runQ _) + (ProcessMonitorNotification ref _ _) = + let worker = findWorker ref runQ in + case worker of + Just t@(_, c, h) -> wait h >>= respond c >> bump s t >>= continue + Nothing -> continue s + where + respond :: Recipient + -> AsyncResult a + -> Process () + respond c (AsyncDone r) = replyTo c ((Right r) :: (Either String a)) + respond c (AsyncFailed d) = replyTo c ((Left (show d)) :: (Either String a)) + respond c (AsyncLinkFailed d) = replyTo c ((Left (show d)) :: (Either String a)) + respond _ _ = die $ TerminateOther "IllegalState" + + bump :: Pool a -> (MonitorRef, Recipient, Async a) -> Process (Pool a) + bump st@(Pool _ runQueue acc) worker = + let runQ2 = deleteFromRunQueue worker runQueue in + case acc of + [] -> return st { active = runQ2 } + ((tr,tc):ts) -> acceptTask (st { accepted = ts, active = runQ2 }) tr tc + +findWorker :: MonitorRef + -> [(MonitorRef, Recipient, Async a)] + -> Maybe (MonitorRef, Recipient, Async a) +findWorker key = find (\(ref,_,_) -> ref == key) + +deleteFromRunQueue :: (MonitorRef, Recipient, Async a) + -> [(MonitorRef, Recipient, Async a)] + -> [(MonitorRef, Recipient, Async a)] +deleteFromRunQueue c@(p, _, _) runQ = deleteBy (\_ (b, _, _) -> b == p) c runQ +{% endhighlight %} 
+
+That was pretty simple. We've dealt with mapping the `AsyncResult` to `Either` values,
+which we *could* have left to the caller, but this makes the client facing API much
+simpler to work with.
+
+### Wiring up handlers
+
+The `ProcessDefinition` takes a number of different kinds of handler. For our
+task queue we only need two of them: a call handler that deals with task
+submissions, and an info handler that deals with the monitor signals telling
+us that a task has completed.
+
+Call and cast handlers live in the `apiHandlers` list of our `ProcessDefinition`
+and have the type `Dispatcher s` where `s` is the state type for the process. We
+cannot construct a `Dispatcher` ourselves, but a range of functions in the
+`ManagedProcess.Server` module exist to lift functions like the ones we've just
+defined, to the correct type. The particular function we need is `handleCallFrom`,
+which works with functions over the state, `Recipient` and call data/message.
+All varieties of `handleCall` need to return a `ProcessReply`, which has the
+following type:
+
+{% highlight haskell %}
+data ProcessReply s a =
+    ProcessReply a (ProcessAction s)
+  | NoReply (ProcessAction s)
+{% endhighlight %}
+
+Again, various utility functions are defined by the API for constructing a
+`ProcessAction` and we make use of `noReply_` here, which constructs `NoReply`
+for us and presets the `ProcessAction` to `continue`, which goes back to
+receiving messages from clients. 
We already have a function over our input domain, +which evaluates to a new state, so we end up with: + +{% highlight haskell %} +storeTask :: Serializable a + => Pool a + -> Recipient + -> Closure (Process a) + -> Process (ProcessReply (Pool a) ()) +storeTask s r c = acceptTask s r c >>= noReply_ +{% endhighlight %} + +In order to spell things out for the compiler, we need to put a type signature +in place at the call site too, so our final construct is + +{% highlight haskell %} +handleCallFrom (\s f (p :: Closure (Process a)) -> storeTask s f p) +{% endhighlight %} + +No such thing is required for `taskComplete`, as there's no ambiguity about its +type. Our process definition is now finished, and here it is: + +{% highlight haskell %} +poolServer :: forall a . (Serializable a) => ProcessDefinition (Pool a) +poolServer = + defaultProcess { + apiHandlers = [ + handleCallFrom (\s f (p :: Closure (Process a)) -> storeTask s f p) + ] + , infoHandlers = [ + handleInfo taskComplete + ] + } :: ProcessDefinition (Pool a) +{% endhighlight %} + +Starting the pool is simple: `ManagedProcess` provides several utility functions +to help with spawning and running processes. +The `start` function takes an _initialising_ thunk, which must generate the initial +state and per-call timeout settings, then the process definition which we've already +encountered. + +{% highlight haskell %} +simplePool :: forall a . (Serializable a) + => PoolSize + -> ProcessDefinition (Pool a) + -> Process (Either (InitResult (Pool a)) TerminateReason) +simplePool sz server = start sz init' server + where init' :: PoolSize -> Process (InitResult (Pool a)) + init' sz' = return $ InitOk (emptyPool sz') Infinity + + emptyPool :: Int -> Pool a + emptyPool s = Pool s [] [] +{% endhighlight %} + +### Putting it all together + +Starting up a pool locally or on a remote node is just a matter of using `spawn` +or `spawnLocal` with `simplePool`. 
The second argument should add specificity to +the type of results the process definition operates on, e.g., + +{% highlight haskell %} +let svr' = poolServer :: ProcessDefinition (Pool String) +in simplePool s svr' +{% endhighlight %} + +Defining tasks is as simple as making them remote-worthy: + +{% highlight haskell %} +sampleTask :: (TimeInterval, String) -> Process String +sampleTask (t, s) = sleep t >> return s + +$(remotable ['sampleTask]) +{% endhighlight %} + +And executing them is just as simple too. Given a pool which has been registered +locally as "mypool": + +{% highlight haskell %} +tsk <- return $ ($(mkClosure 'sampleTask) (seconds 2, "foobar")) +executeTask "mypool" tsk +{% endhighlight %} + +In this tutorial, we've really just scratched the surface of the `ManagedProcess` +API. By handing over control of the client/server protocol to the framework, we +are able to focus on the code that matters, such as state transitions and decision +making, without getting bogged down (much) with the business of sending and +receiving messages, handling client/server failures and such like. + +### Performance Considerations + +We did not take much care over our choice of data structures. Might this have profound +consequences for clients? The LIFO nature of the pending backlog is surprising, but +we can change that quite easily by changing data structures. In fact, the code on which +this example is based uses `Data.Sequence` to provide both strictness and FIFO +execution ordering. + +Perhaps more of a concern is the cost of using `Async` everywhere - remember +we used this in the *server* to handle concurrently executing tasks and obtaining +their results. An invocation of `async` will create two new processes: one to perform +the calculation and another to monitor the first and handle failures and/or cancellation. +Spawning processes is cheap, but not free as each process is a haskell thread, plus +some additional book keeping data. 
+ +[1]: https://github.com/haskell-distributed/distributed-process-platform/blob/master/src/Control/Distributed/Process/Platform/Task/Queue/BlockingQueue.hs diff --git a/tutorials/ch-tutorial5.md b/tutorials/ch-tutorial5.md new file mode 100644 index 0000000..3f268b4 --- /dev/null +++ b/tutorials/ch-tutorial5.md @@ -0,0 +1,15 @@ +--- +layout: tutorial +categories: tutorial +sections: ['Introduction'] +title: Supervision Principles +--- + +### Introduction + +In previous tutorials, we've looked at utilities for linking processes together +and monitoring their lifecycle as it changes. Linking and monitoring are +foundational tools for building _reliable_ systems, and are the bedrock principles +on which Cloud Haskell's supervision capabilities are built. + + diff --git a/tutorials/tutorial2.md b/tutorials/tutorial-NT2.md similarity index 100% rename from tutorials/tutorial2.md rename to tutorials/tutorial-NT2.md diff --git a/tutorials/tutorial1.md b/tutorials/tutorial1.md deleted file mode 100644 index 0575704..0000000 --- a/tutorials/tutorial1.md +++ /dev/null @@ -1,205 +0,0 @@ ---- -layout: tutorial1 -categories: tutorial -title: Getting Started ---- - -### Getting Started - ------ - -Please note that this tutorial is a work in progress. We highly recommend -reading the haddock documentation and reading the Well-Typed blog, which -are offer the best quality sources of information at this time. - -In order to go through this tutorial you will need a Haskell development -environment and we recommend installing the latest version of the -[Haskell Platform](http://www.haskell.org/platform/) if you've not done -so already. - -Once you're up and running, you'll want to get hold of the distributed-process -library and a choice of network transport backend. This guide will use -the network-transport-tcp backend, but the simplelocalnet or inmemory -backends are also available on github, along with some other experimental -options.
- -### Create a node - -Cloud Haskell's *lightweight processes* reside on a 'node', which must -be initialised with a network transport implementation and a remote table. -The latter is required so that physically separate nodes can identify known -objects in the system (such as types and functions) when receiving messages -from other nodes. We'll look at inter-node communication later, so for now -it will suffice to pass the default remote table, which defines the built-in -stuff Cloud Haskell needs at a minimum. - -Let's start with imports first: - -{% highlight haskell %} -import Network.Transport.TCP (createTransport, defaultTCPParameters) -import Control.Distributed.Process -import Control.Distributed.Process.Node -{% endhighlight %} - -Our TCP network transport backend needs an IP address and port to get started -with, and we're good to go... - -{% highlight haskell %} -main :: IO () -main = do - Right t <- createTransport "127.0.0.1" "10501" defaultTCPParameters - node <- newLocalNode t initRemoteTable - .... -{% endhighlight %} - -And now we have a running node. - -### Send messages - -We can start a new lightweight process with `forkProcess`, which takes a node, -a `Process` action - because our concurrent code will run in the `Process` -monad - and returns an address for the process in the form of a `ProcessId`. -The process id can be used to send messages to the running process - here we -will send one to ourselves! - -{% highlight haskell %} --- in main - _ <- forkProcess node $ do - -- get our own process id - self <- getSelfPid - send self "hello" - hello <- expect :: Process String - liftIO $ putStrLn hello - return () -{% endhighlight %} - -Lightweight processes are implemented as `forkIO` threads. In general we will -try to forget about this implementation detail, but for now just note that we -haven't deadlocked ourself by sending to and receiving from our own mailbox -in this fashion. 
Sending a message is a completely asynchronous operation - even -if the recipient doesn't exist, no error will be raised and evaluating `send` -will not block the caller. - -Receiving messages works the other way around, blocking the caller until a message -matching the expected type arrives in the process (conceptual) mailbox. -If multiple messages of that type are in the queue, they will be returned in FIFO -order, otherwise the caller will be blocked until a message arrives that can be -decoded to the correct type. - -Let's spawn another process on the same node and make the two talk to each other. - -{% highlight haskell %} -import Control.Concurrent (threadDelay) -import Control.Monad (forever) -import Control.Distributed.Process -import Control.Distributed.Process.Node -import Network.Transport.TCP (createTransport, defaultTCPParameters) - -replyBack :: (ProcessId, String) -> Process () -replyBack (sender, msg) = send sender msg - -logMessage :: String -> Process () -logMessage msg = say $ "handling " ++ msg - -main :: IO () -main = do - Right t <- createTransport "127.0.0.1" "10501" defaultTCPParameters - node <- newLocalNode t initRemoteTable - -- Spawn a new process on a local node - forkProcess node $ do - -- Spawn worker inside one more process on the local node - echoPid <- spawnLocal $ forever $ do - -- Test the matches in order against each message in the queue - receiveWait [match logMessage, match replyBack] - - -- `say` sends a message to the process registered as logger. - -- By default, this process simply sends the string to stderr. - say "send some messages!" - send echoPid "hello" - self <- getSelfPid - send echoPid (self, "hello") - -- like `expect` (waits for a message), but with timeout - m <- expectTimeout 1000000 - case m of - -- Die immediately - throws a ProcessExitException with the given reason. - Nothing -> die "nothing came back!" - (Just s) -> say $ "got back " ++ s - return () - - -- A 1 second wait. 
Otherwise the main thread can terminate before - -- our messages reach the logging process or get flushed to stdio - liftIO $ threadDelay (1*1000000) - return () -{% endhighlight %} - -Note that we've used a `receive` class of function this time around. These -functions work with the [`Match`][Match] data type, and provide a range of -advanced dispatching options. The `match` construct allows you to construct a -list of potential message handlers and have them evaluated against incoming -messages. Our first match indicates that, given a tuple `t :: (ProcessId, -String)` we will send the `String` component back to the sender's -`ProcessId`. Our second match prints out whatever string it receives. - -Also note the use of a 'timeout' (given in microseconds), which is available for -both the `expect` and `receive` variants. This returns `Nothing` unless a message -can be dequeued from the mailbox within the specified time interval. - -### Serializable - -Processes can send data if the type implements the `Serializable` typeclass, which is -done indirectly by implementing `Binary` and deriving `Typeable`. Implementations are -already provided for primitives and some commonly used data structures. - -### Spawning Remote Processes - -In order to spawn a process on a node we need something of type `Closure (Process ())`. -In distributed-process if `f : T1 -> T2` then - -{% highlight haskell %} - $(mkClosure 'f) :: T1 -> Closure T2 -{% endhighlight %} - -That is, the first argument the function we pass to mkClosure will act as the closure -environment for that process; if you want multiple values in the closure environment, -you must tuple them up. - -In order to spawn a process remotely we will need to configure the remote table -(see the documentation for more details) and the easiest way to do this, is to -let the library generate the relevant code for us. 
For example (taken from the -distributed-process-platform test suites): - -{% highlight haskell %} -sampleTask :: (TimeInterval, String) -> Process String -sampleTask (t, s) = sleep t >> return s - -$(remotable ['sampleTask]) -{% endhighlight %} - -We can now create a closure environment for `sampleTask` like so: - -{% highlight haskell %} -($(mkClosure 'sampleTask) (seconds 2, "foobar")) -{% endhighlight %} - -The call to `remotable` generates a remote table and generates a definition -`__remoteTable :: RemoteTable -> RemoteTable` in our module for us. We can -compose this with other remote tables in order to come up with a final, merged -remote table for use in our program: - -{% highlight haskell %} -myRemoteTable :: RemoteTable -myRemoteTable = Main.__remoteTable initRemoteTable - -main :: IO () -main = do - localNode <- newLocalNode transport myRemoteTable - -- etc -{% endhighlight %} - ------- - -[1]: /static/doc/distributed-process/Control-Distributed-Process.html#v:Message -[2]: http://hackage.haskell.org/package/distributed-process -[3]: /static/doc/distributed-process-platform/Control-Distributed-Process-Platform-Async.html -[4]: /static/doc/distributed-process-platform/Control-Distributed-Process-Platform-ManagedProcess.htmlv:callAsync -[Match]: http://hackage.haskell.org/packages/archive/distributed-process/latest/doc/html/Control-Distributed-Process-Internal-Primitives.html#t:Match diff --git a/tutorials/tutorial3.md b/tutorials/tutorial3.md deleted file mode 100644 index ca2572e..0000000 --- a/tutorials/tutorial3.md +++ /dev/null @@ -1,389 +0,0 @@ ---- -layout: tutorial3 -categories: tutorial -title: Managed Process Tutorial ---- - -### Introduction - -The source code on which this tutorial is based is kept on github, -and can be accessed [here][1]. Please note that this tutorial is -based on the stable (master) branch of distributed-process-platform. 
- -The main idea behind `ManagedProcess` is to separate the functional -and non-functional aspects of a process. By functional, we mean whatever -application specific task the process performs, and by non-functional -we mean the *concurrency* or, more precisely, handling of the process' -mailbox. - -Another effect that `ManagedProcess` has is to provide client code -with a typed, specific API for interacting with the process, much as -a TypedChannel does. We achieve this by writing and exporting functions -that operate on the types we want clients to see, and using the API -from `Control.Distributed.Process.Platform.ManagedProcess.Client` to -interact with the server. - -Let's imagine we want to execute tasks on an arbitrary node, using a -mechanism much as we would with the `call` API from distributed-process. -As with `call`, we want the caller to block whilst the remote task is -executing, but we also want to put an upper bound on the number of -concurrent tasks. We will use `ManagedProcess` to implement a generic -task server with the following characteristics - -* requests to enqueue a task are handled immediately -* callers will block until the task completes (or fails) -* an upper bound is placed on the number of concurrent running tasks - -Once the upper bound is reached, tasks will be queued up for later -execution, and only when we drop below the limit will tasks be taken -from the backlog and executed. - -`ManagedProcess` provides a basic protocol for *server-like* processes -such as this, based on the synchronous `call` and asynchronous `cast` -functions used by code we provide to client clients and matching -*handler* functions in the process itself, for which there is a similar -API on the *server*. Although `call` is a synchronous protocol, -communication with the *server process* is out of band, both from the -client and the server's point of view. 
The server implementation chooses -whether to reply to a call request immediately, or defer its reply until -a later stage and go back to receiving other messages in the meanwhile. - -### Implementing the client - -Before we figure out the shape of our state, let's think about the types -we'll need to consume in the server process: the tasks we perform and the -maximum pool size. - -{% highlight haskell %} -type PoolSize = Int -type SimpleTask a = Closure (Process a) -{% endhighlight %} - -To submit a task, our clients will submit an action in the process -monad, wrapped in a `Closure` environment. We will use the `Addressable` -typeclass to allow clients to specify the server's location in whatever -manner suits them: - -{% highlight haskell %} --- enqueues the task in the pool and blocks --- the caller until the task is complete -executeTask :: forall s a . (Addressable s, Serializable a) - => s - -> Closure (Process a) - -> Process (Either String a) -executeTask sid t = call sid t -{% endhighlight %} - -That's it for the client! Note that the type signature we expose to -our consumers is specific, and that we do not expose them to either -arbitrary messages arriving in their mailbox or to exceptions being -thrown in their thread. Instead we return an `Either`. - -There are several varieties of the `call` API that deal with error -handling in different ways. Consult the haddocks for more info about -these. - -### Implementing the server - -Back on the server, we write a function that takes our state and an -input message - in this case, the `Closure` we've been sent - and -have that update the process' state and possibility launch the task -if we have enough spare capacity. - -{% highlight haskell %} -data Pool a = Pool a -{% endhighlight %} - -I've called the state type `Pool` as we're providing a fixed size resource -pool from the consumer's perspective. 
We could think of this as a bounded -size latch or barrier of sorts, but that conflates the example a bit too -much. We parameterise the state by the type of data that can be returned -by submitted tasks. - -The updated pool must store the task **and** the caller (so we can reply -once the task is complete). The `ManagedProcess.Server` API will provide us -with a `Recipient` value which can be used to reply to the caller at a later -time, so we'll make use of that here. - -{% highlight haskell %} -acceptTask :: Serializable a - => Pool a - -> Recipient - -> Closure (Process a) - -> Process (Pool a) -{% endhighlight %} - -For our example we will avoid using even vaguely exotic types to manage our -process' internal state, and stick to simple property lists. This is hardly -efficient, but that's fine for a test/demo. - -{% highlight haskell %} -data Pool a = Pool { - poolSize :: PoolSize - , accepted :: [(Recipient, Closure (Process a))] - } deriving (Typeable) -{% endhighlight %} - -### Making use of Async - -So **how** can we execute this `Closure (Process a)` without blocking the server -process itself? We will use the `Control.Distributed.Process.Platform.Async` API -to execute the task asynchronously and provide a means for waiting on the result. - -In order to use the `Async` handle to get the result of the computation once it's -complete, we'll have to hang on to a reference. We also need a way to associate the -submitter with the handle, so we end up with one field for the active (running) -tasks and another for the queue of accepted (but inactive) ones, like so... - -{% highlight haskell %} -data Pool a = Pool { - poolSize :: PoolSize - , active :: [(Recipient, Async a)] - , accepted :: [(Recipient, Closure (Process a))] - } deriving (Typeable) -{% endhighlight %} - -To turn that `Closure` environment into a thunk we can evaluate, we'll use the -built in `unClosure` function, and we'll pass the thunk to `async` and get back -a handle to the async task. 
- -{% highlight haskell %} -proc <- unClosure task' -asyncHandle <- async proc -{% endhighlight %} - -Of course, we decided that we wouldn't block on each `Async` handle, and we're not -able to sit in a *loop* polling all the handles representing tasks we're running, -because no submissions would be handled whilst spinning and waiting for results. -We're relying on monitors instead, so we need to store the `MonitorRef` so we know -which monitor signal relates to which async task (and recipient). - -{% highlight haskell %} -data Pool a = Pool { - poolSize :: PoolSize - , active :: [(MonitorRef, Recipient, Async a)] - , accepted :: [(Recipient, Closure (Process a))] - } deriving (Typeable) -{% endhighlight %} - -Finally we can implement the `acceptTask` function. - -{% highlight haskell %} -acceptTask :: Serializable a - => Pool a - -> Recipient - -> Closure (Process a) - -> Process (Pool a) -acceptTask s@(Pool sz' runQueue taskQueue) from task' = - let currentSz = length runQueue - in case currentSz >= sz' of - True -> do - return $ s { accepted = ((from, task'):taskQueue) } - False -> do - proc <- unClosure task' - asyncHandle <- async proc - ref <- monitorAsync asyncHandle - taskEntry <- return (ref, from, asyncHandle) - return s { active = (taskEntry:runQueue) } -{% endhighlight %} - -If we're at capacity, we add the task (and caller) to the `accepted` queue, -otherwise we launch and monitor the task using `async` and stash the monitor -ref, caller ref and the async handle together in the `active` field. Prepending -to the list of active/running tasks is a somewhat arbitrary choice. One might -argue that heuristically, the younger a task is the less likely it is that it -will run for a long time. Either way, I've done this to avoid cluttering the -example other data structures, so we can focus on the `ManagedProcess` APIs -only. - -Now we will write a function that handles the results. 
When the monitor signal -arrives, we use the async handle to obtain the result and send it back to the caller. -Because, even if we were running at capacity, we've now seen a task complete (and -therefore reduce the number of active tasks by one), we will also pull off a pending -task from the backlog (i.e., accepted), if any exists, and execute it. As with the -active task list, we're going to take from the backlog in FIFO order, which is -almost certainly not what you'd want in a real application, but that's not the -point of the example either. - -The steps then, are - -1. find the async handle for the monitor ref -2. pull the result out of it -3. send the result to the client -4. bump another task from the backlog (if there is one) -5. carry on - -This chain then, looks like `wait h >>= respond c >> bump s t >>= continue`. - -Item (3) requires special API support from `ManagedProcess`, because we're not -just sending *any* message back to the caller. We're replying to a `call` -that has already taken place and is, in fact, still running. The API call for -this is `replyTo`. - -{% highlight haskell %} -taskComplete :: forall a . 
Serializable a - => Pool a - -> ProcessMonitorNotification - -> Process (ProcessAction (Pool a)) -taskComplete s@(Pool _ runQ _) - (ProcessMonitorNotification ref _ _) = - let worker = findWorker ref runQ in - case worker of - Just t@(_, c, h) -> wait h >>= respond c >> bump s t >>= continue - Nothing -> continue s - where - respond :: Recipient - -> AsyncResult a - -> Process () - respond c (AsyncDone r) = replyTo c ((Right r) :: (Either String a)) - respond c (AsyncFailed d) = replyTo c ((Left (show d)) :: (Either String a)) - respond c (AsyncLinkFailed d) = replyTo c ((Left (show d)) :: (Either String a)) - respond _ _ = die $ TerminateOther "IllegalState" - - bump :: Pool a -> (MonitorRef, Recipient, Async a) -> Process (Pool a) - bump st@(Pool _ runQueue acc) worker = - let runQ2 = deleteFromRunQueue worker runQueue in - case acc of - [] -> return st { active = runQ2 } - ((tr,tc):ts) -> acceptTask (st { accepted = ts, active = runQ2 }) tr tc - -findWorker :: MonitorRef - -> [(MonitorRef, Recipient, Async a)] - -> Maybe (MonitorRef, Recipient, Async a) -findWorker key = find (\(ref,_,_) -> ref == key) - -deleteFromRunQueue :: (MonitorRef, Recipient, Async a) - -> [(MonitorRef, Recipient, Async a)] - -> [(MonitorRef, Recipient, Async a)] -deleteFromRunQueue c@(p, _, _) runQ = deleteBy (\_ (b, _, _) -> b == p) c runQ -{% endhighlight %} - -That was pretty simple. We've deal with mapping the `AsyncResult` to `Either` values, -which we *could* have left to the caller, but this makes the client facing API much -simpler to work with. - -### Wiring up handlers - -The `ProcessDefinition` takes a number of different kinds of handler. The only ones -we care about are the call handler for submission handling, and the handler that -deals with monitor signals. - -Call and cast handlers live in the `apiHandlers` list of a `ProcessDefinition` and -must have the type `Dispatcher s` where `s` is the state type for the process. 
We -cannot construct a `Dispatcher` ourselves, but a range of functions in the -`ManagedProcess.Server` module exist to lift functions like the ones we've just -defined. The particular function we need is `handleCallFrom`, which works with -functions over the state, `Recipient` and the call data/message. All the varieties -of `handleCall` need to return a `ProcessReply`, which has the following type - -{% highlight haskell %} -data ProcessReply s a = - ProcessReply a (ProcessAction s) - | NoReply (ProcessAction s) -{% endhighlight %} - -There are also various utility function in the API to construct a `ProcessAction` -and we will make use of `noReply_` here, which constructs `NoReply` for us and -presets the `ProcessAction` to `ProcessContinue`, which goes back to receiving -messages without further action. We already have a function over the right input -domain which evaluates to a new state so we end up with: - -{% highlight haskell %} -storeTask :: Serializable a - => Pool a - -> Recipient - -> Closure (Process a) - -> Process (ProcessReply (Pool a) ()) -storeTask s r c = acceptTask s r c >>= noReply_ -{% endhighlight %} - -In order to spell things out for the compiler, we need to put a type signature -in place at the call site too, so our final construct is - -{% highlight haskell %} -handleCallFrom (\s f (p :: Closure (Process a)) -> storeTask s f p) -{% endhighlight %} - -No such thing is required for `taskComplete`, as there's no ambiguity about its -type. Our process definition is finished, and here it is: - -{% highlight haskell %} -poolServer :: forall a . (Serializable a) => ProcessDefinition (Pool a) -poolServer = - defaultProcess { - apiHandlers = [ - handleCallFrom (\s f (p :: Closure (Process a)) -> storeTask s f p) - ] - , infoHandlers = [ - handleInfo taskComplete - ] - } :: ProcessDefinition (Pool a) -{% endhighlight %} - -Starting the pool is fairly simple and `ManagedProcess` has some utilities to help. 
- -{% highlight haskell %} -simplePool :: forall a . (Serializable a) - => PoolSize - -> ProcessDefinition (Pool a) - -> Process (Either (InitResult (Pool a)) TerminateReason) -simplePool sz server = start sz init' server - where init' :: PoolSize -> Process (InitResult (Pool a)) - init' sz' = return $ InitOk (Pool sz' [] []) Infinity -{% endhighlight %} - -### Putting it all together - -Starting up a pool locally or on a remote node is just a matter of using `spawn` -or `spawnLocal` with `simplePool`. The second argument should specify the type of -results, e.g., - -{% highlight haskell %} -let s' = poolServer :: ProcessDefinition (Pool String) -in simplePool s s' -{% endhighlight %} - -Defining tasks is as simple as making them remote-worthy: - -{% highlight haskell %} -sampleTask :: (TimeInterval, String) -> Process String -sampleTask (t, s) = sleep t >> return s - -$(remotable ['sampleTask]) -{% endhighlight %} - -And executing them is just as simple too. Given a pool which has been registered -locally as "mypool", we can simply call it directly: - -{% highlight haskell %} -job <- return $ ($(mkClosure 'sampleTask) (seconds 2, "foobar")) -call "mypool" job >>= wait >>= stash result -{% endhighlight %} - -Hopefully this has demonstrated a few benefits of the `ManagedProcess` API, although -it's really just scratching the surface. We have focussed on the code that matters - -state transitions and decision making, without getting bogged down (much) with receiving -or sending messages, apart from using some simple APIs when we needed to. - -### Performance Considerations - -We did not take much care over our choice of data structures. Might this have profound -consequences for clients? The LIFO nature of the pending backlog is surprising, but -we can change that quite easily by changing data structures. 
- -What's perhaps more of a concern is the cost of using `Async` everywhere - remember -we used this in the *server* to handle concurrently executing tasks and obtaining -their results. The `Async` module is also used by `ManagedProcess` to handle the -`call` mechanism, and there *are* some overheads to using it. An invocation of -`async` will create two new processes: one to perform the calculation and another -to monitor the first and handle failure and/or cancellation. Spawning processes is -cheap, but not free as each process is a haskell thread, plus some additional book -keeping data. - -The cost of spawning two processes for each computation/task might represent just that -bit too much overhead for some applications. In our next tutorial, we'll look at the -`Control.Distributed.Process.Platform.Task` API, which looks a lot like `Async` but -manages exit signals in a single thread and makes configurable task pools and task -supervision strategy part of its API. - -[1]: https://github.com/haskell-distributed/distributed-process-platform/blob/master/tests/SimplePool.hs From 09e23c03861f3accee76f44c3447614f0c95fe81 Mon Sep 17 00:00:00 2001 From: Tim Watson Date: Thu, 6 Feb 2014 19:44:18 +0000 Subject: [PATCH 007/108] Finish off the managed process tutorial --- tutorials/ch-tutorial4.md | 430 ++++++++++++++++++++------------------ 1 file changed, 232 insertions(+), 198 deletions(-) diff --git a/tutorials/ch-tutorial4.md b/tutorials/ch-tutorial4.md index 9618deb..ed1ac1f 100644 --- a/tutorials/ch-tutorial4.md +++ b/tutorials/ch-tutorial4.md @@ -214,7 +214,7 @@ printSum sid = cast sid . 
Add launchMathServer :: Process ProcessId launchMathServer = let server = statelessProcess { - apiHandlers = [ handleRpcChan_ (\chan (Add x y) -> sendChan chan (x + y)) + apiHandlers = [ handleRpcChan_ (\chan (Add x y) -> sendChan chan (x + y) >> continue_) , handleCast_ (\(Add x y) -> liftIO $ putStrLn $ show (x + y) >> continue_) ] , unhandledMessagePolicy = Drop } @@ -300,143 +300,152 @@ since the type is opaque. In terms of code for the client then, that's all there is to it! Note that the type signature we expose to our consumers is specific, and that -we do not expose them to either arbitrary messages arriving in their mailbox -or to exceptions being thrown in their thread. Instead we return an `Either`. -One very important thing about this approach is that if the server replies -with some other type (i.e., a type other than `Either String a`) then our -client will be blocked indefinitely! We could alleviate this by using a -typed channel as we saw previously with our math server, but there's little -point since we're in total charge of both client and server. +we do not expose them to arbitrary messages arriving in their mailbox. Note +that if a `call` fails, a `ProcessExitException` will be thrown in the caller's +thread (since the implementation calls `die` if it detects that the server has +died before replying). Other variations of `call` exist that return a `Maybe` or +an `Either ExitReason a` instead of making the caller's process exit. + +Note that if the server replies to this call with some other type (i.e., a type +other than `Either String a`) then our client will be blocked indefinitely! +We could alleviate this by using a typed channel as we saw previously with our +math server, but there's little point since we're in total charge of both the +client and the server's code. 
### Implementing the server -Back on the server, we write a function that takes our state and an -input message - in this case, the `Closure` we've been sent - and -have that update the process' state and possibility launch the task -if we have enough spare capacity. +To implement the server, we'll need to hang on to some internal state. As well as +knowing our queue's size limit, we will need to track the _active_ tasks we're +currently running. Each task will be submitted as a `Closure (Process a)` and we'll +need to spawn the task (asynchronously), handle the result (once the closure has +run to completion) and communicate the result (or failure) to the original caller. -{% highlight haskell %} -data Pool a = Pool a -{% endhighlight %} - -I've called the state type `Pool` as we're providing a fixed size resource -pool from the consumer's perspective. We could think of this as a bounded -queue, latch or barrier of sorts, but that conflates the example a bit too -much. We parameterise the state by the type of data that can be returned -by submitted tasks. - -The updated pool must store the task **and** the caller (so we can reply -once the task is complete). The `ManagedProcess.Server` API will provide us -with a `Recipient` value which can be used to reply to the caller at a later -time, so we'll make use of that here. +This means our pool state will need to be parameterised by the result type it will +accept in its closures. So now we have the beginnings of our state type: {% highlight haskell %} -acceptTask :: Serializable a - => Pool a - -> Recipient - -> Closure (Process a) - -> Process (Pool a) +data BlockingQueue a = BlockingQueue {% endhighlight %} -For our example we will avoid using even vaguely exotic types to manage our -process' internal state, and stick to simple property lists. This is hardly -efficient, but that's fine for a test/demo. 
- -{% highlight haskell %} -data Pool a = Pool { - poolSize :: PoolSize - , accepted :: [(Recipient, Closure (Process a))] - } deriving (Typeable) -{% endhighlight %} - -Given a pool of closures, we must now work out how to execute them -on the caller's behalf. - ### Making use of Async So **how** can we execute this `Closure (Process a)` without blocking the server -process itself? We will use the `Control.Distributed.Process.Platform.Async` API +process itself? We can use the `Control.Distributed.Process.Platform.Async` API to execute each task asynchronously and provide a means for waiting on the result. -In order to use the `Async` handle to get the result of the computation once it's +In order to use an `Async` handle to get the result of the computation once it's complete, we'll have to hang on to a reference. We also need a way to associate the submitter with the handle, so we end up with one field for the active (running) -tasks and another for the queue of accepted (but inactive) ones, like so... +tasks and another for the queue of accepted (but inactive) ones, as expected. + +Since we cannot wait on all these `Async` handles at once whilst we're supposed to +be accepting new messages from clients - actually, /async/ does provide an API for +multiplexing on async results, but that's no use here - instead we will monitor the +async tasks and pull the results when we receive their monitor signals. So for the +active tasks, we'll need to store a `MonitorRef` and a reference to the original +caller, plus the async handle itself. We'll use a simple association list for this +state, though we should probably use a more optimal data structure eventually. + +For the tasks that we cannot execute immediately (i.e., when we reach the queue's +size limit), we hold the client ref and the closure, but no monitor ref. 
We'll
use a data structure that supports FIFO ordering semantics for this, since that's
probably what clients will expect of something calling itself a "queue".

{% highlight haskell %}
data BlockingQueue a = BlockingQueue {
    poolSize :: SizeLimit
  , active   :: [(MonitorRef, CallRef (Either ExitReason a), Async a)]
  , accepted :: Seq (CallRef (Either ExitReason a), Closure (Process a))
  }
{% endhighlight %}

Our queue-like behaviour is fairly simple to define using `Data.Sequence`:

{% highlight haskell %}
enqueue :: Seq a -> a -> Seq a
enqueue s a = a <| s

dequeue :: Seq a -> Maybe (a, Seq a)
dequeue s = maybe Nothing (\(s' :> a) -> Just (a, s')) $ getR s

getR :: Seq a -> Maybe (ViewR a)
getR s =
  case (viewr s) of
    EmptyR -> Nothing
    a      -> Just a
{% endhighlight %}


Now, to turn that `Closure` environment into a thunk we can evaluate, we'll use the
built in `unClosure` function, and we'll pass the thunk to `async` and get back
a handle to the running async task, which we'll then need to monitor.
We won't cover +the async API in detail here, except to point out that the call to `async` spawns a +new process to do the actual work and returns a handle that we can use to query for +the result. {% highlight haskell %} -data Pool a = Pool { - poolSize :: PoolSize - , active :: [(MonitorRef, Recipient, Async a)] - , accepted :: [(Recipient, Closure (Process a))] - } deriving (Typeable) +proc <- unClosure task' +asyncHandle <- async proc +ref <- monitorAsync asyncHandle {% endhighlight %} -Finally we can implement the `acceptTask` function. +We can now implement the `acceptTask` function, which the server will use to handle +submitted tasks. The signature of our function must be compatible with the message +handling API from `ManagedProcess` that we're going to use it with - in this case +`handleCallFrom`. This variant of the `handleCall` family of functions is specifically +intended for use when the server is going to potentially delay its reply, rather than +replying immediately. It takes an expression that operates over our server's state, a +`CallRef` that uniquely identifies the caller and can be used to reply to them later +on and the message that was sent to the server - in this case, a `Closure (Process a)`. + +All managed process handler functions must return either a `ProcessAction`, indicating +how the server should proceed, or a `ProcessReply`, which combines a `ProcessAction` +with a possible reply to one of the `call` derivatives. Since we're deferring our reply +until later, we will use `noReply_`, which creates a `ProcessAction` for us, telling +the server to continue receiving messages. 
{% highlight haskell %} +storeTask :: Serializable a + => BlockingQueue a + -> CallRef (Either ExitReason a) + -> Closure (Process a) + -> Process (ProcessReply (Either ExitReason a) (BlockingQueue a)) +storeTask s r c = acceptTask s r c >>= noReply_ + acceptTask :: Serializable a - => Pool a - -> Recipient + => BlockingQueue a + -> CallRef (Either ExitReason a) -> Closure (Process a) - -> Process (Pool a) -acceptTask s@(Pool sz' runQueue taskQueue) from task' = + -> Process (BlockingQueue a) +acceptTask s@(BlockingQueue sz' runQueue taskQueue) from task' = let currentSz = length runQueue in case currentSz >= sz' of True -> do - return $ s { accepted = ((from, task'):taskQueue) } + return $ s { accepted = enqueue taskQueue (from, task') } False -> do proc <- unClosure task' asyncHandle <- async proc ref <- monitorAsync asyncHandle - taskEntry <- return (ref, from, asyncHandle) + let taskEntry = (ref, from, asyncHandle) return s { active = (taskEntry:runQueue) } {% endhighlight %} If we're at capacity, we add the task (and caller) to the `accepted` queue, otherwise we launch and monitor the task using `async` and stash the monitor -ref, caller ref and the async handle together in the `active` field. Prepending -to the list of active/running tasks is a somewhat arbitrary choice. One might -argue that heuristically, the younger a task is the less likely it is that it -will run for a long time. Either way, I've done this to avoid cluttering the -example with data structures, so we can focus on the `ManagedProcess` APIs. - -Now we will write a function that handles the results. When a monitor signal -arrives, we lookup an async handle that we can use to obtain the result -and send it back to the caller. Because, even if we were running at capacity, -we've now seen a task complete (and therefore reduced the number of active tasks -by one), we will also pull off a pending task from the backlog (i.e., accepted), -if any exists, and execute it. 
As with the active task list, we're going to -take from the backlog in FIFO order, which is almost certainly not what you'd want -in a real application, but that's not the point of the example either. +ref, caller ref and the async handle together in the `active` field. + +Now we must write a function that handles the results of these closures. When +a monitor signal arrives in our mailbox, we need to lookup the async handle +associated with it so as to obtain the result and send it back to the caller. +Because, even if we were running at capacity, we've now seen a task complete +(and therefore reduced the number of active tasks by one), we will also pull +off a pending task from the backlog (i.e., accepted), if any exists, and execute +it. The steps then, are -1. find the async handle for the monitor ref -2. pull the result out of it +1. find the async handle for our monitor ref +2. obtain the result using the handle 3. send the result to the client 4. bump another task from the backlog (if there is one) 5. carry on @@ -448,86 +457,77 @@ just sending *any* message back to the caller. We're replying to a specific `cal that has taken place and is, from the client's perspective, still running. The `ManagedProcess` API call for this is `replyTo`. +There is quite a bit of code in this next function, which we'll look at in detail. +Firstly, note that the signature is similar to the one we used for `storeTask`, but +returns just a `ProcessAction` instead of `ProcessReply`. This function will not be +wired up to a `call` (or even a `cast`), because the node controller will send the +monitor signal directly to our mailbox, not using the managed process APIs at all. +This kind of client interaction is called an _info call_ in the managed process API, +and since there's no expected reply, as with `cast`, we simply return a `ProcessAction` +telling the server what to do next - in this case, to `continue` reading from the +mailbox. 
+ {% highlight haskell %} taskComplete :: forall a . Serializable a - => Pool a + => BlockingQueue a -> ProcessMonitorNotification - -> Process (ProcessAction (Pool a)) -taskComplete s@(Pool _ runQ _) + -> Process (ProcessAction (BlockingQueue a)) +taskComplete s@(BlockingQueue _ runQ _) (ProcessMonitorNotification ref _ _) = let worker = findWorker ref runQ in case worker of Just t@(_, c, h) -> wait h >>= respond c >> bump s t >>= continue Nothing -> continue s - where - respond :: Recipient - -> AsyncResult a - -> Process () - respond c (AsyncDone r) = replyTo c ((Right r) :: (Either String a)) - respond c (AsyncFailed d) = replyTo c ((Left (show d)) :: (Either String a)) - respond c (AsyncLinkFailed d) = replyTo c ((Left (show d)) :: (Either String a)) - respond _ _ = die $ TerminateOther "IllegalState" - - bump :: Pool a -> (MonitorRef, Recipient, Async a) -> Process (Pool a) - bump st@(Pool _ runQueue acc) worker = - let runQ2 = deleteFromRunQueue worker runQueue in - case acc of - [] -> return st { active = runQ2 } - ((tr,tc):ts) -> acceptTask (st { accepted = ts, active = runQ2 }) tr tc + + where + respond :: CallRef (Either ExitReason a) + -> AsyncResult a + -> Process () + respond c (AsyncDone r) = replyTo c ((Right r) :: (Either ExitReason a)) + respond c (AsyncFailed d) = replyTo c ((Left (ExitOther $ show d)) :: (Either ExitReason a)) + respond c (AsyncLinkFailed d) = replyTo c ((Left (ExitOther $ show d)) :: (Either ExitReason a)) + respond _ _ = die $ ExitOther "IllegalState" + + bump :: BlockingQueue a + -> (MonitorRef, CallRef (Either ExitReason a), Async a) + -> Process (BlockingQueue a) + bump st@(BlockingQueue _ runQueue acc) worker = + let runQ2 = deleteFromRunQueue worker runQueue + accQ = dequeue acc in + case accQ of + Nothing -> return st { active = runQ2 } + Just ((tr,tc), ts) -> acceptTask (st { accepted = ts, active = runQ2 }) tr tc findWorker :: MonitorRef - -> [(MonitorRef, Recipient, Async a)] - -> Maybe (MonitorRef, Recipient, Async 
a) + -> [(MonitorRef, CallRef (Either ExitReason a), Async a)] + -> Maybe (MonitorRef, CallRef (Either ExitReason a), Async a) findWorker key = find (\(ref,_,_) -> ref == key) -deleteFromRunQueue :: (MonitorRef, Recipient, Async a) - -> [(MonitorRef, Recipient, Async a)] - -> [(MonitorRef, Recipient, Async a)] +deleteFromRunQueue :: (MonitorRef, CallRef (Either ExitReason a), Async a) + -> [(MonitorRef, CallRef (Either ExitReason a), Async a)] + -> [(MonitorRef, CallRef (Either ExitReason a), Async a)] deleteFromRunQueue c@(p, _, _) runQ = deleteBy (\_ (b, _, _) -> b == p) c runQ {% endhighlight %} -That was pretty simple. We've dealt with mapping the `AsyncResult` to `Either` values, -which we *could* have left to the caller, but this makes the client facing API much -simpler to work with. +We've dealt with mapping the `AsyncResult` to `Either` values, which we *could* have +left to the caller, but this makes the client facing API much simpler to work with. +Note that our use of an association list for the active _run queue_ makes for an +O(n) search for our worker, but that can be optimised with a map or dictionary later. +Worse, we have to scan the list again when deleting the worker from the _run queue_, +but the same fix (using a Map) should alleviate that problem too. We leave that as +an exercise for the reader. ### Wiring up handlers -The `ProcessDefinition` takes a number of different kinds of handler. The only ones -_we_ care about are the call handler for submissions, and the handler that -deals with monitor signals. TODO: THIS DOES NOT READ WELL - Call and cast handlers live in the `apiHandlers` list of our `ProcessDefinition` and have the type `Dispatcher s` where `s` is the state type for the process. We cannot construct a `Dispatcher` ourselves, but a range of functions in the -`ManagedProcess.Server` module exist to lift functions like the ones we've just -defined, to the correct type. 
The particular function we need is `handleCallFrom`, -which works with functions over the state, `Recipient` and call data/message. -All varieties of `handleCall` need to return a `ProcessReply`, which has the -following type: - -{% highlight haskell %} -data ProcessReply s a = - ProcessReply a (ProcessAction s) - | NoReply (ProcessAction s) -{% endhighlight %} - -Again, various utility functions are defined by the API for constructing a -`ProcessAction` and we make use of `noReply_` here, which constructs `NoReply` -for us and presets the `ProcessAction` to `continue`, which goes back to -receiving messages from clients. We already have a function over our input domain, -which evaluates to a new state, so we end up with: - -{% highlight haskell %} -storeTask :: Serializable a - => Pool a - -> Recipient - -> Closure (Process a) - -> Process (ProcessReply (Pool a) ()) -storeTask s r c = acceptTask s r c >>= noReply_ -{% endhighlight %} +`ManagedProcess.Server` module exist to convert functions like the ones we've just +defined, to the correct type. In order to spell things out for the compiler, we need to put a type signature -in place at the call site too, so our final construct is +in place at the call site for `storeTask`, so our final construct for that +handler is thus: {% highlight haskell %} handleCallFrom (\s f (p :: Closure (Process a)) -> storeTask s f p) @@ -537,48 +537,44 @@ No such thing is required for `taskComplete`, as there's no ambiguity about its type. Our process definition is now finished, and here it is: {% highlight haskell %} -poolServer :: forall a . 
(Serializable a) => ProcessDefinition (Pool a) -poolServer = - defaultProcess { - apiHandlers = [ - handleCallFrom (\s f (p :: Closure (Process a)) -> storeTask s f p) - ] - , infoHandlers = [ - handleInfo taskComplete - ] - } :: ProcessDefinition (Pool a) +defaultProcess { + apiHandlers = [ + handleCallFrom (\s f (p :: Closure (Process a)) -> storeTask s f p) + , handleCall poolStatsRequest + ] + , infoHandlers = [ handleInfo taskComplete ] + } {% endhighlight %} -Starting the pool is simple: `ManagedProcess` provides several utility functions -to help with spawning and running processes. -The `start` function takes an _initialising_ thunk, which must generate the initial -state and per-call timeout settings, then the process definition which we've already -encountered. +Starting the server takes a bit of work: `ManagedProcess` provides several +utility functions to help with spawning and running processes. The `serve` +function takes an _initialising_ thunk (which has the type `InitHandler`) +that must generate the initial state and set up the server's receive timeout, +then the process definition which we've already encountered. For more details +about starting managed processes, see the haddocks. {% highlight haskell %} -simplePool :: forall a . (Serializable a) - => PoolSize - -> ProcessDefinition (Pool a) - -> Process (Either (InitResult (Pool a)) TerminateReason) -simplePool sz server = start sz init' server - where init' :: PoolSize -> Process (InitResult (Pool a)) - init' sz' = return $ InitOk (emptyPool sz') Infinity - - emptyPool :: Int -> Pool a - emptyPool s = Pool s [] [] +run :: forall a . 
(Serializable a)
  => Process (InitResult (BlockingQueue a))
  -> Process ()
run init' = ManagedProcess.serve () (\() -> init') poolServer
  where poolServer =
          defaultProcess {
            apiHandlers = [
              handleCallFrom (\s f (p :: Closure (Process a)) -> storeTask s f p)
            , handleCall poolStatsRequest
            ]
          , infoHandlers = [ handleInfo taskComplete ]
          } :: ProcessDefinition (BlockingQueue a)

pool :: forall a . Serializable a
     => SizeLimit
     -> Process (InitResult (BlockingQueue a))
pool sz' = return $ InitOk (BlockingQueue sz' [] Seq.empty) Infinity
{% endhighlight %}

### Putting it all together

Defining tasks is as simple as making them remote-worthy:

{% highlight haskell %}
sampleTask :: (TimeInterval, String) -> Process String
sampleTask (t, s) = sleep t >> return s

$(remotable ['sampleTask])
{% endhighlight %}

And executing them is just as simple too.

{% highlight haskell %}
tsk <- return $ ($(mkClosure 'sampleTask) (seconds 2, "foobar"))
executeTask taskQueuePid tsk
{% endhighlight %}

Starting up the server itself locally or on a remote node, is just a matter of
combining `spawn` or `spawnLocal` with `start`. We can go a step further though,
and add a bit more type safety to our API by using an opaque handle to communicate
with the server. The advantage of this is that right now it is possible for
a client to send a `Closure` to the server with a return type different from the
Since the server won't recognise that message, the +`unhandledMessagePolicy` will be applied, which by default crashes the server with +an exit reason referring to "unhandled inputs"! + +By returning a handle to the server using a parameterised type, we can ensure that +only closures returning a matching type are sent. To do so, we use a phantom type +parameter and simply stash the real `ProcessId` in a newtype. We also need to be +able to pass this handle to the managed process `call` API, so we define an +instance of the `Resolvable` typeclass for it, which makes a (default) instance of +`Routable` available, which is exactly what `call` is expecting: + +{% highlight haskell %} +newtype TaskQueue a = TaskQueue { unQueue :: ProcessId } + +instance Resolvable (TaskQueue a) where + resolve = return . unQueue {% endhighlight %} +Finally, we write a `start` function that returns this handle and change the +signature of `executeTask` to match it: + +{% highlight haskell %} +start :: forall a . (Serializable a) + => SizeLimit + -> Process (TaskQueue a) +start lim = spawnLocal (start $ pool lim) >>= return . TaskQueue + +-- ....... + +executeTask :: (Serializable a) + => TaskQueue a + -> Closure (Process a) + -> Process (Either ExitReason a) +executeTask sid t = call sid t +{% endhighlight %} + +---------- + In this tutorial, we've really just scratched the surface of the `ManagedProcess` API. By handing over control of the client/server protocol to the framework, we are able to focus on the code that matters, such as state transitions and decision @@ -604,17 +642,13 @@ receiving messages, handling client/server failures and such like. ### Performance Considerations -We did not take much care over our choice of data structures. Might this have profound -consequences for clients? The LIFO nature of the pending backlog is surprising, but -we can change that quite easily by changing data structures. 
In fact, the code on which -this example is based uses `Data.Sequence` to provide both strictness and FIFO -execution ordering. - -Perhaps more of a concern is the cost of using `Async` everywhere - remember -we used this in the *server* to handle concurrently executing tasks and obtaining -their results. An invocation of `async` will create two new processes: one to perform -the calculation and another to monitor the first and handle failures and/or cancellation. -Spawning processes is cheap, but not free as each process is a haskell thread, plus -some additional book keeping data. +We did not take much care over our choice of data structures. Might this have +profound consequences for clients? Perhaps more of a concern is the cost of +using `Async` everywhere - remember we used this in the *server* to handle +concurrently executing tasks and obtaining their results. An invocation of +`async` will create two new processes: one to perform the calculation and +another to monitor the first and handle failures and/or cancellation. Spawning +processes is cheap, but not free as each process is a haskell thread, plus some +additional book keeping data. [1]: https://github.com/haskell-distributed/distributed-process-platform/blob/master/src/Control/Distributed/Process/Platform/Task/Queue/BlockingQueue.hs From b7c904aef997bbad65a9222a53507f87c2a95fe1 Mon Sep 17 00:00:00 2001 From: Tim Watson Date: Thu, 6 Feb 2014 20:23:01 +0000 Subject: [PATCH 008/108] Tutorial sorting now works... 
--- _layouts/site.html | 2 +- tutorials/{ch-tutorial1.md => ch1.md} | 0 tutorials/{ch-tutorial2.md => ch2.md} | 0 tutorials/{ch-tutorial3.md => ch3.md} | 0 tutorials/{ch-tutorial4.md => ch4.md} | 0 tutorials/{ch-tutorial5.md => ch5.md} | 0 6 files changed, 1 insertion(+), 1 deletion(-) rename tutorials/{ch-tutorial1.md => ch1.md} (100%) rename tutorials/{ch-tutorial2.md => ch2.md} (100%) rename tutorials/{ch-tutorial3.md => ch3.md} (100%) rename tutorials/{ch-tutorial4.md => ch4.md} (100%) rename tutorials/{ch-tutorial5.md => ch5.md} (100%) diff --git a/_layouts/site.html b/_layouts/site.html index 7e2f86d..78e87f8 100644 --- a/_layouts/site.html +++ b/_layouts/site.html @@ -21,7 +21,7 @@ diff --git a/tutorials/ch-tutorial1.md b/tutorials/ch1.md similarity index 100% rename from tutorials/ch-tutorial1.md rename to tutorials/ch1.md diff --git a/tutorials/ch-tutorial2.md b/tutorials/ch2.md similarity index 100% rename from tutorials/ch-tutorial2.md rename to tutorials/ch2.md diff --git a/tutorials/ch-tutorial3.md b/tutorials/ch3.md similarity index 100% rename from tutorials/ch-tutorial3.md rename to tutorials/ch3.md diff --git a/tutorials/ch-tutorial4.md b/tutorials/ch4.md similarity index 100% rename from tutorials/ch-tutorial4.md rename to tutorials/ch4.md diff --git a/tutorials/ch-tutorial5.md b/tutorials/ch5.md similarity index 100% rename from tutorials/ch-tutorial5.md rename to tutorials/ch5.md From 9fb01faece01955b026f08e7ccc60d35d9de77d4 Mon Sep 17 00:00:00 2001 From: Tim Watson Date: Thu, 27 Feb 2014 04:03:36 +0000 Subject: [PATCH 009/108] Extra tutorials --- css/bootstrap.css | 4 +- documentation.md | 6 +- img/alert.png | Bin 0 -> 7306 bytes img/info.png | Bin 0 -> 2551 bytes ...uted-Process-Platform-Async-AsyncChan.html | 109 ---- ...buted-Process-Platform-Async-AsyncSTM.html | 90 --- ...ol-Distributed-Process-Platform-Async.html | 118 ---- ...rol-Distributed-Process-Platform-Call.html | 34 -- ...rocess-Platform-ManagedProcess-Client.html | 36 -- 
...rocess-Platform-ManagedProcess-Server.html | 94 --- ...buted-Process-Platform-ManagedProcess.html | 287 --------- ...rol-Distributed-Process-Platform-Test.html | 17 - ...rol-Distributed-Process-Platform-Time.html | 27 - ...ol-Distributed-Process-Platform-Timer.html | 33 -- .../Control-Distributed-Process-Platform.html | 54 -- .../distributed-process-platform.haddock | Bin 134679 -> 0 bytes .../doc-index-95.html | 4 - .../doc-index-A.html | 4 - .../doc-index-All.html | 4 - .../doc-index-C.html | 4 - .../doc-index-D.html | 4 - .../doc-index-E.html | 4 - .../doc-index-F.html | 4 - .../doc-index-G.html | 4 - .../doc-index-H.html | 4 - .../doc-index-I.html | 4 - .../doc-index-K.html | 4 - .../doc-index-L.html | 4 - .../doc-index-M.html | 4 - .../doc-index-N.html | 4 - .../doc-index-P.html | 4 - .../doc-index-R.html | 4 - .../doc-index-S.html | 4 - .../doc-index-T.html | 4 - .../doc-index-U.html | 4 - .../doc-index-W.html | 4 - .../doc-index.html | 4 - .../distributed-process-platform/frames.html | 30 - .../haddock-util.js | 344 ----------- .../hslogo-16.png | Bin 1684 -> 0 bytes .../index-frames.html | 4 - .../distributed-process-platform/index.html | 8 - ...uted-Process-Platform-Async-AsyncChan.html | 9 - ...buted-Process-Platform-Async-AsyncSTM.html | 10 - ...ol-Distributed-Process-Platform-Async.html | 9 - ...rol-Distributed-Process-Platform-Call.html | 4 - ...rocess-Platform-ManagedProcess-Client.html | 5 - ...rocess-Platform-ManagedProcess-Server.html | 7 - ...buted-Process-Platform-ManagedProcess.html | 9 - ...rol-Distributed-Process-Platform-Test.html | 4 - ...rol-Distributed-Process-Platform-Time.html | 4 - ...ol-Distributed-Process-Platform-Timer.html | 4 - ..._Control-Distributed-Process-Platform.html | 8 - .../distributed-process-platform/minus.gif | Bin 56 -> 0 bytes .../distributed-process-platform/ocean.css | 546 ------------------ .../doc/distributed-process-platform/plus.gif | Bin 59 -> 0 bytes .../distributed-process-platform/synopsis.png | Bin 11327 
-> 0 bytes tutorials/ch4.md | 142 +++-- tutorials/ch5.md | 21 + tutorials/ch6.md | 350 +++++++++++ 60 files changed, 455 insertions(+), 2056 deletions(-) create mode 100644 img/alert.png create mode 100644 img/info.png delete mode 100644 static/doc/distributed-process-platform/Control-Distributed-Process-Platform-Async-AsyncChan.html delete mode 100644 static/doc/distributed-process-platform/Control-Distributed-Process-Platform-Async-AsyncSTM.html delete mode 100644 static/doc/distributed-process-platform/Control-Distributed-Process-Platform-Async.html delete mode 100644 static/doc/distributed-process-platform/Control-Distributed-Process-Platform-Call.html delete mode 100644 static/doc/distributed-process-platform/Control-Distributed-Process-Platform-ManagedProcess-Client.html delete mode 100644 static/doc/distributed-process-platform/Control-Distributed-Process-Platform-ManagedProcess-Server.html delete mode 100644 static/doc/distributed-process-platform/Control-Distributed-Process-Platform-ManagedProcess.html delete mode 100644 static/doc/distributed-process-platform/Control-Distributed-Process-Platform-Test.html delete mode 100644 static/doc/distributed-process-platform/Control-Distributed-Process-Platform-Time.html delete mode 100644 static/doc/distributed-process-platform/Control-Distributed-Process-Platform-Timer.html delete mode 100644 static/doc/distributed-process-platform/Control-Distributed-Process-Platform.html delete mode 100644 static/doc/distributed-process-platform/distributed-process-platform.haddock delete mode 100644 static/doc/distributed-process-platform/doc-index-95.html delete mode 100644 static/doc/distributed-process-platform/doc-index-A.html delete mode 100644 static/doc/distributed-process-platform/doc-index-All.html delete mode 100644 static/doc/distributed-process-platform/doc-index-C.html delete mode 100644 static/doc/distributed-process-platform/doc-index-D.html delete mode 100644 
static/doc/distributed-process-platform/doc-index-E.html delete mode 100644 static/doc/distributed-process-platform/doc-index-F.html delete mode 100644 static/doc/distributed-process-platform/doc-index-G.html delete mode 100644 static/doc/distributed-process-platform/doc-index-H.html delete mode 100644 static/doc/distributed-process-platform/doc-index-I.html delete mode 100644 static/doc/distributed-process-platform/doc-index-K.html delete mode 100644 static/doc/distributed-process-platform/doc-index-L.html delete mode 100644 static/doc/distributed-process-platform/doc-index-M.html delete mode 100644 static/doc/distributed-process-platform/doc-index-N.html delete mode 100644 static/doc/distributed-process-platform/doc-index-P.html delete mode 100644 static/doc/distributed-process-platform/doc-index-R.html delete mode 100644 static/doc/distributed-process-platform/doc-index-S.html delete mode 100644 static/doc/distributed-process-platform/doc-index-T.html delete mode 100644 static/doc/distributed-process-platform/doc-index-U.html delete mode 100644 static/doc/distributed-process-platform/doc-index-W.html delete mode 100644 static/doc/distributed-process-platform/doc-index.html delete mode 100644 static/doc/distributed-process-platform/frames.html delete mode 100644 static/doc/distributed-process-platform/haddock-util.js delete mode 100644 static/doc/distributed-process-platform/hslogo-16.png delete mode 100644 static/doc/distributed-process-platform/index-frames.html delete mode 100644 static/doc/distributed-process-platform/index.html delete mode 100644 static/doc/distributed-process-platform/mini_Control-Distributed-Process-Platform-Async-AsyncChan.html delete mode 100644 static/doc/distributed-process-platform/mini_Control-Distributed-Process-Platform-Async-AsyncSTM.html delete mode 100644 static/doc/distributed-process-platform/mini_Control-Distributed-Process-Platform-Async.html delete mode 100644 
static/doc/distributed-process-platform/mini_Control-Distributed-Process-Platform-Call.html delete mode 100644 static/doc/distributed-process-platform/mini_Control-Distributed-Process-Platform-ManagedProcess-Client.html delete mode 100644 static/doc/distributed-process-platform/mini_Control-Distributed-Process-Platform-ManagedProcess-Server.html delete mode 100644 static/doc/distributed-process-platform/mini_Control-Distributed-Process-Platform-ManagedProcess.html delete mode 100644 static/doc/distributed-process-platform/mini_Control-Distributed-Process-Platform-Test.html delete mode 100644 static/doc/distributed-process-platform/mini_Control-Distributed-Process-Platform-Time.html delete mode 100644 static/doc/distributed-process-platform/mini_Control-Distributed-Process-Platform-Timer.html delete mode 100644 static/doc/distributed-process-platform/mini_Control-Distributed-Process-Platform.html delete mode 100644 static/doc/distributed-process-platform/minus.gif delete mode 100644 static/doc/distributed-process-platform/ocean.css delete mode 100644 static/doc/distributed-process-platform/plus.gif delete mode 100644 static/doc/distributed-process-platform/synopsis.png create mode 100644 tutorials/ch6.md diff --git a/css/bootstrap.css b/css/bootstrap.css index 8ab3cef..5307afe 100644 --- a/css/bootstrap.css +++ b/css/bootstrap.css @@ -899,9 +899,9 @@ blockquote { blockquote p { margin-bottom: 0; - font-size: 16px; +# font-size: 16px; font-weight: 300; - line-height: 25px; +# line-height: 25px; } blockquote small { diff --git a/documentation.md b/documentation.md index 6b75977..1256419 100644 --- a/documentation.md +++ b/documentation.md @@ -331,7 +331,7 @@ Haskell concurrency design patterns along the way. In fact, [distributed-process-platform][18] does not really consider the *task layer* in great detail. We provide an API comparable to remote's -`Promise` in [Control.Distributed.Process.Platform.Async][17]. 
This API however, +`Promise` in Control.Distributed.Process.Platform.Async. This API however, is derived from Simon Marlow's [Control.Concurrent.Async][19] package, and is not limited to blocking queries on `Async` handles in the same way. Instead our [API][17] handles both blocking and non-blocking queries, polling @@ -516,9 +516,9 @@ TBC [14]: http://hackage.haskell.org/package/remote [15]: http://www.erlang.org/doc/design_principles/sup_princ.html [16]: http://www.erlang.org/doc/man/supervisor.html -[17]: /static/doc/distributed-process-platform/Control-Distributed-Process-Platform-Async.html +[17]: http://hackage.haskell.org/package/distributed-process-platform/Control-Distributed-Process-Platform-Async.html [18]: https://github.com/haskell-distributed/distributed-process-platform [19]: http://hackage.haskell.org/package/async [20]: /wiki/networktransport.html -[21]: /static/doc/distributed-process-platform/Control-Distributed-Process-Platform-ManagedProcess.html +[21]: http://hackage.haskell.org/package/distributed-process-platform/Control-Distributed-Process-Platform-ManagedProcess.html [22]: /tutorials/tutorial3.html diff --git a/img/alert.png b/img/alert.png new file mode 100644 index 0000000000000000000000000000000000000000..fe1f4570a1882ef7766a1c5dac69d4e8a13031bc GIT binary patch literal 7306 zcmV;59ChP~P)4Tx09Xm_T4z)gO|$OJ3%lf;mz;A3$vGoANzPd^h#-=Kl2i~yl7NDO1WDpc z6c7;+L{Jm~MS_STK?Q_6=eaj zATTV%Oh=tyV{1piyZ}Ui4v+v^KyYyj4OTNXHH1X|L)&={U`MP8;Uit-bv)LJk{&h1 zR2@kw5)l;tq0?_NMF?aN0GKI6p!WKO-H)*AFC2A*Bg2BjAepQq?B?wP@nNM977DR2 z(}3^@09d@&PfRp@gk67Pig!oY#U;cG0EW+yJi*1q3&M=#zp(2O7CXWbZeEanMraIZ z%0PFYK!^m{Kr6d@gt`I1a~+a#cMo+70Du<@09-vFAPC~~AH5Skx8M+{FF$kuuA_Gc zU7_X)-7B;}I==ox^TGgd1Jcc8{|}8HdUobl0Ni=`4{g)z=lg`wJ_+@dl=!I*hF1pw zHG6ot$pZi;4}c%JhlhLRhlf8Z06;+fyZzn5Ln4040_4#J1&9F|pau+p6>tJRAPgjc zEKmfhKnv&t6JP;sfg^AO-XH*kfJhJv5H-={sz7Z-+~_^h!Hdh0zwcWi%>@xA#4#Ih+sr4A`Ni? 
zQHr>QXh%Fij3Z_d9}wS=NF)W46)A+2M`|N2kS@p|WGpfRS%|DcHY0nHPmyns>&Sf+ z4#kM#L&>6aP*x}pR2V7=m5-`GHKF=Y6R5YSO*8^cjpjm2qqWdhXfJdWIvrhvu0`KR zkD}+$UoZ#^4TcvZk1@nJVuCS=m`j)&n0uH}%sgfbi^VcwMX~BwE37Xz4x5Lq!ggRs zv2U?oiSR`1L^4DMM9xIvL>WY-M0bfE5zP_p5aWqCh~7?mw=}yy?(+$wA(v#6k(c99WrmvtMqW{Q1#URJv$dJTP!!X9M!^py@!RX7F!`R9= z%YCYIN%XjU;+JJuxDI@W16 z1e-9M4O=2x9oq{6k|0K~C!`XZ2(#?O?6T}`?Ah#{>?<5J9O@jw9K{^N9N##(In6l} zIO{oIbK$uZxO}(@xCXekxw*J4xD&aXxEFXRdDMAAcq(`%coDo(ydJ!lcn5jE@d@zR z^PS`Si|;c(2frnMDt|lwngE-CnLx5Yo4^M_RzWkt6u}O`bs-KRE1?XbE}<=9eql%9 zeBmMCA0m<>J|fpdo{8c_)kGsj>qOs)F^QRprHl25eG?ZG_YyA=pOPSx(3LnN(I&AW zDIn=4c~x>!icCsZ>a0|!)Q+^6w6Aof^cxvQ8B3WQnMbl{S#{Ys*>>4&IdQoFxf;3m z@|^O{^2PEm73dT!6!H|FD3T}|D5fh89z!3~JeGK@R|%%1suZu(rF5XItQ@EOm-2y% zib}jnw<=IoQ$4HNuZB|7QA<-BR>!LwtLLguXwYidXk5{l)nwQ7(7d6!rX{8orq!mk zudS+`tUatls$;HmS!Y(4Q`bkgUUyqhQ7=JnP@h!aQvZtnf`Nd+NrN^+V5n=DV>oR@ zF!DBPFxoX%H_kGiG+{OIGHEc`Gu1N9HhpoN>v-Vtb~B`ziCK}^qPe(vtoe`ywS}|A zZHw=g+Ln2iZ>)r@Vyp(OsjXeD?^qw$7}^xstk}xgrr1u}aoL60_1RO|yV*B7ARNpc zDjc>>Xr8!u;=QA+W4hx@Cn2Xer!i*^=P>6%7X}x9mo8UIR}a^AH@us(TeCaX-NC)V z1La}oQSXWLwDqj_LVDSG-SI|yJ9szx5cxRywD^+vy8CweQTh4#_4zaThx$JX;0%Zj zm<$vNObwh1QV7Zq`WUPeTo$}{((2@$5L}37NKYtpXjJG#m}ppL*iyJgcxm{4gl$Ai zBvoW^v{rOQ3_Qj;rt1{zsne%k#U6{j61#WW{`9>#rnuO+SMf^m*WwS) zIG^cF;7mwPSURhBwlN@mKZREyNMG?ui4wD;+T=?xjQ z8L=4)nYx*ES=3plvKG$io~u7kcRud?VzyEC-5i#ji{gqtT(Q2=e^vZyNin`Sx_IfD<+Z*N z@siR~veMI~Yi0IjkIEIwYbqEkGAeej`(B@^G_35b5~(VwCa*qQy>-Lm#`I17o1HbH zHRZK5wQ03`w}NiHy=`^-QJqR%Q$1gO@g0ggsdx4pPBttxo@ktC(rfB!mTA6ym+S78 z7Rr|NR#qeVT+kCrS`*?>zNB=$Ld##;foi+El?qBq6^opEN&>eRlY~vJw6T{v~^pd9!9qcI&~m`S$!y z(9XfvtZz);YQD>Vf4FPAySx{*kKMogga1dzfx*G6!@xhWz@J#)Pb}~!7Wfkj{D}qr z!~%a}fj|2KfA$6b>b-(!#zC#_)-l3i7fzB zV*ua~|6BgAJj$bG+d`Njn=gCUQT_|`c7Ab3}PEyNK@6zzRmH3^9ENkw>ys}FBcz*Z-xJ{K$BppP_}T4h@Ysdn2orZgs!BKl#R5LOrUIn z+$H%&g{O*NmDrVyRL-h)sO@UVYDQ`G>rm>t=+)`(8=4xG8*iHG9WOT9uu!*5vzoAB zwe_}Zu>X2O)iKj))P>yjxLcn4Gfyfn6Yn&i9^W7Sk^#PfWkIh_(uU}To(gLWUybC5 
zvX9P*8Hk0SR*j2}FFSKLq5tf3;%d@v3P~zwnqsxa&*mndJnx@sm@av7{$<>Z-z%%vYO_Lb=-wR6EzS=w z)W6MscXHA0z2=h8GTrjw%F^nS4;^czA5+%-Kbe1)-(cA|{Ia+?v{kd6y%X`({+rTw z!Y*caeec!oR}`GGZC8TIwPIMp(BIgKMNGwn280lh5)E5iz71Jh|{QxlGP5@xvjgZ&utKB*lKiOqGOtJ{INN; zg{@_Y)tn8V?Mb_4`<)XCjxkQ%E(ljmw?y|oPpp?N^c_6%qx828xDxm>nEj+r$eqx= zaJ7iE$mwYQn6OjTv45R@95)}onE*dalPH*^o@|#AoSK|gn%^)FTUYMvYFH(hFCYcJhu zy#1tZ{SI*hPorj&M|0BM>n#skH`-}Cuonlk7937Uul2;g9igGgSUrD zALb3GJ>RejGgN`NVnb#8caGvk8M|T9eAprKg0axnD59q?m!v?7UiiJv}@4rfIHZ zK4l^Jt?xVkMgRByOa9A#E556KAH3FFKb}~(`DFH4cSGrmJcUcZ3*%pq zB$0}f&XQ%3A0uC)D5Nx?+@rclZB2uqX{Pm{qo(Vo4`5(s7+?%#VrLp*j$+|tdCD5g z#>+NIIK$4*KFyKJDaZMdtCHK9hlS?_ZxNppKQI5fK)Yb7kTsNN*%cWRtr1HScal() zq?g>38kcUA$(0R~Gm#foz$<)G96464l%njRqN+-#`cZ9My;I}5X0}$Wwx^DTuCku6 zKC1!V;Je|n(X{cfNvG-UfDM1td4heU@Yn`F}zn^fPlsPxQ?tI)oE=eft{=d(ZO9ORMYvs@6qsCY@I zz^c&ma#+#XD;KZU7T>@2sARTuyBt@+d0npZcvVn!=8bDN%W8^iuiYxXU08Ry{>q(_ zhKk1WrW?&ocRO2pTZh^v+Fy3e-&^h6y8ojK)s5?+>SgKU?B{(TG$1x8J|z3lcX(ig z<#Etx{}}Vr$nhu7WG0KIu+xbzQ8U@Ed1r^_PA_P_<9d%})f9>99D0jxF|z|^yU zuOCM%6~F=tD32`+RG_@IKa{7g0R3PQMhugHIl$7OjPeRJ!&t)4!-o-Agf1cz@f693 zbVoKI4^bvi4);6S6kP*-aW0r{EIT$4yG&$6)IiKgoJ72av&Qw}CGf>07?N<31yU2z z`(%P-h2$9WQ1W>SQ;PePf|P|+D5`L(MQU^E9vU&4Qd&ydWZLg^{&aItuJQ?k4nsHN zF~$}q8Kw?qW#(QMU6wIc8`ed(5Vk`?7CS3@6NethJm)DcYG{Ts<^IA`z^lyrmM@E6 zp8vhTML|u$O`$4bM-h7H>r5BZ5(nbaJ zF~X-(W0&JJ4mKxtR&$cYLZQsM+=j<}>3F?b~P&(N0aCQVUDmG>_p7U&I z3jIQ3CiC^HIi7{scauv3E2lraUDyA7a}&26`t{we_I~Hl_X9NqRLejExPS~W1RhYG zLK)}-D^T5mGE`qs0-J=Bz;)p%@Bsu7!UR!(m`4gCV~~R=dXzV+15JwdK@Vd1G3l5O zSW|2>5i3zD(FU;{@eocLSB|H|C*pTVyhvU`p6?-(CaWRmBrl>Mr$~kJ+!2&tsC=mA zsqLws(-_kX(`wW9&?(Wi(aX}eGDtGiF^V(RGl?^`L0<1;F<_Zwbz)s-izN^d3fTqN zyE!a5);LqRSh?D{t+;m}uPgG-@STBXq$dJVf&{@Kp-^FV;b$UAqLNUiw?JH1{D(w~ zWS|t6)Ub#$Q6EzfGi(X|XWNrVl3h~jQg_ph zG8(h=&i%-~k?WYxaAD+9VxjEi?JF(CfhByU%jI{jM^>rcpt`wK+j;w9eMEz6lfhk? 
zR`GU`ds6q6x@>yf`_vwY4>CT)5C0hXHo88xGQRa}e+u`4aYpL3^_x@kk6BbTLC z{nnb-ziudPCTx#?p+920F$-8{>@3jv8hA# z1B2{29QK@ioZDP|+?RO#cs2NF`8N1R1gZrSgdBvGML0weqKje=#cxX#NG3>mN*l`@ zlNFbfl~+mKYrxKx>sCH4kTBAv`S8G!HgDz5!USHb4!Z6e*&$!uS<~Z6+ z%G}8!+w!3`%tp;N-fqx==7gbRoKu_gx+}k%yL*+#vKNoHzfXrB(%&MWEO0B>;AB5Jqg|~%(Y?$4 z#t%9N#fEBzSs&#;Mvq26*@PmA(Pwg#B~xJ9_QjnUgLHI)a<5_|9{nB{hSAgHG=$uLI{Q$nm=#E!SWGB_FoWD70>44~E11B62rUmhS3T2Bpu2GIQb zEDSt>3&e?lc%i@#yoYcA)FTM$69i$x-*OuNM$!J+&kO4JKQ$7d`m!L%+Hae`tPvoa zzjgWmcL@KcyFfOMct@iPb@Dm;kIml_NBVw_MyytBMQs1~n2eZf@<1EBgEV{n9^;YzfJV=V!k}mcD{qK<^z5#`xxnhDw9mip z|7`@oXLPM~?e_?v`pKU)fzX=vj}j<;L$WB2|mYe+^eb!9TPFvrN<>stfyC5jM^t1&B9&i# zj4Hr04z{t6V=57lUqV?qcG9pk^NbE_GPin#^h=qWpkLQ!qz6ox6cgE0v@rq+(AIESk=aeI&tP1FxkA190`vAUn89MnghQVKwXxkRTU2R)oMANym zz&?uMSk9@6fcVJU9jeQ_59W77GM}4i6Zh}-A`q;H-ycB0N9)L#OMZ&w9L`-&#JUyG zS`POrbEkmx46WRvnIfVTcHWtVs=1-i?AXZ_AUpdwhGIE~bJr7~trd`*J=>)vhe8;C zfe0wF;Lfzx0%q(bG}c8>nq*XWAZEXSFZhso zONo39Va<|toz-*ZO42e?_{2)R0omZgF#3FT4UfQd&VWln6Kn&rZa-Zq?)fXxnXw9k z7AEyg#9_eUqz3K?kScvxo!io_wLbf_qBqio;>Q1X3;0Q3Lan0X^ zPcYSUiOd_RyhSd56$A-FS8yXE@LTjct}$Za&WY>a!sAg6_^(wsBOeENH#r4 k?Q!)fsvgh(NR?FJFL;{NUfcdWg#Z8m07*qoM6N<$f{igIu>b%7 literal 0 HcmV?d00001 diff --git a/img/info.png b/img/info.png new file mode 100644 index 0000000000000000000000000000000000000000..4e197d437ce0ec122accfdc0887bd71cd853d41f GIT binary patch literal 2551 zcmZ8jc{mjK8y}Jw#|U9q%#0&Dm=SV}F^-vpawKx37`K?C8TYV}E3MoaE3qh7#3a^j z+-HtNEy9>&U11$1m1Vcx=lA^H=Xt;H`+c9!`@Wyg_pfh)wdHvMpd=6g00@|w;%p8h zwJWDnuXlHv)B-AC-|X$ls6vKYf^kg*60AqIg0y5t;}!7zPM| zK+qHqFO&_=ih+qu_iQoF0DOol zkt+AkkbmdkJgM#!Uvhvii3mBGi+3Xh2I#|JM~nWm?>q_Q{}vIc-&GyzLx$kV$TJ8v z*BBOd?5>mSj7R}WQV zfQOy`z90rzA-t*y0PyIV;f!n@c)RScCfeD9zYRqU2e#LXnYC1w!$h0b2e;5x%VVV8w4)D!ne-K<}bYB z80@0fZNI)Xzf9&&-{S2|_pM_Y6EtHo>^92WEf}_5^H3 z*22}}hS+e+w)S?=Z|u_Z+R;#BCDFS%`n25=wbK1~`7_NxSwwv|*jcD(pykJjrldc9 zf8#7UtFsU9_D+BEwp#ju)5@r`2(#_H(}+aH?H;xf%B8zOfc`KQ?;ifGlx_+tQlw<_>_`^7Wa7@2UCxGO=q7c 
zIx`Ys>sx1zaSKzRlasNsU*AhQ6ScFR`;bWM!f{pns9SI-fT|%;m#POabJL^^-T|kl_R>c_#`AWXCFWv)QMLCLr zlH;tGOPc17g}-foka#KPzD6>P12)NYO$dOFy{L^9&zr`U|l%>4VgMr;` ziYN+UC6c6>><$r7tr0hHWxng@8%ybeQC>Cn@yxjw;5tscF~^xOxoaCQ$LIOj#Jpi| zXXHn{XF2c!h?S#)N|kU%c%Sp=yJ$iqnShS{+NiI1GxnU_q!pYCw!G&5*maFP*$eBx zh^cqP0_i+L_*JPaK0T|ZB)&@L2i8bX93tLk3K z=eSQaS*j0OV&_sTcJ6#OCyQ(NHwH^u*UUzf?~5DBvMtn*?4=mP>XC|5zv;>IxS3!_ zx(oF$-U!Q@(u?Uju@&aSGx8!-D~=w(|yP5kQ zGs_CQD^K(Dp(@q44EEX~ndOi>6-qumx^`_E)S4x{^SW9isy&hg`^G1amCS6mf ztL@){Uo|w=yAB@~?8EX*?WiRCja>l0QFxEVMD+yYHYb)&k4S7jQ1>iQe0)_xP^a>s zS$ZXzn--PiF`TDMeg8*q?;ZNp=VrOH%_1Fqw|+R+#GIpj5ly3%mX6%4%@*II14}>+ z8(&X1Zq^J3+NCAsGLmEszHLz#hu%`;UtWtM z>Q*{%vI?Ii@V6(miPKe;F0I7brqHo)&>!p=F2a@Wh={E66^r+n^`Vxjiygh<=%8=|t!thugS@F# zXdKCe_th|e=i&#alJDF}lxvC+1#TFe<9a4~V!9roBmeDqB^fg)_gs78=xb(n&Jy?3 H*!A{bControl.Distributed.Process.Platform.Async.AsyncChan

distributed-process-platform-0.1.0: The Cloud Haskell Application Platform

Portabilitynon-portable (requires concurrency)
Stabilityexperimental
MaintainerTim Watson <watson.timothy@gmail.com>
Safe HaskellNone

Control.Distributed.Process.Platform.Async.AsyncChan

Description

This module provides a set of operations for spawning Process operations - and waiting for their results. It is a thin layer over the basic - concurrency operations provided by Control.Distributed.Process. - The main feature it provides is a pre-canned set of APIs for waiting on the - result of one or more asynchronously running (and potentially distributed) - processes. -

The async handles returned by this module cannot be used by processes other - than the caller of async, and are not Serializable. Specifically, calls - that block until an async worker completes (i.e., all variants of wait) - will never return if called from a different process. -

 h <- newEmptyMVar
- outer <- spawnLocal $ async runMyAsyncTask >>= liftIO $ putMVar h
- hAsync <- liftIO $ takeMVar h
- say "this expression will never return, because hAsync belongs to 'outer'"
- wait hAsync
-

As with Async, workers can be - started on a local or remote node. -

See Control.Distributed.Platform.Async. -

Synopsis

Exported types -

type AsyncRef = ProcessId

A reference to an asynchronous action -

data AsyncTask a

A task to be performed asynchronously. -

Constructors

AsyncTask 

Fields

asyncTask :: Process a

the task to be performed -

AsyncRemoteTask 

Fields

asyncTaskDict :: Static (SerializableDict a)

the serializable dict required to spawn a remote process -

asyncTaskNode :: NodeId

the node on which to spawn the asynchronous task -

asyncTaskProc :: Closure (Process a)

the task to be performed, wrapped in a closure environment -

data AsyncChan a

A handle for an asynchronous action spawned by async. - Asynchronous actions are run in a separate process, and - operations are provided for waiting for asynchronous actions to - complete and obtaining their results (see e.g. wait). -

Handles of this type cannot cross remote boundaries. Furthermore, handles - of this type must not be passed to functions in this module by processes - other than the caller of async - that is, this module provides asynchronous - actions whose results are accessible *only* by the initiating process. This - limitation is imposed becuase of the use of typed channels, for which the - ReceivePort component is effectively thread local. -

See async -

data AsyncResult a

Represents the result of an asynchronous action, which can be in one of - several states at any given time. -

Constructors

AsyncDone a

a completed action and its result -

AsyncFailed DiedReason

a failed action and the failure reason -

AsyncLinkFailed DiedReason

a link failure and the reason -

AsyncCancelled

a cancelled action -

AsyncPending

a pending action (that is still running) -

Instances

Typeable1 AsyncResult 
Eq a => Eq (AsyncResult a) 
Show a => Show (AsyncResult a) 
Binary a_1627460136 => Binary (AsyncResult a_1627460136) 

data Async a

An opaque handle that refers to an asynchronous operation. -

Spawning asynchronous operations -

async :: Serializable a => AsyncTask a -> Process (AsyncChan a)

Spawns an asynchronous action in a new process. - We ensure that if the caller's process exits, that the worker is killed. - Because an AsyncChan can only be used by the initial caller's process, if - that process dies then the result (if any) is discarded. If a process other - than the initial caller attempts to obtain the result of an asynchronous - action, the behaviour is undefined. It is highly likely that such a - process will block indefinitely, quite possible that such behaviour could lead - to deadlock and almost certain that resource starvation will occur. Do Not - share the handles returned by this function across multiple processes. -

If you need to spawn an asynchronous operation whose handle can be shared by - multiple processes then use the AsyncSTM module instead. -

There is currently a contract for async workers, that they should - exit normally (i.e., they should not call the exit or kill with their own - ProcessId nor use the terminate primitive to cease functining), otherwise - the AsyncResult will end up being AsyncFailed DiedException instead of - containing the desired result. -

asyncLinked :: Serializable a => AsyncTask a -> Process (AsyncChan a)

For *AsyncChan*, async already ensures an AsyncChan is - never left running unintentionally. This function is provided for compatibility - with other async implementations that may offer different semantics for - async with regards linking. -

asyncLinked = async

newAsync :: Serializable a => (AsyncTask a -> Process (AsyncChan a)) -> AsyncTask a -> Process (Async a)

Create a new AsyncChane and wrap it in an Async record. -

Used by Control.Distributed.Process.Platform.Async. -

Cancelling asynchronous operations -

cancel :: AsyncChan a -> Process ()

Cancel an asynchronous operation. Cancellation is asynchronous in nature. -

See Control.Distributed.Process.Platform.Async. -

cancelWait :: Serializable a => AsyncChan a -> Process (AsyncResult a)

Cancel an asynchronous operation and wait for the cancellation to complete. -

See Control.Distributed.Process.Platform.Async. -

cancelWith :: Serializable b => b -> AsyncChan a -> Process ()

Cancel an asynchronous operation immediately. -

See Control.Distributed.Process.Platform.Async. -

cancelKill :: String -> AsyncChan a -> Process ()

Like cancelWith but sends a kill instruction instead of an exit. -

See Control.Distributed.Process.Platform.Async. -

Querying for results -

poll :: Serializable a => AsyncChan a -> Process (AsyncResult a)

Check whether an AsyncChan has completed yet. -

See Control.Distributed.Process.Platform.Async. -

check :: Serializable a => AsyncChan a -> Process (Maybe (AsyncResult a))

Like poll but returns Nothing if (poll hAsync) == AsyncPending. -

See Control.Distributed.Process.Platform.Async. -

wait :: Serializable a => AsyncChan a -> Process (AsyncResult a)

Wait for an asynchronous action to complete, and return its - value. The outcome of the action is encoded as an AsyncResult. -

See Control.Distributed.Process.Platform.Async. -

waitAny :: Serializable a => [AsyncChan a] -> Process (AsyncResult a)

Wait for any of the supplied AsyncChanss to complete. If multiple - Asyncs complete, then the value returned corresponds to the first - completed Async in the list. Only unread Asyncs are of value here, - because AsyncChan does not hold on to its result after it has been read! -

This function is analagous to the mergePortsBiased primitive. -

See Control.Distibuted.Process.mergePortsBiased. -

waitAnyCancel :: Serializable a => [AsyncChan a] -> Process (AsyncResult a)

Like waitAny, but also cancels the other asynchronous - operations as soon as one has completed. -

Waiting with timeouts -

waitAnyTimeout :: Serializable a => TimeInterval -> [AsyncChan a] -> Process (Maybe (AsyncResult a))

Like waitAny but times out after the specified delay. -

waitTimeout :: Serializable a => TimeInterval -> AsyncChan a -> Process (Maybe (AsyncResult a))

Wait for an asynchronous operation to complete or timeout. -

See Control.Distributed.Process.Platform.Async. -

waitCancelTimeout :: Serializable a => TimeInterval -> AsyncChan a -> Process (AsyncResult a)

Wait for an asynchronous operation to complete or timeout. If it times out, - then cancelWait the async handle instead. -

waitCheckTimeout :: Serializable a => TimeInterval -> AsyncChan a -> Process (AsyncResult a)

Wait for an asynchronous operation to complete or timeout. -

See Control.Distributed.Process.Platform.Async. -

\ No newline at end of file diff --git a/static/doc/distributed-process-platform/Control-Distributed-Process-Platform-Async-AsyncSTM.html b/static/doc/distributed-process-platform/Control-Distributed-Process-Platform-Async-AsyncSTM.html deleted file mode 100644 index c9e9599..0000000 --- a/static/doc/distributed-process-platform/Control-Distributed-Process-Platform-Async-AsyncSTM.html +++ /dev/null @@ -1,90 +0,0 @@ -Control.Distributed.Process.Platform.Async.AsyncSTM

distributed-process-platform-0.1.0: The Cloud Haskell Application Platform

Portabilitynon-portable (requires concurrency)
Stabilityexperimental
MaintainerTim Watson <watson.timothy@gmail.com>
Safe HaskellNone

Control.Distributed.Process.Platform.Async.AsyncSTM

Description

This module provides a set of operations for spawning Process operations - and waiting for their results. It is a thin layer over the basic - concurrency operations provided by Control.Distributed.Process. -

The difference between AsyncSTM and - AsyncChan is that handles of the - former (i.e., returned by this module) can be used by processes other - than the caller of async, but are not Serializable. -

As with AsyncChan, workers can be - started on a local or remote node. -

Portions of this file are derived from the Control.Concurrent.Async - module, written by Simon Marlow. -

Synopsis

Exported types -

type AsyncRef = ProcessId

A reference to an asynchronous action -

data AsyncTask a

A task to be performed asynchronously. -

Constructors

AsyncTask 

Fields

asyncTask :: Process a

the task to be performed -

AsyncRemoteTask 

Fields

asyncTaskDict :: Static (SerializableDict a)

the serializable dict required to spawn a remote process -

asyncTaskNode :: NodeId

the node on which to spawn the asynchronous task -

asyncTaskProc :: Closure (Process a)

the task to be performed, wrapped in a closure environment -

data AsyncSTM a

An handle for an asynchronous action spawned by async. - Asynchronous operations are run in a separate process, and - operations are provided for waiting for asynchronous actions to - complete and obtaining their results (see e.g. wait). -

Handles of this type cannot cross remote boundaries, nor are they - Serializable. -

Instances

Eq (AsyncSTM a) 

data AsyncResult a

Represents the result of an asynchronous action, which can be in one of - several states at any given time. -

Constructors

AsyncDone a

a completed action and its result -

AsyncFailed DiedReason

a failed action and the failure reason -

AsyncLinkFailed DiedReason

a link failure and the reason -

AsyncCancelled

a cancelled action -

AsyncPending

a pending action (that is still running) -

Instances

Typeable1 AsyncResult 
Eq a => Eq (AsyncResult a) 
Show a => Show (AsyncResult a) 
Binary a_1627460136 => Binary (AsyncResult a_1627460136) 

data Async a

An opaque handle that refers to an asynchronous operation. -

Spawning asynchronous operations -

async :: Serializable a => AsyncTask a -> Process (AsyncSTM a)

Spawns an asynchronous action in a new process. -

asyncLinked :: Serializable a => AsyncTask a -> Process (AsyncSTM a)

This is a useful variant of async that ensures an AsyncChan is - never left running unintentionally. We ensure that if the caller's process - exits, that the worker is killed. Because an AsyncChan can only be used - by the initial caller's process, if that process dies then the result - (if any) is discarded. -

newAsync :: Serializable a => (AsyncTask a -> Process (AsyncSTM a)) -> AsyncTask a -> Process (Async a)

Create a new AsyncSTM and wrap it in an Async record. -

Used by Async. -

Cancelling asynchronous operations -

cancel :: AsyncSTM a -> Process ()

Cancel an asynchronous operation. -

See Control.Distributed.Process.Platform.Async. -

cancelWait :: Serializable a => AsyncSTM a -> Process (AsyncResult a)

Cancel an asynchronous operation and wait for the cancellation to complete. -

See Control.Distributed.Process.Platform.Async. -

cancelWith :: Serializable b => b -> AsyncSTM a -> Process ()

Cancel an asynchronous operation immediately. -

See Control.Distributed.Process.Platform.Async. -

cancelKill :: String -> AsyncSTM a -> Process ()

Like cancelWith but sends a kill instruction instead of an exit. -

See Async. -

Querying for results -

poll :: Serializable a => AsyncSTM a -> Process (AsyncResult a)

Check whether an AsyncSTM has completed yet. -

See Control.Distributed.Process.Platform.Async. -

check :: Serializable a => AsyncSTM a -> Process (Maybe (AsyncResult a))

Like poll but returns Nothing if (poll hAsync) == AsyncPending. -

See Control.Distributed.Process.Platform.Async. -

wait :: AsyncSTM a -> Process (AsyncResult a)

Wait for an asynchronous action to complete, and return its - value. The result (which can include failure and/or cancellation) is - encoded by the AsyncResult type. -

wait = liftIO . atomically . waitSTM

See Control.Distributed.Process.Platform.Async. -

waitAny :: Serializable a => [AsyncSTM a] -> Process (AsyncSTM a, AsyncResult a)

Wait for any of the supplied AsyncSTMs to complete. If multiple - Asyncs complete, then the value returned corresponds to the first - completed Async in the list. -

NB: Unlike AsyncChan, AsyncSTM does not discard its AsyncResult once - read, therefore the semantics of this function are different to the - former. Specifically, if asyncs = [a1, a2, a3] and (AsyncDone _) = a1 - then the remaining a2, a3 will never be returned by waitAny. -

Waiting with timeouts -

waitAnyTimeout :: Serializable a => TimeInterval -> [AsyncSTM a] -> Process (Maybe (AsyncResult a))

Like waitAny but times out after the specified delay. -

waitTimeout :: Serializable a => TimeInterval -> AsyncSTM a -> Process (Maybe (AsyncResult a))

Wait for an asynchronous operation to complete or timeout. -

See Control.Distributed.Process.Platform.Async. -

waitCheckTimeout :: Serializable a => TimeInterval -> AsyncSTM a -> Process (AsyncResult a)

Wait for an asynchronous operation to complete or timeout. -

See Control.Distributed.Process.Platform.Async. -

STM versions -

pollSTM :: AsyncSTM a -> STM (Maybe (AsyncResult a))

A version of poll that can be used inside an STM transaction. -

waitTimeoutSTM :: Serializable a => TimeInterval -> AsyncSTM a -> Process (Maybe (AsyncResult a))

As waitTimeout but uses STM directly, which might be more efficient. -

waitAnyCancel :: Serializable a => [AsyncSTM a] -> Process (AsyncSTM a, AsyncResult a)

Like waitAny, but also cancels the other asynchronous - operations as soon as one has completed. -

waitEither :: AsyncSTM a -> AsyncSTM b -> Process (Either (AsyncResult a) (AsyncResult b))

Wait for the first of two AsyncSTMs to finish. -

waitEither_ :: AsyncSTM a -> AsyncSTM b -> Process ()

Like waitEither, but the result is ignored. -

waitBoth :: AsyncSTM a -> AsyncSTM b -> Process (AsyncResult a, AsyncResult b)

Waits for both AsyncSTMs to finish. -

\ No newline at end of file diff --git a/static/doc/distributed-process-platform/Control-Distributed-Process-Platform-Async.html b/static/doc/distributed-process-platform/Control-Distributed-Process-Platform-Async.html deleted file mode 100644 index 74b842c..0000000 --- a/static/doc/distributed-process-platform/Control-Distributed-Process-Platform-Async.html +++ /dev/null @@ -1,118 +0,0 @@ -Control.Distributed.Process.Platform.Async

distributed-process-platform-0.1.0: The Cloud Haskell Application Platform

Portabilitynon-portable (requires concurrency)
Stabilityexperimental
MaintainerTim Watson <watson.timothy@gmail.com>
Safe HaskellNone

Control.Distributed.Process.Platform.Async

Description

The async APIs provided by distributed-process-platform provide means - for spawning asynchronous operations, waiting for their results, cancelling - them and various other utilities. The two primary implementation are - AsyncChan which provides a handle which is scoped to the calling process, - and AsyncSTM, whose async mechanism can be used by (i.e., shared across) - multiple local processes. -

Both abstractions can run asynchronous operations on remote nodes. The STM - based implementation provides a slightly richer API. The API defined in - this module only supports a subset of operations on async handles, - and (specifically) does not support mixing handles initialised via - different implementations. -

There is an implicit contract for async workers; Workers must exit - normally (i.e., should not call the exit, die or terminate - Cloud Haskell primitives), otherwise the AsyncResult will end up being - AsyncFailed DiedException instead of containing the result. -

See Control.Distributed.Process.Platform.Async.AsyncSTM, - Control.Distributed.Process.Platform.Async.AsyncChan. -

See Control.Distributed.Platform.Task for a high level layer built - on these capabilities. -

Exported Types -

data Async a

An opaque handle that refers to an asynchronous operation. -

type AsyncRef = ProcessId

A reference to an asynchronous action -

data AsyncTask a

A task to be performed asynchronously. -

Constructors

AsyncTask 

Fields

asyncTask :: Process a

the task to be performed -

AsyncRemoteTask 

Fields

asyncTaskDict :: Static (SerializableDict a)

the serializable dict required to spawn a remote process -

asyncTaskNode :: NodeId

the node on which to spawn the asynchronous task -

asyncTaskProc :: Closure (Process a)

the task to be performed, wrapped in a closure environment -

data AsyncResult a

Represents the result of an asynchronous action, which can be in one of - several states at any given time. -

Constructors

AsyncDone a

a completed action and its result -

AsyncFailed DiedReason

a failed action and the failure reason -

AsyncLinkFailed DiedReason

a link failure and the reason -

AsyncCancelled

a cancelled action -

AsyncPending

a pending action (that is still running) -

Instances

Typeable1 AsyncResult 
Eq a => Eq (AsyncResult a) 
Show a => Show (AsyncResult a) 
Binary a_1627460136 => Binary (AsyncResult a_1627460136) 

Spawning asynchronous operations -

async :: Serializable a => Process a -> Process (Async a)

Spawn an AsyncTask and return the Async handle to it. - See asyncSTM. -

asyncLinked :: Serializable a => Process a -> Process (Async a)

Spawn an AsyncTask (linked to the calling process) and - return the Async handle to it. - See asyncSTM. -

asyncSTM :: Serializable a => AsyncTask a -> Process (Async a)

Spawn an AsyncTask and return the Async handle to it. - Uses the STM implementation, whose handles can be read by other - processes, though they're not Serializable. -

See AsyncSTM. -

asyncLinkedSTM :: Serializable a => AsyncTask a -> Process (Async a)

Spawn an AsyncTask (linked to the calling process) and return the - Async handle to it. Uses the STM based implementation, whose handles - can be read by other processes, though they're not Serializable. -

See AsyncSTM. -

asyncChan :: Serializable a => AsyncTask a -> Process (Async a)

Spawn an AsyncTask and return the Async handle to it. - Uses a channel based implementation, whose handles can only be read once, - and only by the calling process. -

See AsyncChan. -

asyncLinkedChan :: Serializable a => AsyncTask a -> Process (Async a)

Linked version of asyncChan. -

See AsyncChan. -

task :: Process a -> AsyncTask a

Wraps a regular Process a as an AsyncTask. -

remoteTask :: Static (SerializableDict a) -> NodeId -> Closure (Process a) -> AsyncTask a

Wraps the components required and builds a remote AsyncTask. -

Cancelling asynchronous operations -

cancel :: Async a -> Process ()

Cancel an asynchronous operation. Cancellation is asynchronous in nature. - To wait for cancellation to complete, use cancelWait instead. The notes - about the asynchronous nature of cancelWait apply here also. -

See Process -

cancelWait :: Serializable a => Async a -> Process (AsyncResult a)

Cancel an asynchronous operation and wait for the cancellation to complete. - Because of the asynchronous nature of message passing, the instruction to - cancel will race with the asynchronous worker, so it is entirely possible - that the AsyncResult returned will not necessarily be AsyncCancelled. For - example, the worker may complete its task after this function is called, but - before the cancellation instruction is acted upon. -

If you wish to stop an asychronous operation immediately (with caveats) - then consider using cancelWith or cancelKill instead. -

cancelWith :: Serializable b => b -> Async a -> Process ()

Cancel an asynchronous operation immediately. - This operation is performed by sending an exit signal to the asynchronous - worker, which leads to the following semantics: -

  1. If the worker already completed, this function has no effect. -
  2. The worker might complete after this call, but before the signal arrives. -
  3. The worker might ignore the exit signal using catchExit. -

In case of (3), this function has no effect. You should use cancel - if you need to guarantee that the asynchronous task is unable to ignore - the cancellation instruction. -

You should also consider that when sending exit signals to a process, the - definition of immediately is somewhat vague and a scheduler might take - time to handle the request, which can lead to situations similar to (1) as - listed above, if the scheduler to which the calling process' thread is bound - decides to GC whilst another scheduler on which the worker is running is able - to continue. -

See exit -

cancelKill :: String -> Async a -> Process ()

Like cancelWith but sends a kill instruction instead of an exit signal. -

See kill -

Querying for results -

poll :: Serializable a => Async a -> Process (AsyncResult a)

Check whether an Async handle has completed yet. The status of the - action is encoded in the returned AsyncResult. If the action has not - completed, the result will be AsyncPending, or one of the other - constructors otherwise. This function does not block waiting for the result. - Use wait or waitTimeout if you need blocking/waiting semantics. -

check :: Serializable a => Async a -> Process (Maybe (AsyncResult a))

Like poll but returns Nothing if (poll hAsync) == AsyncPending. - See poll. -

wait :: Async a -> Process (AsyncResult a)

Wait for an asynchronous action to complete, and return its - value. The result (which can include failure and/or cancellation) is - encoded by the AsyncResult type. -

Waiting with timeouts -

waitTimeout :: Serializable a => TimeInterval -> Async a -> Process (Maybe (AsyncResult a))

Wait for an asynchronous operation to complete or timeout. Returns - Nothing if the AsyncResult does not change from AsyncPending within - the specified delay, otherwise Just asyncResult is returned. If you want - to wait/block on the AsyncResult without the indirection of Maybe then - consider using wait or waitCheckTimeout instead. -

waitCancelTimeout :: Serializable a => TimeInterval -> Async a -> Process (AsyncResult a)

Wait for an asynchronous operation to complete or timeout. If it times out, - then cancelWait the async handle instead. -

waitCheckTimeout :: Serializable a => TimeInterval -> Async a -> Process (AsyncResult a)

Wait for an asynchronous operation to complete or timeout. This variant - returns the AsyncResult itself, which will be AsyncPending if the - result has not been made available, otherwise one of the other constructors. -

\ No newline at end of file diff --git a/static/doc/distributed-process-platform/Control-Distributed-Process-Platform-Call.html b/static/doc/distributed-process-platform/Control-Distributed-Process-Platform-Call.html deleted file mode 100644 index 92c5689..0000000 --- a/static/doc/distributed-process-platform/Control-Distributed-Process-Platform-Call.html +++ /dev/null @@ -1,34 +0,0 @@ -Control.Distributed.Process.Platform.Call

distributed-process-platform-0.1.0: The Cloud Haskell Application Platform

Safe HaskellNone

Control.Distributed.Process.Platform.Call

Description

Maintainers : Jeff Epstein, Tim Watson - Stability : experimental - Portability : non-portable (requires concurrency) -

This module provides a facility for Remote Procedure Call (rpc) style - interactions with Cloud Haskell processes. -

Clients make synchronous calls to a running process (i.e., server) using the - callAt, callTimeout and multicall functions. Processes acting as the - server are constructed using Cloud Haskell's receive family of primitives - and the callResponse family of functions in this module. -

Synopsis

Documentation

callAt :: (Serializable a, Serializable b) => ProcessId -> a -> Tag -> Process (Maybe b)

Like callTimeout, but with no timeout. - Returns Nothing if the target process dies. -

callTimeout :: (Serializable a, Serializable b) => ProcessId -> a -> Tag -> Timeout -> Process (Maybe b)

Sends a message of type a to the given process, to be handled by a - corresponding callResponse... function, which will send back a message of - type b. The tag is per-process unique identifier of the transaction. If the - timeout expires or the target process dies, Nothing will be returned. -

multicall :: forall a b. (Serializable a, Serializable b) => [ProcessId] -> a -> Tag -> Timeout -> Process [Maybe b]

Like callTimeout, but sends the message to multiple - recipients and collects the results. -

callResponse :: (Serializable a, Serializable b) => (a -> Process (b, c)) -> Match c

Produces a Match that can be used with the receiveWait family of - message-receiving functions. callResponse will respond to a message of - type a sent by callTimeout, and will respond with a value of type b. -

callResponseIf :: (Serializable a, Serializable b) => (a -> Bool) -> (a -> Process (b, c)) -> Match c

callResponseDefer :: (Serializable a, Serializable b) => (a -> (b -> Process ()) -> Process c) -> Match c

callResponseDeferIf :: (Serializable a, Serializable b) => (a -> Bool) -> (a -> (b -> Process ()) -> Process c) -> Match c

callForward :: Serializable a => (a -> (ProcessId, c)) -> Match c

Produces a Match that can be used with the receiveWait family of - message-receiving functions. When callForward receives a message of type a - from callTimeout (and similar), it will forward the message to another - process, which will be responsible for responding to it. It is the user's - responsibility to ensure that the forwarding process is linked to the - destination process, so that if it fails, the sender will be notified. -

callResponseAsync :: (Serializable a, Serializable b) => (a -> Maybe c) -> (a -> Process b) -> Match c

The message handling code is started in a separate thread. It's not - automatically linked to the calling thread, so if you want it to be - terminated when the message handling thread dies, you'll need to call - link yourself. -

\ No newline at end of file diff --git a/static/doc/distributed-process-platform/Control-Distributed-Process-Platform-ManagedProcess-Client.html b/static/doc/distributed-process-platform/Control-Distributed-Process-Platform-ManagedProcess-Client.html deleted file mode 100644 index a3ac0e7..0000000 --- a/static/doc/distributed-process-platform/Control-Distributed-Process-Platform-ManagedProcess-Client.html +++ /dev/null @@ -1,36 +0,0 @@ -Control.Distributed.Process.Platform.ManagedProcess.Client

distributed-process-platform-0.1.0: The Cloud Haskell Application Platform

Safe HaskellNone

Control.Distributed.Process.Platform.ManagedProcess.Client

Description

Client API -

Synopsis

API for client interactions with the process -

shutdown :: ProcessId -> Process ()

Send a signal instructing the process to terminate. The receive loop which - manages the process mailbox will prioritise Shutdown signals higher than - any other incoming messages, but the server might be busy (i.e., still in the - process of executing a handler) at the time of sending however, so the caller - should not make any assumptions about the timeliness with which the shutdown - signal will be handled. If responsiveness is important, a better approach - might be to send an exit signal with Shutdown as the reason. An exit - signal will interrupt any operation currently underway and force the running - process to clean up and terminate. -

call :: forall a b. (Serializable a, Serializable b) => ProcessId -> a -> Process b

Make a synchronous call - will block until a reply is received. - The calling process will exit with TerminateReason if the call fails. -

safeCall :: forall a b. (Serializable a, Serializable b) => ProcessId -> a -> Process (Either TerminateReason b)

Safe version of call that returns information about the error - if the operation fails. If an error occurs then the explanation will be - stashed away as (TerminateOther String). -

tryCall :: forall s a b. (Addressable s, Serializable a, Serializable b) => s -> a -> Process (Maybe b)

Version of safeCall that returns Nothing if the operation fails. If - you need information about *why* a call has failed then you should use - safeCall or combine catchExit and call instead. -

callAsync :: forall s a b. (Addressable s, Serializable a, Serializable b) => s -> a -> Process (Async b)

Performs a synchronous call to the given server address, however the - call is made out of band and an async handle is returned immediately. This - can be passed to functions in the Async API in order to obtain the result. -

See Control.Distributed.Process.Platform.Async -

callTimeout :: forall s a b. (Addressable s, Serializable a, Serializable b) => s -> a -> TimeInterval -> Process (Maybe b)

Make a synchronous call, but timeout and return Nothing if the reply - is not received within the specified time interval. -

If the result of the call is a failure (or the call was cancelled) then - the calling process will exit, with the AsyncResult given as the reason. -

cast :: forall a m. (Addressable a, Serializable m) => a -> m -> Process ()

Sends a cast message to the server identified by ServerId. The server - will not send a response. Like Cloud Haskell's send primitive, cast is - fully asynchronous and never fails - therefore casting to a non-existent - (e.g., dead) server process will not generate an error. -

\ No newline at end of file diff --git a/static/doc/distributed-process-platform/Control-Distributed-Process-Platform-ManagedProcess-Server.html b/static/doc/distributed-process-platform/Control-Distributed-Process-Platform-ManagedProcess-Server.html deleted file mode 100644 index eac5c29..0000000 --- a/static/doc/distributed-process-platform/Control-Distributed-Process-Platform-ManagedProcess-Server.html +++ /dev/null @@ -1,94 +0,0 @@ -Control.Distributed.Process.Platform.ManagedProcess.Server

distributed-process-platform-0.1.0: The Cloud Haskell Application Platform

Safe HaskellNone

Control.Distributed.Process.Platform.ManagedProcess.Server

Description

Server process API -

Synopsis

Server actions -

condition :: forall a b. (Serializable a, Serializable b) => (a -> b -> Bool) -> Condition a b

Creates a Condition from a function that takes a process state a and - an input message b and returns a Bool indicating whether the associated - handler should run. -

state :: forall s m. Serializable m => (s -> Bool) -> Condition s m

Create a Condition from a function that takes a process state a and - returns a Bool indicating whether the associated handler should run. -

input :: forall s m. Serializable m => (m -> Bool) -> Condition s m

Creates a Condition from a function that takes an input message m and - returns a Bool indicating whether the associated handler should run. -

reply :: Serializable r => r -> s -> Process (ProcessReply s r)

Instructs the process to send a reply and continue running. -

replyWith :: Serializable m => m -> ProcessAction s -> Process (ProcessReply s m)

Instructs the process to send a reply and evaluate the ProcessAction. -

noReply :: Serializable r => ProcessAction s -> Process (ProcessReply s r)

Instructs the process to skip sending a reply and evaluate a ProcessAction -

noReply_ :: forall s r. Serializable r => s -> Process (ProcessReply s r)

Continue without giving a reply to the caller - equivalent to continue, - but usable in a callback passed to the handleCall family of functions. -

haltNoReply_ :: TerminateReason -> Process (ProcessReply s TerminateReason)

Halt process execution during a call handler, without paying any attention - to the expected return type. -

continue :: s -> Process (ProcessAction s)

Instructs the process to continue running and receiving messages. -

continue_ :: s -> Process (ProcessAction s)

Version of continue that can be used in handlers that ignore process state. -

timeoutAfter :: TimeInterval -> s -> Process (ProcessAction s)

Instructs the process to wait for incoming messages until TimeInterval - is exceeded. If no messages are handled during this period, the timeout - handler will be called. Note that this alters the process timeout permanently - such that the given TimeInterval will remain in use until changed. -

timeoutAfter_ :: TimeInterval -> s -> Process (ProcessAction s)

Version of timeoutAfter that can be used in handlers that ignore process state. -

 action (\(TimeoutPlease duration) -> timeoutAfter_ duration)
-

hibernate :: TimeInterval -> s -> Process (ProcessAction s)

Instructs the process to hibernate for the given TimeInterval. Note - that no messages will be removed from the mailbox until after hibernation has - ceased. This is equivalent to calling threadDelay. -

hibernate_ :: TimeInterval -> s -> Process (ProcessAction s)

Version of hibernate that can be used in handlers that ignore process state. -

 action (\(HibernatePlease delay) -> hibernate_ delay)
-

stop :: TerminateReason -> Process (ProcessAction s)

Instructs the process to terminate, giving the supplied reason. If a valid - terminateHandler is installed, it will be called with the TerminateReason - returned from this call, along with the process state. -

stop_ :: TerminateReason -> s -> Process (ProcessAction s)

Version of stop that can be used in handlers that ignore process state. -

 action (\ClientError -> stop_ TerminateNormal)
-

Server handler/callback creation -

handleCall :: (Serializable a, Serializable b) => (s -> a -> Process (ProcessReply s b)) -> Dispatcher s

handleCallIf

Arguments

:: forall s a b . (Serializable a, Serializable b) 
=> Condition s a

predicate that must be satisfied for the handler to run -

-> (s -> a -> Process (ProcessReply s b))

a reply yielding function over the process state and input message -

-> Dispatcher s 

Constructs a call handler from an ordinary function in the Process - monad. Given a function f :: (s -> a -> Process (ProcessReply s b)), - the expression handleCall f will yield a Dispatcher for inclusion - in a Behaviour specification for the GenProcess. Messages are only - dispatched to the handler if the supplied condition evaluates to True -

handleCallFrom :: forall s a b. (Serializable a, Serializable b) => (s -> Recipient -> a -> Process (ProcessReply s b)) -> Dispatcher s

As handleCall but passes the Recipient to the handler function. - This can be useful if you wish to reply later to the caller by, e.g., - spawning a process to do some work and have it replyTo caller response - out of band. In this case the callback can pass the Recipient to the - worker (or stash it away itself) and return noReply. -

handleCallFromIf

Arguments

:: forall s a b . (Serializable a, Serializable b) 
=> Condition s a

predicate that must be satisfied for the handler to run -

-> (s -> Recipient -> a -> Process (ProcessReply s b))

a reply yielding function over the process state, sender and input message -

-> Dispatcher s 

As handleCallFrom but only runs the handler if the supplied Condition - evaluates to True. -

handleCast :: Serializable a => (s -> a -> Process (ProcessAction s)) -> Dispatcher s

handleCastIf

Arguments

:: forall s a . Serializable a 
=> Condition s a

predicate that must be satisfied for the handler to run -

-> (s -> a -> Process (ProcessAction s))

an action yielding function over the process state and input message -

-> Dispatcher s 

Constructs a cast handler from an ordinary function in the Process - monad. Given a function f :: (s -> a -> Process (ProcessAction s)), - the expression handleCast f will yield a Dispatcher for inclusion - in a Behaviour specification for the GenProcess. -

handleInfo :: forall s a. Serializable a => (s -> a -> Process (ProcessAction s)) -> DeferredDispatcher s

Creates a generic input handler (i.e., for received messages that are not - sent using the cast or call APIs) from an ordinary function in the - Process monad. -

handleDispatch :: Serializable a => (s -> a -> Process (ProcessAction s)) -> Dispatcher s

Constructs a handler for both call and cast messages. - handleDispatch = handleDispatchIf (const True) -

handleExit :: forall s a. Serializable a => (s -> ProcessId -> a -> Process (ProcessAction s)) -> ExitSignalDispatcher s

Creates an exit handler scoped to the execution of any and all the - registered call, cast and info handlers for the process. -

Stateless handlers -

action

Arguments

:: forall s a . Serializable a 
=> (a -> s -> Process (ProcessAction s))

a function from the input message to a stateless action, cf continue_ -

-> Dispatcher s 

Constructs an action handler. Like handleDispatch this can handle both - cast and call messages and you won't know which you're dealing with. - This can be useful where certain inputs require a definite action, such as - stopping the server, without concern for the state (e.g., when stopping we - need only decide to stop, as the terminate handler can deal with state - cleanup etc). For example: -

action (MyCriticalErrorSignal -> stop_ TerminateNormal)

handleCall_ :: (Serializable a, Serializable b) => (a -> Process b) -> Dispatcher s

Constructs a call handler from a function in the Process monad. - The handler expression returns the reply, and the action will be - set to continue. -

 handleCall_ = handleCallIf_ (const True)
-

handleCallIf_

Arguments

:: forall s a b . (Serializable a, Serializable b) 
=> Condition s a

predicate that must be satisfied for the handler to run -

-> (a -> Process b)

a function from an input message to a reply -

-> Dispatcher s 

Constructs a call handler from an ordinary function in the Process - monad. This variant ignores the state argument present in handleCall and - handleCallIf and is therefore useful in a stateless server. Messages are - only dispatched to the handler if the supplied condition evaluates to True -

See handleCall -

handleCast_ :: Serializable a => (a -> s -> Process (ProcessAction s)) -> Dispatcher s

Version of handleCast that ignores the server state. -

handleCastIf_

Arguments

:: forall s a . Serializable a 
=> Condition s a

predicate that must be satisfied for the handler to run -

-> (a -> s -> Process (ProcessAction s))

a function from the input message to a stateless action, cf continue_ -

-> Dispatcher s 

Version of handleCastIf that ignores the server state. -

\ No newline at end of file diff --git a/static/doc/distributed-process-platform/Control-Distributed-Process-Platform-ManagedProcess.html b/static/doc/distributed-process-platform/Control-Distributed-Process-Platform-ManagedProcess.html deleted file mode 100644 index 5b70f93..0000000 --- a/static/doc/distributed-process-platform/Control-Distributed-Process-Platform-ManagedProcess.html +++ /dev/null @@ -1,287 +0,0 @@ -Control.Distributed.Process.Platform.ManagedProcess

distributed-process-platform-0.1.0: The Cloud Haskell Application Platform

Portabilitynon-portable (requires concurrency)
Stabilityexperimental
MaintainerTim Watson <watson.timothy@gmail.com>
Safe HaskellNone

Control.Distributed.Process.Platform.ManagedProcess

Description

This module provides a high(er) level API for building complex Process - implementations by abstracting out the management of the process' mailbox, - reply/response handling, timeouts, process hibernation, error handling - and shutdown/stop procedures. It is modelled along similar lines to OTP's - gen_server API - http://www.erlang.org/doc/man/gen_server.html. -

API Overview
-

Once started, a generic process will consume messages from its mailbox and - pass them on to user defined handlers based on the types received (mapped - to those accepted by the handlers) and optionally by also evaluating user - supplied predicates to determine which handlers are valid. - Each handler returns a ProcessAction which specifies how we should proceed. - If none of the handlers is able to process a message (because their types are - incompatible) then the process unhandledMessagePolicy will be applied. -

The ProcessAction type defines the ways in which a process can respond - to its inputs, either by continuing to read incoming messages, setting an - optional timeout, sleeping for a while or by stopping. The optional timeout - behaves a little differently to the other process actions. If no messages - are received within the specified time span, the process timeoutHandler - will be called in order to determine the next action. -

Generic processes are defined by the ProcessDefinition type, using record - syntax. The ProcessDefinition fields contain handlers (or lists of them) - for specific tasks. In addition to the timeoutHandler, a ProcessDefinition - may also define a terminateHandler which is called just before the process - exits. This handler will be called whenever the process is stopping, i.e., - when a callback returns stop as the next action or if an unhandled exit - signal or similar asynchronous exception is thrown in (or to) the process - itself. -

The other handlers are split into two groups: apiHandlers and infoHandlers. - The former contains handlers for the cast and call protocols, whilst the - latter contains handlers that deal with input messages which are not sent - via these API calls (i.e., messages sent using bare send or signals put - into the process mailbox by the node controller, such as - ProcessMonitorNotification and the like). -

The Cast/Call Protocol
-

Deliberate interactions with the process will usually fall into one of two - categories. A cast interaction involves a client sending a message - asynchronously and the server handling this input. No reply is sent to - the client. On the other hand, a call interaction is a kind of rpc - where the client sends a message and waits for a reply. -

The expressions given to apiHandlers have to conform to the cast|call - protocol. The details of this are, however, hidden from the user. A set - of API functions for creating apiHandlers are given instead, which - take expressions (i.e., a function or lambda expression) and create the - appropriate Dispatcher for handling the cast (or call). -

The cast/call protocol handlers deal with expected inputs. These form - the explicit public API for the process, and will usually be exposed by - providing module level functions that defer to the cast/call API. For - example: -

- add :: ProcessId -> Double -> Double -> Double
- add pid x y = call pid (Add x y)
-
Handling Info Messages
-

An explicit protocol for communicating with the process can be - configured using cast and call, but it is not possible to prevent - other kinds of messages from being sent to the process mailbox. When - any message arrives for which there are no handlers able to process - its content, the UnhandledMessagePolicy will be applied. Sometimes - it is desirable to process incoming messages which aren't part of the - protocol, rather than let the policy deal with them. This is particularly - true when incoming messages are important to the process, but their point - of origin is outside the developer's control. Handling signals such as - ProcessMonitorNotification is a typical example of this: -

 handleInfo_ (\(ProcessMonitorNotification _ _ r) -> say $ show r >> continue_)
-
Handling Process State
-

The ProcessDefinition is parameterised by the type of state it maintains. - A process that has no state will have the type ProcessDefinition () and can - be bootstrapped by evaluating statelessProcess. -

All call/cast handlers come in two flavours, those which take the process - state as an input and those which do not. Handlers that ignore the process - state have to return a function that takes the state and returns the required - action. Versions of the various action generating functions ending in an - underscore are provided to simplify this: -

-   statelessProcess {
-       apiHandlers = [
-         handleCall_   (\(n :: Int) -> return (n * 2))
-       , handleCastIf_ (\(c :: String, _ :: Delay) -> c == "timeout")
-                       (\("timeout", Delay d) -> timeoutAfter_ d)
-       ]
-     , timeoutHandler = \_ _ -> stop $ TerminateOther "timeout"
-   }
-
Handling Errors
-

Error handling appears in several contexts and process definitions can - hook into these with relative ease. Only process failures as a result of - asynchronous exceptions are supported by the API, which provides several - scopes for error handling. -

Catching exceptions inside handler functions is no different to ordinary - exception handling in monadic code. -

-   handleCall (\x y ->
-                catch (hereBeDragons x y)
-                      (\(e :: SmaugTheTerribleException) ->
-                           return (Left (show e))))
-

The caveats mentioned in Control.Distributed.Process.Platform about - exit signal handling obviously apply here as well. -

Structured Exit Signal Handling
-

Because Control.Distributed.Process.ProcessExitException is a ubiquitous - signalling mechanism in Cloud Haskell, it is treated unlike other - asynchronous exceptions. The ProcessDefinition exitHandlers field - accepts a list of handlers that, for a specific exit reason, can decide - how the process should respond. If none of these handlers matches the - type of reason then the process will exit with DiedException why. In - addition, a default exit handler is installed for exit signals where the - reason == Shutdown, because this is an exit signal used explicitly and - extensively throughout the platform. The default behaviour is to gracefully - shut down the process, calling the terminateHandler as usual, before - stopping with TerminateShutdown given as the final outcome. -

Example: How to annoy your supervisor and end up force-killed: -

 handleExit  (\state from (sigExit :: Shutdown) -> continue s)
-

That code is, of course, very silly. Under some circumstances, handling - exit signals is perfectly legitimate. Handling of other forms of - asynchronous exception is not supported by this API. -

If any asynchronous exception goes unhandled, the process will immediately - exit without running the terminateHandler. It is very important to note - that in Cloud Haskell, link failures generate asynchronous exceptions in - the target and these will NOT be caught by the API and will therefore - cause the process to exit without running the termination handler - callback. If your termination handler is set up to do important work - (such as resource cleanup) then you should avoid linking your process - and use monitors instead. -

Synopsis

Starting server processes -

data InitResult s

Return type for an InitHandler expression. -

Constructors

InitOk s Delay 
forall r . Serializable r => InitFail r

denotes failed initialisation and the reason -

type InitHandler a s = a -> Process (InitResult s)

An expression used to initialise a process with its state. -

start :: a -> InitHandler a s -> ProcessDefinition s -> Process (Either (InitResult s) TerminateReason)

Starts a gen-process configured with the supplied process definition, - using an init handler and its initial arguments. This code will run the - Process until completion and return Right TerminateReason *or*, - if initialisation fails, return Left InitResult which will be - InitFail why. -

Client interactions -

shutdown :: ProcessId -> Process ()

Send a signal instructing the process to terminate. The receive loop which - manages the process mailbox will prioritise Shutdown signals higher than - any other incoming messages, but the server might be busy (i.e., still in the - process of executing a handler) at the time of sending however, so the caller - should not make any assumptions about the timeliness with which the shutdown - signal will be handled. If responsiveness is important, a better approach - might be to send an exit signal with Shutdown as the reason. An exit - signal will interrupt any operation currently underway and force the running - process to clean up and terminate. -

statelessProcess :: ProcessDefinition ()

A basic, stateless process definition, where the unhandled message policy - is set to Terminate, the default timeout handler does nothing (i.e., the - same as calling continue ()) and the terminate handler is a no-op. -

statelessInit :: Delay -> InitHandler () ()

A basic, state unaware InitHandler that can be used with - statelessProcess. -

call :: forall a b. (Serializable a, Serializable b) => ProcessId -> a -> Process b

Make a synchronous call - will block until a reply is received. - The calling process will exit with TerminateReason if the call fails. -

safeCall :: forall a b. (Serializable a, Serializable b) => ProcessId -> a -> Process (Either TerminateReason b)

Safe version of call that returns information about the error - if the operation fails. If an error occurs then the explanation will be - stashed away as (TerminateOther String). -

tryCall :: forall s a b. (Addressable s, Serializable a, Serializable b) => s -> a -> Process (Maybe b)

Version of safeCall that returns Nothing if the operation fails. If - you need information about *why* a call has failed then you should use - safeCall or combine catchExit and call instead. -

callAsync :: forall s a b. (Addressable s, Serializable a, Serializable b) => s -> a -> Process (Async b)

Performs a synchronous call to the given server address, however the - call is made out of band and an async handle is returned immediately. This - can be passed to functions in the Async API in order to obtain the result. -

See Control.Distributed.Process.Platform.Async -

callTimeout :: forall s a b. (Addressable s, Serializable a, Serializable b) => s -> a -> TimeInterval -> Process (Maybe b)

Make a synchronous call, but timeout and return Nothing if the reply - is not received within the specified time interval. -

If the result of the call is a failure (or the call was cancelled) then - the calling process will exit, with the AsyncResult given as the reason. -

cast :: forall a m. (Addressable a, Serializable m) => a -> m -> Process ()

Sends a cast message to the server identified by ServerId. The server - will not send a response. Like Cloud Haskell's send primitive, cast is - fully asynchronous and never fails - therefore casting to a non-existent - (e.g., dead) server process will not generate an error. -

Defining server processes -

data ProcessDefinition s

Stores the functions that determine runtime behaviour in response to - incoming messages and a policy for responding to unhandled messages. -

Constructors

ProcessDefinition 

Fields

apiHandlers :: [Dispatcher s]

functions that handle call/cast messages -

infoHandlers :: [DeferredDispatcher s]

functions that handle non call/cast messages -

exitHandlers :: [ExitSignalDispatcher s]

functions that handle exit signals -

timeoutHandler :: TimeoutHandler s

a function that handles timeouts -

terminateHandler :: TerminateHandler s

a function that is run just before the process exits -

unhandledMessagePolicy :: UnhandledMessagePolicy

how to deal with unhandled messages -

type TerminateHandler s = s -> TerminateReason -> Process ()

An expression used to handle process termination. -

type TimeoutHandler s = s -> Delay -> Process (ProcessAction s)

An expression used to handle process timeouts. -

data ProcessAction s

The action taken by a process after a handler has run and its updated state. - See continue - timeoutAfter - hibernate - stop -

Constructors

ProcessContinue s

continue with (possibly new) state -

ProcessTimeout TimeInterval s

timeout if no messages are received -

ProcessHibernate TimeInterval s

hibernate for delay -

ProcessStop TerminateReason

stop the process, giving TerminateReason -

data ProcessReply s a

Returned from handlers for the synchronous call protocol, encapsulates - the reply data and the action to take after sending the reply. A handler - can return NoReply if they wish to ignore the call. -

type CallHandler a s = s -> a -> Process (ProcessReply s a)

type CastHandler s = s -> Process ()

data UnhandledMessagePolicy

Policy for handling unexpected messages, i.e., messages which are not - sent using the call or cast APIs, and which are not handled by any of the - handleInfo handlers. -

Constructors

Terminate

stop immediately, giving TerminateOther UnhandledInput as the reason -

DeadLetter ProcessId

forward the message to the given recipient -

Drop

dequeue and then drop/ignore the message -

handleCall :: (Serializable a, Serializable b) => (s -> a -> Process (ProcessReply s b)) -> Dispatcher s

handleCallIf

Arguments

:: forall s a b . (Serializable a, Serializable b) 
=> Condition s a

predicate that must be satisfied for the handler to run -

-> (s -> a -> Process (ProcessReply s b))

a reply yielding function over the process state and input message -

-> Dispatcher s 

Constructs a call handler from an ordinary function in the Process - monad. Given a function f :: (s -> a -> Process (ProcessReply s b)), - the expression handleCall f will yield a Dispatcher for inclusion - in a Behaviour specification for the GenProcess. Messages are only - dispatched to the handler if the supplied condition evaluates to True -

handleCallFrom :: forall s a b. (Serializable a, Serializable b) => (s -> Recipient -> a -> Process (ProcessReply s b)) -> Dispatcher s

As handleCall but passes the Recipient to the handler function. - This can be useful if you wish to reply later to the caller by, e.g., - spawning a process to do some work and have it replyTo caller response - out of band. In this case the callback can pass the Recipient to the - worker (or stash it away itself) and return noReply. -

handleCallFromIf

Arguments

:: forall s a b . (Serializable a, Serializable b) 
=> Condition s a

predicate that must be satisfied for the handler to run -

-> (s -> Recipient -> a -> Process (ProcessReply s b))

a reply yielding function over the process state, sender and input message -

-> Dispatcher s 

As handleCallFrom but only runs the handler if the supplied Condition - evaluates to True. -

handleCast :: Serializable a => (s -> a -> Process (ProcessAction s)) -> Dispatcher s

handleCastIf

Arguments

:: forall s a . Serializable a 
=> Condition s a

predicate that must be satisfied for the handler to run -

-> (s -> a -> Process (ProcessAction s))

an action yielding function over the process state and input message -

-> Dispatcher s 

Constructs a cast handler from an ordinary function in the Process - monad. Given a function f :: (s -> a -> Process (ProcessAction s)), - the expression handleCast f will yield a Dispatcher for inclusion - in a Behaviour specification for the GenProcess. -

handleInfo :: forall s a. Serializable a => (s -> a -> Process (ProcessAction s)) -> DeferredDispatcher s

Creates a generic input handler (i.e., for received messages that are not - sent using the cast or call APIs) from an ordinary function in the - Process monad. -

handleDispatch :: Serializable a => (s -> a -> Process (ProcessAction s)) -> Dispatcher s

Constructs a handler for both call and cast messages. - handleDispatch = handleDispatchIf (const True) -

handleExit :: forall s a. Serializable a => (s -> ProcessId -> a -> Process (ProcessAction s)) -> ExitSignalDispatcher s

Creates an exit handler scoped to the execution of any and all the - registered call, cast and info handlers for the process. -

Stateless handlers -

action

Arguments

:: forall s a . Serializable a 
=> (a -> s -> Process (ProcessAction s))

a function from the input message to a stateless action, cf continue_ -

-> Dispatcher s 

Constructs an action handler. Like handleDispatch this can handle both - cast and call messages and you won't know which you're dealing with. - This can be useful where certain inputs require a definite action, such as - stopping the server, without concern for the state (e.g., when stopping we - need only decide to stop, as the terminate handler can deal with state - cleanup etc). For example: -

action (MyCriticalErrorSignal -> stop_ TerminateNormal)

handleCall_ :: (Serializable a, Serializable b) => (a -> Process b) -> Dispatcher s

Constructs a call handler from a function in the Process monad. - The handler expression returns the reply, and the action will be - set to continue. -

 handleCall_ = handleCallIf_ (const True)
-

handleCallIf_

Arguments

:: forall s a b . (Serializable a, Serializable b) 
=> Condition s a

predicate that must be satisfied for the handler to run -

-> (a -> Process b)

a function from an input message to a reply -

-> Dispatcher s 

Constructs a call handler from an ordinary function in the Process - monad. This variant ignores the state argument present in handleCall and - handleCallIf and is therefore useful in a stateless server. Messages are - only dispatched to the handler if the supplied condition evaluates to True -

See handleCall -

handleCast_ :: Serializable a => (a -> s -> Process (ProcessAction s)) -> Dispatcher s

Version of handleCast that ignores the server state. -

handleCastIf_

Arguments

:: forall s a . Serializable a 
=> Condition s a

predicate that must be satisfied for the handler to run -

-> (a -> s -> Process (ProcessAction s))

a function from the input message to a stateless action, cf continue_ -

-> Dispatcher s 

Version of handleCastIf that ignores the server state. -

Constructing handler results -

condition :: forall a b. (Serializable a, Serializable b) => (a -> b -> Bool) -> Condition a b

Creates a Condition from a function that takes a process state a and - an input message b and returns a Bool indicating whether the associated - handler should run. -

state :: forall s m. Serializable m => (s -> Bool) -> Condition s m

Create a Condition from a function that takes a process state a and - returns a Bool indicating whether the associated handler should run. -

input :: forall s m. Serializable m => (m -> Bool) -> Condition s m

Creates a Condition from a function that takes an input message m and - returns a Bool indicating whether the associated handler should run. -

reply :: Serializable r => r -> s -> Process (ProcessReply s r)

Instructs the process to send a reply and continue running. -

replyWith :: Serializable m => m -> ProcessAction s -> Process (ProcessReply s m)

Instructs the process to send a reply and evaluate the ProcessAction. -

noReply :: Serializable r => ProcessAction s -> Process (ProcessReply s r)

Instructs the process to skip sending a reply and evaluate a ProcessAction -

noReply_ :: forall s r. Serializable r => s -> Process (ProcessReply s r)

Continue without giving a reply to the caller - equivalent to continue, - but usable in a callback passed to the handleCall family of functions. -

haltNoReply_ :: TerminateReason -> Process (ProcessReply s TerminateReason)

Halt process execution during a call handler, without paying any attention - to the expected return type. -

continue :: s -> Process (ProcessAction s)

Instructs the process to continue running and receiving messages. -

continue_ :: s -> Process (ProcessAction s)

Version of continue that can be used in handlers that ignore process state. -

timeoutAfter :: TimeInterval -> s -> Process (ProcessAction s)

Instructs the process to wait for incoming messages until TimeInterval - is exceeded. If no messages are handled during this period, the timeout - handler will be called. Note that this alters the process timeout permanently - such that the given TimeInterval will remain in use until changed. -

timeoutAfter_ :: TimeInterval -> s -> Process (ProcessAction s)

Version of timeoutAfter that can be used in handlers that ignore process state. -

 action (\(TimeoutPlease duration) -> timeoutAfter_ duration)
-

hibernate :: TimeInterval -> s -> Process (ProcessAction s)

Instructs the process to hibernate for the given TimeInterval. Note - that no messages will be removed from the mailbox until after hibernation has - ceased. This is equivalent to calling threadDelay. -

hibernate_ :: TimeInterval -> s -> Process (ProcessAction s)

Version of hibernate that can be used in handlers that ignore process state. -

 action (\(HibernatePlease delay) -> hibernate_ delay)
-

stop :: TerminateReason -> Process (ProcessAction s)

Instructs the process to terminate, giving the supplied reason. If a valid - terminateHandler is installed, it will be called with the TerminateReason - returned from this call, along with the process state. -

stop_ :: TerminateReason -> s -> Process (ProcessAction s)

Version of stop that can be used in handlers that ignore process state. -

 action (\ClientError -> stop_ TerminateNormal)
-
\ No newline at end of file diff --git a/static/doc/distributed-process-platform/Control-Distributed-Process-Platform-Test.html b/static/doc/distributed-process-platform/Control-Distributed-Process-Platform-Test.html deleted file mode 100644 index 64c9a72..0000000 --- a/static/doc/distributed-process-platform/Control-Distributed-Process-Platform-Test.html +++ /dev/null @@ -1,17 +0,0 @@ -Control.Distributed.Process.Platform.Test

distributed-process-platform-0.1.0: The Cloud Haskell Application Platform

Portabilitynon-portable (requires concurrency)
Stabilityexperimental
MaintainerTim Watson
Safe HaskellNone

Control.Distributed.Process.Platform.Test

Description

This module provides basic building blocks for testing Cloud Haskell programs. -

Documentation

type TestResult a = MVar a

A mutable cell containing a test result. -

noop :: Process ()

Does exactly what it says on the tin, doing so in the Process monad. -

stash :: TestResult a -> a -> Process ()

Stashes a value in our TestResult using putMVar -

data Ping

A simple Ping signal -

Constructors

Ping 

data TestProcessControl

Control signals used to manage test processes -

startTestProcess :: Process () -> Process ProcessId

Starts a test process on the local node. -

runTestProcess :: Process () -> Process ()

Runs a test process around the supplied proc, which is executed - whenever the outer process loop receives a Go signal. -

testProcessGo :: ProcessId -> Process ()

Tell a test process to continue executing -

testProcessStop :: ProcessId -> Process ()

Tell a test process to stop (i.e., terminate) -

testProcessReport :: ProcessId -> Process ()

Tell a test process to send a report (message) - back to the calling process -

\ No newline at end of file diff --git a/static/doc/distributed-process-platform/Control-Distributed-Process-Platform-Time.html b/static/doc/distributed-process-platform/Control-Distributed-Process-Platform-Time.html deleted file mode 100644 index c8f6c66..0000000 --- a/static/doc/distributed-process-platform/Control-Distributed-Process-Platform-Time.html +++ /dev/null @@ -1,27 +0,0 @@ -Control.Distributed.Process.Platform.Time

distributed-process-platform-0.1.0: The Cloud Haskell Application Platform

Portabilitynon-portable (requires concurrency)
Stabilityexperimental
MaintainerTim Watson
Safe HaskellNone

Control.Distributed.Process.Platform.Time

Description

This module provides facilities for working with time delays and timeouts. - The type Timeout and the timeout family of functions provide mechanisms - for working with threadDelay-like behaviour that operates on microsecond values. -

The TimeInterval and TimeUnit related functions provide an abstraction - for working with various time intervals and the Delay type provides a - corollary to timeout that works with these. -

Documentation

milliSeconds :: Int -> TimeInterval

given a number, produces a TimeInterval of milliseconds -

seconds :: Int -> TimeInterval

given a number, produces a TimeInterval of seconds -

minutes :: Int -> TimeInterval

given a number, produces a TimeInterval of minutes -

hours :: Int -> TimeInterval

given a number, produces a TimeInterval of hours -

asTimeout :: TimeInterval -> Int

converts the supplied TimeInterval to milliseconds -

after :: Int -> TimeUnit -> Int

Convenience for making timeouts; e.g., -

 receiveTimeout (after 3 Seconds) [ match (\"ok" -> return ()) ]
-

within :: Int -> TimeUnit -> TimeInterval

Convenience for making TimeInterval; e.g., -

 let ti = within 5 Seconds in .....
-

timeToMs :: TimeUnit -> Int -> Int

converts the supplied TimeUnit to microseconds -

data TimeUnit

Defines the time unit for a Timeout value -

Constructors

Days 
Hours 
Minutes 
Seconds 
Millis 
Micros 

type Timeout = Maybe Int

Represents a timeout in terms of microseconds, where Nothing stands for - infinity and Just 0, no-delay. -

data TimeoutNotification

Send to a process when a timeout expires. -

Constructors

TimeoutNotification Tag 

timeout :: Int -> Tag -> ProcessId -> Process ()

Sends the calling process TimeoutNotification tag after time microseconds -

infiniteWait :: Timeout

Constructs an infinite Timeout. -

noWait :: Timeout

Constructs a no-wait Timeout -

\ No newline at end of file diff --git a/static/doc/distributed-process-platform/Control-Distributed-Process-Platform-Timer.html b/static/doc/distributed-process-platform/Control-Distributed-Process-Platform-Timer.html deleted file mode 100644 index 0baf0df..0000000 --- a/static/doc/distributed-process-platform/Control-Distributed-Process-Platform-Timer.html +++ /dev/null @@ -1,33 +0,0 @@ -Control.Distributed.Process.Platform.Timer

distributed-process-platform-0.1.0: The Cloud Haskell Application Platform

Portabilitynon-portable (requires concurrency)
Stabilityexperimental
MaintainerTim Watson <watson.timothy@gmail.com>
Safe HaskellNone

Control.Distributed.Process.Platform.Timer

Description

Provides an API for running code or sending messages, either after some - initial delay or periodically, and for cancelling, re-setting and/or - flushing pending timers. -

Documentation

type TimerRef = ProcessId

an opaque reference to a timer -

data Tick

represents a tick event that timers can generate -

Constructors

Tick 

sleep :: TimeInterval -> Process ()

blocks the calling Process for the specified TimeInterval. Note that this - function assumes that a blocking receive is the most efficient approach to - achieving this, however the runtime semantics (particularly with regards - scheduling) should not differ from threadDelay in practice. -

sendAfter :: Serializable a => TimeInterval -> ProcessId -> a -> Process TimerRef

starts a timer which sends the supplied message to the destination - process after the specified time interval. -

runAfter :: TimeInterval -> Process () -> Process TimerRef

runs the supplied process action(s) after t has elapsed -

exitAfter :: Serializable a => TimeInterval -> ProcessId -> a -> Process TimerRef

calls exit pid reason after t has elapsed -

killAfter :: TimeInterval -> ProcessId -> String -> Process TimerRef

kills the specified process after t has elapsed -

startTimer :: Serializable a => TimeInterval -> ProcessId -> a -> Process TimerRef

starts a timer that repeatedly sends the supplied message to the destination - process each time the specified time interval elapses. To stop messages from - being sent in future, cancelTimer can be called. -

ticker :: TimeInterval -> ProcessId -> Process TimerRef

sets up a timer that sends Tick repeatedly at intervals of t -

periodically :: TimeInterval -> Process () -> Process TimerRef

runs the supplied process action(s) repeatedly at intervals of t -

resetTimer :: TimerRef -> Process ()

resets a running timer. Note: Cancelling a timer does not guarantee that - all its messages are prevented from being delivered to the target process. - Also note that resetting an ongoing timer (started using the startTimer or - periodically functions) will only cause the current elapsed period to time - out, after which the timer will continue running. To stop a long-running - timer permanently, you should use cancelTimer instead. -

cancelTimer :: TimerRef -> Process ()

permanently cancels a timer -

flushTimer :: (Serializable a, Eq a) => TimerRef -> a -> Delay -> Process ()

cancels a running timer and flushes any viable timer messages from the - process' message queue. This function should only be called by the process - expecting to receive the timer's messages! -

\ No newline at end of file diff --git a/static/doc/distributed-process-platform/Control-Distributed-Process-Platform.html b/static/doc/distributed-process-platform/Control-Distributed-Process-Platform.html deleted file mode 100644 index a6c2220..0000000 --- a/static/doc/distributed-process-platform/Control-Distributed-Process-Platform.html +++ /dev/null @@ -1,54 +0,0 @@ -Control.Distributed.Process.Platform

distributed-process-platform-0.1.0: The Cloud Haskell Application Platform

Safe HaskellNone

Control.Distributed.Process.Platform

Description

Cloud Haskell Platform
-

It is important not to be too general when catching exceptions in -handler code, because asynchronous exceptions provide cloud haskell with -its process termination mechanism. Two exceptions in particular, signal -the instigator's intention to stop a process immediately, these are raised -in response to the kill and exit primitives provided by -the base distributed-process package. -

Exported Types -

class Addressable a where

Provides a unified API for addressing processes -

Methods

sendTo :: Serializable m => a -> m -> Process ()

Send a message to the target asynchronously -

resolve :: a -> Process (Maybe ProcessId)

Resolve the reference to a process id, or Nothing if resolution fails -

data TerminateReason

Provides a reason for process termination. -

Constructors

TerminateNormal

indicates normal exit -

TerminateShutdown

normal response to a Shutdown -

TerminateOther !String

abnormal (error) shutdown -

type Tag = Int

Tags provide uniqueness for messages, so that they can be - matched with their response. -

type TagPool = MVar Tag

Generates unique Tag for messages and response pairs. - Each process that depends, directly or indirectly, on - the call mechanisms in Control.Distributed.Process.Global.Call - should have at most one TagPool on which to draw unique message - tags. -

Utilities and Extended Primitives -

spawnLinkLocal :: Process () -> Process ProcessId

Node local version of spawnLink. - Note that this is just the sequential composition of spawn and link. - (The Unified semantics that underlies Cloud Haskell does not even support - a synchronous link operation) -

spawnMonitorLocal :: Process () -> Process (ProcessId, MonitorRef)

Like spawnLinkLocal, but monitor the spawned process -

linkOnFailure :: ProcessId -> Process ()

CH's link primitive, unlike Erlang's, will trigger when the target - process dies for any reason. This function has semantics like Erlang's: - it will trigger ProcessLinkException only when the target dies abnormally. -

times :: Int -> Process () -> Process ()

Apply the supplied expression n times -

matchCond :: Serializable a => (a -> Maybe (Process b)) -> Match b

An alternative to matchIf that allows both predicate and action - to be expressed in one parameter. -

Call/Tagging support -

newTagPool :: Process TagPool

Create a new per-process source of unique - message identifiers. -

getTag :: TagPool -> Process Tag

Extract a new identifier from a TagPool. -

Registration and Process Lookup -

whereisOrStart :: String -> Process () -> Process ProcessId

Returns the pid of the process that has been registered - under the given name. This refers to a local, per-node registration, - not global registration. If that name is unregistered, a process - is started. This is a handy way to start per-node named servers. -

whereisOrStartRemote :: NodeId -> String -> Closure (Process ()) -> Process (Maybe ProcessId)

A remote equivalent of whereisOrStart. It deals with the - node registry on the given node, and the process, if it needs to be started, - will run on that node. If the node is inaccessible, Nothing will be returned. -

\ No newline at end of file diff --git a/static/doc/distributed-process-platform/distributed-process-platform.haddock b/static/doc/distributed-process-platform/distributed-process-platform.haddock deleted file mode 100644 index c43ccbab9ec1efc546da1c0f0f4b0aeb4e365024..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 134679 zcmeF)2fU}(Ss(cM-%%Y)vXC359vAS8J-RkFV4Gqz8cEjJk}RaL6#@jVu`ysdEbAz|GE9PfAx<}J^$3^U;fmo%|HI9|K!x+zfD-=pLS>A_W9Ejf5XJz zIPo`4{LK@8%f#P0@nJ@LB}|LTc<&BTA!#J_gpUpMieJ@KD2@t-^Kub=ogO#B-s{_`gO z^C$ibCjJX2{!J^de!pnq@0s`u6Myf-U!3^+CjS13e_-O@Jn;`s{H2M1%f!ER;vbs$ zhbR7#iGSO~KRWSWJn@fB{NodUdE(zb@$Z=UCno+EO#GKj{4bpNFP-==oA@uE_+K>f zUor7tIq`cF|IUg3s)>Ks$~$}BJ@H>X@$Z@VubKF-o%pYt`1ely`zHSV6aVDIfBnRN z!^D5%#DCMofAhqDVB)`J;=gs}oul72@!vl2ADsB_nD}2j@xNr^f9b@3=fwZAiT|#N z|L%$Zo{9hE6aT#v|9unx{S*HK6aOn#{^qnlIPt%7;(yh||LTeVH5319C;rz>{0~k1 z4^RBBpZFh{_}?(`KRWThapiAG`< z$0z1+WE3f|l!-@ZoCjLL3`2S?$|I?MrC;q>f z`2TX^|Er1ruP6S$nfU*<{3oo;=G5k8VV%#;W$oP|=d!+Y*qOT*dS}S_?!GZXXG7-o zzcJ)I{@PH-oZBPxRiWy}lOt0(HjO)<&uwnYl}0Bw6nK&}zklspQB?2S$m8WuB}Vus zLNJqpW3j)vA>1SFrG2-hr%rEnZcTF`G(yCy3V;KbH#hH4@aQ@Z#Cvume1y8{J>ktT zZY2vlr+3b!sR3SyK|R=xt61>bax_A747(0UrrI(+LQHGP(E}QOgc@^WFwk#qHw5bG z1mn}wUJNy7=B?8&p5xp#K0WzD()uE@h?=lPjF z9~z-=3sGsA2;fIZzg|&ib8E>kS?`!Rv)S2JM^s?6p7gZ58X-$(F9dm3S-C4vM}`iN z3QMA_MPx_FT5_#6Zq$C)?PY8%C(A!sj**eRIkMY_L6d&-n)J(&p`UFuLi_rjM$&g) zv}zC$UW^drc+oQs6<&l`9&3A4LMqD(yXuw?2kOnvol!kPGi(8DzsWe8F^~iibnQ|o zOYr}@9kNGYb5?3&@dgWzD%98HWfOs9N1cA%JHRz|T#4-zvjH^CN^;FU7+) zJMP~j)I-30y=A|AJtbeAt@f<5qeG{WXC-oqTM{}wNlTFkC1Z%FH-aFn^Cf54yye#WoQ5HFxCl5 zyI^s7bwxUR=sg_%9U;cPY5$h^ys*o^jjN4v>a@#$@A6Mv{;A6iF8_nejV@kGpAx83 zx47Ku!t+xePEXzDa=Xj_c2^jZo90CXCCavtcjv zzLmODDmyaXUDGvM^Yp(hM9<@))|x(EJ&uq$YuG9lHmx6_v!TaAWZo01?ipuw!1>jo z^PyvRN#-tv*g6-2LmL12>7AFSp^6DxNOe-l^az2DH(nmh!mo>3I=ThN%7cdIp7yIk zUlY>jKB9}w!ul7dcRo4YCql%irGA7g?^1l1T5kL(7~CQUHXnZd5C&BYJJ1^A<@n1|R8ljWG07r&F%3B(9+~uJ$ZXQO)j9Ui~ 
zd2@?%o1NE(9ijU}M4AF)O)Hg+r=^7=o=sPC02W`Z)fTEXs&ItJ6bc!-t(T|ns5vZT ztLLP3)!z{00rV5Ibc70(IejL%%Oo11M!z92&n%PRo3HDIG&VQBBEuI$waSzr>r*Yz z_l2sa^ZM@$^|%L3WXL}nst$0>F^$ZtM7r6aqpp~PrTzD9_U{{;oeLQqAsXb&N|tG1 zb%dJBJ{cRM31YkeSVu?4^q&khu$RMIV-NAdJZg|DKQh*nQY0F5<8+h5!zL1Rk{_XC z_gChc#73)F;5)NCEzrP1XBtyy-t)54KQHa#+ng}C#!+Q-C`Gbcx5ZH+-H zV$HX{aTO}k&rhUZnO3!0YpxjZW9@4j!RDV{GS=N4nll5xxjEX6nhaS}nGqho-iD6L z;F+BlMvC-F)n$IJ38+QP7z^5>hNzKvK_W5Izdz(@+!`oF15*7F(${WB<63Q63kTi2 z=F12zdY7l8yOx(;#r4AA@__hgsB5x6h==5Tah+-#RlT;Y*adb9()z?YwxzGr4z$Q}~%-IrjDMfmf z@+T%xFIfA>*`bpi`nX)k`o>3ye&cu;_a7I2=HSh+7oT40un_$xj`QmDo%Lx;Nqd~oL;byu1Z0%iM1 zt9!)B^!UOT0(XQXH~g2T8KF6tO*+fem}9y9vG%ph&B-CdOKX_v`Vrpvu^#W=d0)Nn zTs6=v{bOxgaz$I~EzfzkoZ0cDVI}Nv^n|~bJ+vLxp6T_0Z}<@^8hqEWC&ZAE9U-wH zyBktLKbs1AaoP)^5uy)9Yk5XjJvMel6{X+j4l>r#-&~B97m@vq@jvZ9l=s#M2m?*n z2u~5PKpUT(JW#wGegATOP}+Gz#71b2f%sZV-JB71H9|GrxXfma@oO){XR;$?Z6P#{ z`qt``wM$0G_~Lik<7`!*`Qm7d<;~b`=!~2G9ijaf-gn}>wmJ3BT>fX5f9~=xT>clA zyIh{<@_ZL3-Kqc8<$rT=XFT;H7mwPfr2o{GFTM{41BAb@{(t{vVhB z*X93P^nmr;5Ku6I1Q9^+fCCLMuz&&y3?M)N0Q<}{_p_%qi`*Tj)1j1dM*i6*muf%t zFZ8Pa1aE0!f+;3{#yYmH-O~vBce~3|-;}VAKI!z%YtlR#`lgWde``otG?m{ReuVbc z)=Bteh>TLvN;&xvx)7r8QfP$cGS;VuEMn7IHV|EOw;aDAd~w;JhRAO5-A6jVOI;qU0#K5P}{xiX(iH)NAY5m}TiS->SfW`zkYI zwDt;IdE(e)?8z%$&ykqx3Cd3k$`__>d{`l;RL z))=oY_KX#Sxt5Z-5hBY@HvxIrtU0rMTtC9GtR;P|2N?N!$QsnfSYWMDpqn+3Fh}-W z$Y%NUY?d3-J`^%v&-WjrPr3d~pBS?G^hYBo@Rf(ye-gstz3TAG`2*kx)#0P! 
zXI`8^Xk3lih;z=E06`6o5Od^>bCaz3-OvbK4uMJUl7+N%HlXqdrunWN;jO7oeQVjd z+RnfJ1f5>Bdy6l@@;Ep`00D--?wl=$-_Fh}*7bD^xCZV+$tg@nB}>3;LJ4Tn8nD(I z`;ueByd)du4QU?>2`+ynMpEf$!i>+grjW#^8%?P#T3|GwfHsq@8AyAWAFoo3m-OvbK4sF+XAqY!t&EXV| z5u#5kJq?jD{qvy_67K4_J3Jg@>&FORd&SNczl_s|pZ6Hu3C)dZMyM)@d49YpEmegj z0}HIf4~MSTVxx5_s!|gXHZP%@j{`F8v(Xwk%KpgwwbaFSG}*M4&PJ+r&Nt(Hay+~= z$HR-#UI>j)BQ7(rH4f(J>cb|#>Ew?BG6x6w5wdovfaPKNCHr48W7OoOPz77yRRkQF zp2y?_)L41OQrM5I!|YzqUTOTTv37)J)%x9Ija%imZOxTuuCd?NcTz=GhnRNP;OQ74 zdby0~Z-~tEVT5{o;?0}e3vJgjTW5COmg@*L$TEnGS<(Hce^iucp3#wMB6q`&kR;a| zvXaPy5!#NTF&@`5JQ;S}P_}O>=6MV=oxB>MM?$dFUiHZ42p#3`9JyA%_47@#k>{IT z^Rn!kyV70=!8t;EA$Y7MPgi{~21~1Hgf|B3{xfS7zCtHtqDKhcSd}wi5l0#<4$MxE z^jmHP&bT1bL#Oc((g(6-t51%MHT0T4v-7fCN65?qJMA1g8jpJoUlb!UO$ol*iw0^4 zgihPrLlGO_4V@8tA1s~njgV8m)OX7F>`r-O;*@VcFHldtB+PRDY;MHeS?E(ZmMY>q3B)KSn{${o8_&=PufXmUhqtSiAw)YpV|n& zL^Ncc5rj)$ScGUVBqiMyBmCKrc_E(9?vAyHB)z)vt^UffZQR-U5&nw*9WnXx)5}|P z_l4z&bESo(UoS*qK4mo^8g>8>6q0)E7zZ<@j=Up&R3 zWtXjDTI}cjIQ!{X44a*oWO{^{E_C8lf+90j$^G6jj9Vide(;#)9d9fv|<5&h7!wG?Jv4%`<0@;Wa1 z<}QWcqfeimgPK2h8EuvaBht_&$g&} z(K^Pw}(*}0Hrgj!OCL1m$C2X8iEt)mz`u;+k| z^j{a!J{|&LPI629T*gP}OV_sU(nVgvM~Iy-3%NVNARPD+x;xZ2TI-ATLZL5YBSZ&# zwfW`YrRuXcslpe9xfuGeN8@<13NWhQ3)7;7sp_%B>r$AU33;)JiS zpA&wBPKv$xd5zN8Ch_^)gvUdG@*&^asI9$>jgY>W%b{B~{Nk_NMyMyS5X(Q=)|_%3 zp+(n+v~g)yjkG}mv1lSZOUG3sZ(skc@b%OQ{D?2XR~ z&mbM4IcN5*e$T3U^m*~;os>2%&-ka@J1TPoE!8J~1-$;c5*J=;$dhd$gDFo-WW7)lm;Bi`UBm`&ow{US`b~ z#O@2yUa#G;+T+{ZA*U=nOH|S>4_>D}Yx&GpM!zt2ysmmY1PiRayN&StnzJKz^v!3I zHNC0NrzBZA#S3`=bEDeMTq}sJM?(5|m>aX&?Kl~BN5~jV`Sj=JG-K^P<)$9s@?saO#aNJ~`Rk>Egqa&0Q|f%Vl|RI`suO_u9?#U0$%f zdWQEiLeK&WE2xkH3LS8}A*g_Y2_%RBf(IOEfPn=Rj|Pl>p>jG(XjWnYg8(rO1Ta7W z2P80n00cw~+??iOfxJEwn2Tp#t0v{9pE$Lr&`XlLm zedx*1yMyl3o3aTXO#7IDP&ZLGkCw2?T{hCne@$9*BlJxxZ~j|CHYoWKV(hC|elIOu zBZMdKHH|T~PbXaksHLxVtszIZ4O%(b8X*S`9pcSiXcDc@;V@;uqkiNESg+WNHEoSS?8}AmQ z4{`O)r+3`bwq0ey5^$XebFJALk)@;SR1H@iJiX&~ysds)bgVMm z!C!BIonO}pfyja($}P@jP*xi*G9x4ds)KDVZ*OCvw#L!ls%Y3-dt?xb(okj+{hxU^XUQL+pw&_-D 
zE%ydTR>I~j+h{U}AEBe%1>f8=>0&WAq{T41yBnKlUYLt_>-K-J@gCd-mwR0e- zzsmzIZ+3am<&w)=T;A&PP-sKl=0z@EWNu#U;`#a1!?;ggzAn$)7lrN#vBeZi46(xu zD~zzg1Pd`hKlQXzzUY4V)Moh&fH&P3A&MxZ_?{3$_uZ5WO|;WCTiC}QiDPs_z5cu5 z+5Z{YXT+7uA$8y&7@;RZdm;6w7P}orcrSn)P)E(HA8V@1I(>V>8GKL3!y-G@le0gp zv;D}Md(%^2M5v%gybt`EFc(7h*9e`o-@ZEYblPJh#AeswpQ&SlZn}CVhz~pO4)xG} zXLvE#e|zGKulWA5n2yke(Dh>CUvCh-X*37s;GlDa_}ueGAMZxdn8-(YvLj?IKkD0T z!}wU|jI$??#;rr=LnA~V-QtmtxGtm_p$`Q~ zQb&$^O0IT|@j)2LjgU3us((8T#*HNmuSqjPbM%9Oz?hje!k5n`G<8*_9_#4BFig-P-+Xe#g$~@aNVsKLT`b_yM76pSG-&LsI%|(h+PcTBg-X}N98_**|^=mrp8#*qq%-RU`WB|L!%t2%gqE6dW zt#ci>uHN8{3xzejU;{zE8ycY}Le}8Bb~_U0=&OgtNB{QeRb9$pwrTvE4*qb%i;Bkg zq~b~73Y3Cx4>1aW<>x~qbQHLb6_7pCed$H*0x0bW(OEd=^cyU4-Nz%mW8GLClmu&Q zhtmRn9p!Qt@laJR%hPY}nlmzvi<+8EgP3ULrNE-<;8aO}I}*HOqLI`>&CCcD#vFme z)V(GWjnzr1JHKk9eY;3lgKC6M(qW!sba1+MXO9}Jd2B>>-o^sO8!~4Oexm4XdNVqx zXtO=eg+|DHtrm%XW5kMa=(M{b#r?Q@l~db1UYQ2umL4@A=~Hd#eWAvs^ZIH| zq-WFQ9}QIpxJ-eMI!`y3tyL}TS;H+d7eeF@4&5tmk(WjL#!NpH0;L;#KYEYvKQ@d^ z%oz3AXjov^e=@|@vUroPuZhzL(Y%y$?gXD2UIB@DgTsjtlB1^p&e*xo2>EOa;g6CN zFrWZYRP5s4bLjNPQuf}Suc3Kw(XD7N#9ehTLUS$Xn-_}mSJXttg}${$PHK8gjwSI0eH69#6`JLeJVKX43ewmJRXr7Ct>MZDIZJv^ zV2Yd8j+Ng_+u}5jCsVszs2!VzJh{%-_E_f@xA&jN@{>8w&*)sjG}j`YW}1-k?F5|5 z*a)$>2I$Up$kKvkqCgw-W1m!(XZJT9eA$CZr+;_vZum2s=j^;Zt-#gc2DAq($Y2SI zxl5t9g+_=Dt+nRhvunvW1a+J35$c+4n~!Jia_HfYrQQ9+$B*V`?^Dq)NF|<5MP3Nm zl=}}uPwc!NAKHEv>V49VUF6dqRCV|fvTN;*9w&B*onx2SA?HIQblg$a-Q^{$B6uv*T2(?W@^qF6S_CXN2Z4{iAM!##<-k;nb(&AY|c-!(8QIimbavyFX5!Qx@En`A3LS*=2UL5v9!nF9D)*RVo z{r=5&v;3|4$YAqeAtQ3_mO3r%gs-v!qjL~0g`i&922#(Gd$H*96^px)@k+Aq+*YNBrSE_1voqs&73O5cO z=+@Lt(3A?APu9xi{iXfqHiBX@pEiwAAA-I#^W;3WHb45W-Ri$?IW4~Y@V5&HaeOI~ z9B+4pKO4Fb+BSM$#u{8+jnG$w&aHgG(D_;W=ThSJ{B^b|w35XUn#c5;ik600kZrrX zF&dONWsH@lxVL}!j!d~hS=JC6ql)?M_h#ptQz3wM+<``Zxt}b%=t~Y?wP*Yn=5)9* z?L#5<4^M#`()*JAUqWk&C}M=M5#kfTBH!{;JH~;>bD%OK{B{hjo5wgrj*jh&Dr@u2 zsumt{XO=$<9pQ|&l1bCV0{6PoiV><6e7&g5_I$^AWDc5nJ#WdJbeQjE-urQ1ny}lM z*YZd(M`vVBcO~CoHa|jao5LGZQpUXVdrR62A@cLad_4Wjp@)|bvdoT<)yvAXwbr^O 
z!}EIbWhWNUmo5)J$JGxzbTy(p!m}Z->W0h+$zJi{?Jbz#>^i4HaeaAkJvaUB2v-Nc zx^{$D+xo@y;f>laIx%Wra!||LI}SSD_XMvukZb*Ktp_&0#m{H!+sI#;z&;cwey#&Mf3YY`-RnAma&OrS;YUaS^^3zjRuD$0_Znx%EeKGE_(B>sLkzPt|c@yK*J9Itgo%#hX zU*hr$UA)X(egfgtm*aksi&ucBzS2c5mdC!0?*VO~SaP#@>GIcrAHp}Fmo31fw}l=J z!4G~*{o_BRiE`KLJx%A9C|QR!yLF6 zx;JzobWiAuLLMpX(SibS+fac52^=uM0HJ0aPCC1xydU&NM>_r48+8^ zb?AI(gsh=kUgGA3G$Uk*<(ABjgFl;f!?uQcy0jy-op$K}g=|v?*7DX_+HHBV z>^~EwQ9sz_Tf*>~G$S-uJGWZ7bCf}81g)V$C6TF1=9|U3WL`B3i#cH)Yuikz0yfDO z=Gm1egT~tKrlniEU%~HNp3=^(V`pqjH;D&BGw3>10~`?V5h_RkJ8w+fTOT95KvgGa z=^!^k=30hyRF5$Z%tufi53VvX!dzY{wt^C#BHAC~bUixJURFv$3X*Ycr(EkguKX^a+DhH`Zi*+mbywGR`eyBQ%#qL)qNX zWsTOHmfiB^diu>_-Nw|kUXJ`+vI(1q-Vp+edYP-a*;L_=b-rQg8s_Jac9(TGf+VbT z&SAG^`)sWH5QynAxoRWlVlOb;}JSXjA zA*vf~OBJ-SM?+Qrfy0eV)UYTwVnwKBZG@<5gsFoT)m#gp=2}!EN9ApiE9w@kXf#4( zVSvT>`wpMVazDEvBaeqzLhGa3;`o`o@_5<)pStsp^7>2>qsx-C4WaSvG>nYRsVy+; z0W;6}lQw??RD0I4Wu3QuMySpk+uqS}<k-|Df8=pKJyu5OB(UbKoZrv1&iWBL3RTDIm7oY9Z*0*8N!k(WcE+32pAkDE)_~m> zc7*N?QBen*x0>6_oFLk)x2C-i0(m}FE~ejL=Fhoak?sgp<+42erX3>7N2vET{A{|1 z+0(hL(@$UR&k;Dx*&UI%mYkhmwY55?XQZZPFCs0XtEHb~REclRSC6pW*_4Tyt8gwB%3BZRW=+XAR;`pAz&N0z8;wZPl4EHfnpUV@jJI{gcu_-yEv?Mkl-T zmZ6ib?g?${KcRY6x+7$%41oq##aoodXL#LC#;olNgQ{n8gy?E0m<6@j=R!kpR5h?U zR+&Xd9+uqx*|g4+G+wH@&GF`XU@dcppGwsYkgJ|*(6~?C2$@fG9$mXvt3yX6tE{0B z5+S^ZPz})t)#Ib0U=A$NSdgbZ9~z-=41qbz7>A7wZXolu-D0{{x34v;{As4)TWX#4zr5R7}8R4?!BW*pgz$lb#&~8JoVp{_P!9A5qf0h^WtL@_RR8=geHI}_<}YL zgz@IMG7VaDVf_eot#t)M*81j8{=1W^pQF`b$69M0r;ZAQ0GVTtbJljffr1o!;Ly+3<52K!vkFN7pvt+sVzQd`ZNJKU4Xji+fCSq3%h;?A9#f zIq#OtUsz@4@IIcg%b|yNJ|Wi;>bB=hE2zQhR%eL?<9i{pyCEOq-VH$VVylTt{5uU`N`L9khLd56S2gbNs2tp1jw`bSSrY~edONVwF-nBp(Syy{?rL|p~WM=mi z%?%m50{h_~0vCG4L=n_Q-Dc;`up`v7x|)rZr`eJwi^`r13En2%)Y%MYI_>P6RF zt?X}&686}#Mx@!KN5q%8!)LReh(sz%M89t+jqt|6#S0|$)Tssh2pxBVGhGOttIma* zR66ON_fA7t-3iT&5gA*e`|n1*>F_<3uA37K8~f^WgoKaJ_#SREHrah~<}QTTpZC!N z>F zQPp~Awb41JwYBDOupYkJv4QA^RcjpPgz>gCBh)=<3VdVKw=WD!X!F>5V|ePXNZS5? 
zs5&ytLQ$}2ss0?v8C@gV`l!jujCH+!Ys?xEKnKvmiw^tSbF=M{dA5p+Ue&;^wIW$8 zbCeHG{+o}Ez|M=ZaDgZgwz*mhCSIfLbt zzVXH9oM1PEe|#M`_pJRHl&=%*8{7(znLA8xma(su9w+ zz3_AyuYNZxW4&q7Q(63?V|faF)8VH4Sh_d7|Kq32v-fm)GL`6_{zOQ%s&qSGgytKK z{y{ab{1rExjM+_Y@7+1wjol6U9`5ZHa`YXw+l-MNq3*Kdp1#PCr4L_yePk~N<2^Ml zh3_da`@6k%#@$opeAebs`ld7^WJwQ^!;SvU%YPWSJ+IH@?XVfA!yFwB5{F$6Ef9@c z$DVPr=U(`CP47pZirp7vtiGDB>qS5L26u#~r-8sXPu@klGyNXd09((AM?=;avj)a{L+rDoPwrg@ zZv)viq)E`Itp|OFZ8u{_|4rnLugcWLP@T1uI(^PrdS9pksA`etto{NhEm;n9y<4`$G)=B#=x=W$i6V2-F}c)GWWMp_ zus^GQ`r(`M6#tg^WCPe;Plj5wihqPUUS1&|yyFmFEL-r_i{pF>=-Z=WPmaEBqUJ?x zN9Z^lJ!Q7nS<{V)r~7*GSr7ME+v4K`yl}&J%=z)kOo3fjKx{aUJ3q`b`*hC`XUrTD z2RQrmDq(Ohgsuq1;pg6efv>p>`@`pA0c5o@|y%ZFIgo7*D5On*ty}jvkUaM zcgnUrtE_a#+BtUU2pOjvPj3FI#>ZCjJ~7^jz~(b@S$y4mrtT|TUS>>8mM7cgtlFGS zZ*$I=fS&p~7Y}XADYaDp61n9dT5g%kmnWC(EU8*@lSI8VOZ+&19|O22bRl$a$X>oL zbbsi9(3?XKhAxHN<=+}|=sz3+TzWwC?Cmgx3MiNyoc5zsO0PS(uW~Atmd&>SLMfw+ zFwD?wyoRp!YX$`Y&=p3~FEF5HFa`MDQ_s)LyKY!>5x-#Y%0Bn8MtCHe1zyB6N-lll2PKYD3GT;}%v=(w;ggBOMn>TIWc{o|qDDxh1&nWG>6LVJ zE;K^cv&&xmy)DfMeYD2;+ueAdm6dd1jqKagNP-wLt5#cF zU@uBDLMPQ`LsR%wL#0jMyAC~_v*v^QZ&AKtv;2Pn1xShm1>`B~w)6$rDsqCx5dpw| z$J*!65|qQup8C5Xqg}m(V{>OlZaSUIori+yeek-g^)*>FLLW`X0z3_-aG!MUCc~SE z92~#*=!Blm@oU}F*)5VLnXU9z=g-A1N<3P{&-7*zo}U-H=sT`^7uk-&qY8Sq>0fIV zAC&KNv&h1yQ@%;P$85`ws$$s8DT`S>ybcw|&Io-pRWm%>W$ajm8Qz}#FfRvV+Qyu} z-HBa87dczx%+5=59U+o))+)Viq1NVGfb~U49@+QwUCU$9Zsd-B&+y=g|AZ@!_+P9z z@5-u&LbbI05Z6yM#KNgWu3n9m_Y!Kq?!M#Dz{xk8E0O$?2m|>Q2T;bn$#y?+!zZkNg z$qAiW7@7w~klpYjbU9Su z`gB?cm{zcAhmECR*SW6CnfI~`jgV#FSh`*B`c(x6zY85iV}(j~iA!3#YHEaUehN>Y z?7{l|FLP~{|Lw7UUW&j`*hj`s0=*q;hFTQyJHNcR_+39*w@0Pm{3?5#KVs9ZNoIu9 zk66%$M(E(2UHtr_{r?K2%3*79;TsqWkIP@@W?*#0zem!Tu7A|EM2NuKeAjeytiP?Y z{*aNc-OvbK4hi*KBhp1bo3&xSZjJEjMW3$PC12Ykym*P7F?!~`xtBhjaB8jX9fN&q z8qV35v5wd7oHsrtbT_6U_V!TS-9I9~`|wXr4)ia%{GR(#y`$jAr*$uR1M zIm*cchxQ-OU%m6ZERcbdye?FgOpdWeNA%1u9;W?eIsLf#P>8YK0r%bf&g0(?7gFdc z=9NA}lrM`vs=jo7#gA0-cZ41faRIIVUU6smuEgfS%smmB<2};fuB^t{99|eCT}Z^- 
z&JZF+V64Tkglp})>BB;fj^?au#yX~trx$a6lVMLB%;~qB%;hgxL%CzaRP%pX_f!mmW6W`vXt-gxcKB_{RcTVCCcEsyG2 zzZ>{0{y#+He8~t04H3BYBg?OTwGe zJ`$Rbk=9Or81xomAwx%=7JUqV;VO77&uVJ2MYb zw@s*V^6XAOF$tcj5$Ykbo%_PyUF9yNZC1CRKSd49Q#DBPw5EJBrQwqTT1@-fDzixorN5?XsqqQ;`Bk7OQBI<+#2(Yq{FFqD`kA0Q!UCS5xE$$-9xgl0y zG9r;~u)jP9+(T)f2${2{Vd5fy1!KPP$J6_4Jm4+`cZ+xSx2bk|h*dX*7_@xs)8z1` zG$Uk5D`^SMlAG`Qi2nh}_j|yrwf7Vz8>K@*22^WnBmG>n>f5Q%?MfUKv{p>AUaosm}rEi=a@o9`4Gb2BA}f*R*c_qSmq`67V%-~2CTjMI!d+GMtlfqR&h>^;sOrYNug8|H`3z^n zn)zu_I%?m1_Y|4)p%L1~TN(1^q!I?c>u;yim{4Y)y)n%QHB3iUS)W>(E%L1v>y{c$ zTe-p`SH3cJ?Rsv{^n%M~->Snm9Ncb|aFxvw!khD>fd2=y4hnoyoHPST=f8hecM%ULZ zE*t{7Xa6~2H-jVeK&bJJbC0aU?#Sar}0p@_)U8q3EaB+o1JJU(f^&mPfP&+E-t*SGlT-VOO|`R=rrLVOt^ z+2a#?Bh*a~3%`uPL#Dc8Rxg^ zLiN>H*EH7oD%PDD9-*_Lxp&>{)|C`LwhLr$#k_ei00F~k82AS0~iMyTWEDU_SaJ|p>w zK7g+&H<4Q$xboQ;3@X4Ip_W$z%8jR#MaJeea~aKBbuJgWZRZ|Z?6DynV%xYc$x8nq zd}|4ko&P>zq)&%+*3e_k`>w1X?u^h}OZr5wCnINe`i|{!Cv#6S^9`z+XyL01PO!}3 z1{|{EP&Sx7$H;Y4ak#;1Fa?}`b4?ly=07iVW^?Q5(>wkaVT3M)dKzuV=QuB|V*w)j zZT6_CWf4+nltw*ggFNrgheoJ&=W>F{9_9sI^@L7iBP6PDG>XO>MK}|Ow9>2rX@sukDj6_Av4*W4Q~GqxuG2@p32&I@+vCXm_1qYRsMv+6 z`su;LgUIw$wq`$Ehd<`}OvG!(Azdw@5x((gKD5`g<0EJN(Z~M&k3crAY~aQH{?LK` z!;fyA?LLsY6cVlx>T+QzJn*G;gwBRu7djVe%IXf6ts5coAFj%#|4Tw}u`xpIadA$DKgU52dlYzZH+TGP-PHe12)yZcvBjjviV=pAlT5HI3-B|lDnm0Gr z*4TnAtvSBHKu1$ZFMGy{t+A%7>-vN{=c0KaV>^jW?ah8z(;EyME!i^%-~AyyqiwYm zDXp|4*2vRZ1iQz@!ssAV#PT7QC_E z=EwnV%=-4}pvzouQ|1nKT=)^H&4!qbVl>Zov6?gHrqa~IPcJ>_d}xG<*<79}pSup% zHC0`B+U*#Rj4``Ze?A1b&Zl50qJnCu@LDSSRMZIWWsLa|>c_#|JkT2_Ba+={`XfZA z2(3g`Hpm*Ohr*6fBT~2>YcjfL3#%q8S4jF`Q2&h~@bI9s(S$V| zLROc?%7e&`e(e}*IOr)ZdYe(U$ki7z^pF{AOSa*mn|#AI>ph!s&w%GbBSem#6qnn@ z2w6^DUE^jU5_imi(7SA1x2DG|lXK|kYB;Lzs0`0gj|=6tZhGN@yV!-N`}Np;wtu6@ zU)*>%XoRY{iKexlHzMR30Uoq)62P~mYuXY{0%lqJ&DZSYn=^kgG(xk(9BBf~fx?u) z7bY9c@wEoHwKKAIw%6nspY>PHI<~sSYIhEcv4#Xzb8{kQS^muTt1}1xiRv63sEXKl zHL}9{-O$nBc)uZkiTlcfch7&oq7fo@t+%6-|V16y8qlUjqAHzsi%8FZXM?KLfzTc^*l0n z#9xcwkwfSF>7AFal2$*t@^VAMQ49ovQh!Vl&QzCCSq^}bC1 
zwrlz$wCG%q*@jFK-n(NKS=}d*?Z8MW4Rf%zk<}8dWE-6ZUcV8jE`1*Ts(9qIcxOm* zMPfmhwtCry!~q6>%V@0qZCiC07Ff;KwmE(^)D69Hb}I*?^tAZt zF-BgvMu@EOgiLjf&@3{;-FC~;IqK)v^~R`l>Aa~)|s9DCk}cICWItvX^z&pjE@lc zMyGP!BfJtdC`QOQ9ZfX-ji50+c~KdmqAzl&>9{XsZN0D8#Wyn70ITqe+H$s(9~ z#*XlGie33xKOJ;gS07-aqkU_Qo!L2`>j*W1W<;J;1!XT2?6z5TRt`_ZU@qMnUM(^xu_gvZ$Ixy47 zeM*8qWbnT(;JtkC5)zmzz^8vyx~`>st|ev9yf;4-ZypY6p@&>3g%#g)8XM~vk6Yl| z8-|>9Bh=K?^Uk#lM6JhO%R|viZS5Wv$*UE9vC)Cp{jeM}hhL1jTH;!vs@+Z*A+F7# z?AS-@++hzcX%HelG!5F_&lxN5uGAGLXU)?C{myMkI=;s z{q%_xkDI-1{p3dIC_BcTHEhsP?DW>|2oEbC=z_1di_6*((pL@U@zcv4LxniPgGr<& z>%KlB-y^Fs#v1YwzJtllBF_nx)g0NdfRrr|VKy|r7NYzWZ^;pa`DU##`P z*a#J~wM|`p-pyD-=B>67x)2gF(=|0`=D4Vj$QQ1)?t)3@2(f_&8NV&t^-UEU zBh-Cqo<8Gvb{j6$1Itl54??$ma{WV3{a#mMvdP;0Gwp}>52KNR9)Lg}bzm@fE;K@Y zEj8C*9X8h6Fg=_o;O!u{PmT;{8*Df@K0@T^rAyA6hz6vGmbHQcOXuj&A0fQ?20gKO zOS&UO1Oy`ILnFkTuowe@`Vl(r#SOWG_=n#%H?Id{4xbP-gk2{DFf~lp>l;UOXw}$m zXoN0@>aH-+r7uJy#3wD=wP*d^p_B9{R4 zA&`ZzW8|AjGR=H%$!@75)KJuAHrcmUXvqD@>77qWb3HIefPxG(z+^`iRVbJdHP8KIL5o1fo(765?M!3bRrG0xc;B)KNJ z4tI-;Ro1$rYO`{J)`0e|Xl;{kzJYC`$aT#e80(IWNahl_PftUPdtTbMwwbF7T#}Qq zj)967XzLni;ph!|gx`izeGUCLtx4IrEy9gsCzq^cd@nQ?#7ok*DSkAY;*-;o1A_AE zkOz*8koDT97@x-FgdCX>s!ooJ@uGLR9sb~EIROaO`$nt^fU#&!Waq5wLBkkXdPI$k zHFZLiRsPoVZeFHr&S!XYw~LBA^($O{(B)T#Hm`Phjf*c;Y+jqosbA&tt6hGL%dd6$ zbuK^T^208_-sMMJeuK-8hBmKr`Rr47q+x^&CTOFKE~;pvh#qQaq2x!ebs=L{E`DAN zpWn%4H^kL_CpY1OVD&LQK0XsJMz4R*cUdz1NEj)2GW2f$z1VL|!QG$s@sQk^lsh^6 z=FkWkS4_r6Nd8Vb6t>sxMV^ikQbY9UYuS2#sP$7D#%qJDd2*faAp#fuJ*DBd&iWCm zUi|Er`fecLtzNc_pV@gJ*Ac1)=Yg3SZO@M)qX|JYZ||{=344z>GRBLe>22`LWYGpOY?xBlL8kFjKR( zTDJ|IU$^$n7kUpN_PR~ygM@4C+vaqVQys?Xs=e(D7#HQ=zW*%;sx0*yYlJ}1$M;O- zzJfE>m!jmXHCA_yi}!TNAnvl9EdyQ+1Un3=Q4WacgA#Q7N2LGC#A|X3If% z>=b%tUlXp4;k*N?vW_#sB`z$RZTWunI6+!&1YkEr>+ejxz94P)$~T6W2;kd}@CDsK z%|KZvW3F5I8+vzoUIt~frzLOa0+|(!%$-zx0RQ;uomZuqj}}RTkJ_W~jf|h9);t)X zl^5-$;L?K5g}&OUzjZ^+nYSmk){&tTnZZlxu^yW*?gxb22_TYBT^ozDo{@PH$IOm0{BQE9EZy78ev z6*pBVrl5V^GYhRQiUQX_Zsf+wb4L`lb&32ow9n7MF*07AA8OdD=en@v2Q*Fx!$HX% 
zeED!YDJRuQ0)6WD#~ZI<9t-ir`tBv3*Sitk{!`14(mO&Y9VH@6X8ZAK8zVW1XHRzM ztQ}+Q!L;aG-XhP7?oZ6X2+iT6xM9?)sBUDA)PS&AMQPH9u z;cb$t(fVnvE3&lNdA;G#wo2k$H*Oz65A6sU(=Si+M9zM;Uv9lrB1GCVWQBh*h&CeBP={Oe}A@X#Atu+tQPbCk}PupynQ|DZxXM~K= z&34_rA|Dy2m;ADEmmBI&96Uf+)30MRWHbJyG9t4Zg6WT(-nlzX(@QOp!fpmYU{(mY z&&*Rt`NpLe&EU~GjE`}U_Cuud**ZUl<7AfHvUv<YyNpZdKnzt83Ohc*wnJgobZ zE`Pw~54!vzmp|=KYD88 zTNIlw*vW+>9QvY7F874?Q+jjcPW0(?1Z42XLve0%ul0*pv>v#nbKuvF>2Qd1N!~)z z$+xeUKQG(wA-wC0>kPlmoKbkxQ* z_BElx8LOQBdqeLI_3)_PlkSFNm8Z8l>EPDJ*x?P|G-$lbbHH6`MySJhP7`5~C$x?b zPm8R_(tFmiT6*WKoq3yCU> z=F2lQw#M}7VI0XM-E>wzJxiDle!#OiYXILD2~xE{6B*jLXnuCTb+MaqrTNEGqPM0! zu0%)Xpjkd`-Mryru>haLz0h@3$4(B@Wlam<=dw6pf2PagPh8i@bwduol*Zj_P(IRU zlf57aO~%tXHQs;h`SkJfIJ}$m)cBL9ulPMjhmPaU4syWsrel1B=A%&mYBv({4luHG zIN*BV8QTkKyJP8d(0wp8Lgr_Ac<3`mcMAnpx<{zk$)L(O+IK?^%RiM}@tJ8ag~)tV z`Tf*)yY2|Y5#r*vhJ;0!xQ!Qzx_?EAmZzDYJpBF6$o=WKE({+j*FA$sfz*!Bb{Wj+ zG0z|EktOle(A>6)UrT5f)oVyLS*H^IOu~_8+~u};waW5P0B%FvTeJQMsW9_WbX{&m zo{sL6>TgO_J?v7Bj0?{Q^{6)1@A(u%*O*h~#d%`qL#U&mK=D)u4!t_VTEs72g zlyJb+(?dMXkG184IKV(B`R1H`+d@tJ+0?{a(>@rYo6g4JY9}6y-yYJk(-_XSM#lMR zP97V-K76t{LfbJkcGB~K6q31Goi^^`bd+r|Y7AlFJDTfg_UFvp#09aqJT4C{?f zwKnVB^u`+i3A*OSgWR(R*h%Zr3JMWP0p3ZMa_L0c54$*>e ze`zaC0*a*#0Cd0N$pkHe}EDq^}?*(Z_2poYG z25?5G(5S%AV}&c>;5ZCx!dpYPrGI&yE7VKr7Xn#fu#W!s?Yu145jrYSB(11JEYdVW zGtUE_6rFx8lH>RY)k~Jm^THbCxEpc={RO`pd;Mqhpa1!BMnC7^_Zq}dJXMGo+a%4+ z+3;T$(r#D7!B)=PqanGG1N!D1&c}?oZ9C^OHbQrYyhgxRrj<9_x{!HCBpLS9)>ni^ z=vr*4jlY=EcyZcGA-=*(A6?x=`o>2Hf4!R=X2;;I>sqnxfyEBHd!faT!#82K?_Ara zkKV91L{aWQj$HLWLXU)CSoEiz&f4m%`H^XG7RYHEcJ3o`-C_;NQQN%kjmXbip57iX zQe>W=WX!QQLdDhXZ=7wRQ(p9wcPAa8hjiJ_p8XPVHX^sKy-pCHO_2@T|?OLlZWlT5WChH``{ICSNx@? 
zcUJzzpD$l3lDM($e9$_FERQlZJj)y~W-vK9Grp%J<})N`^`){Xa+b0xC#*?5#r z=iy(@dFZwHrI3nJsVa%SXY@Q=^}<274c;nNm28?j9~vQ*ZQkKgxux3EZ>5tTA(&zH z0EN0k%=gflcLn)w_U>H%J2*xN9__&sPIwsT%3-icv;XA(WBWh#B`qwO#Md^y@)lNgPV{9Ns5 zyBHbeE8k+QM#TI6ko+`9+7X(Ms^&-z<+I1q}#^m^~%JJ=C z0BnKPphZWWVU)OQ-jJ5#RhnN17^XtK|MSzf@7Aq-!i4DUAJ;#9_~i4P zomUzTLCr{0KcGyI~VWNwcq$(RtjaFeip`3Qw!FH*&a)!w75RZp>ZD zSbY-zt|v#Qu@P#{=rPxEeKzQ_Ti`S|+uGiB+w+a%e#!pzV?S-*MDM=L-#GkFpa5V@ zLPlr~!t4Z)3`@@J@JJtU4t8F@TkZf{#@HI6qk_>*WDZr1PPY4!5WgE5p@&1}yG9V( zAK}?6?$pAY(>2_E#AIu(^+=U?TgrX=u6b;TjMFhfbBOfGTHBD;Eigt%ID`=@w2yabm7zTRbB0GmjfAM9#xm`{lR`T!O{~mL0mp7#MxRI6C zO52~$*xz_&Z>i7L!^XQrBXrVXqX;Fd^{h}vNOH7SJ^YfWSFLj7eQ<*CXG4kze|M;_ z{Hl8%do=SBE%}P7MbY(ioh0+vx~7HNjnwsSwdc?kpJm^fkr7gM-B$XnU(*9dyuWQ- z%d72eb}?hdl{^}uuipQLKlZ3+0>Hb=Ea^_8W~uYd>yLlK%|aMN&|=$01dMk(=H$64 z-LVc|k?^>4YsNj72|-T}#ZeVh=12WrlxBoz7jxpQ5>qo;eXvEh%WIAdL_LVE$UxqC z^k}R$>9ywA-0y!+e>Wl{w0#6Kf6_5*O#>rfqMrcu!L&zOy{tDpYrQldUNk$pj*9H%jMw%_q3YVk zz#8`T5C4kpbJ80jv-2L%S0^JR=rj7=b^KCV8b=7~)YR4!fXPNl4D1NRUy)OjAk@zw$jdtP3AG@>bZb~3VND`L> zrrirk){)%c%|KQ%YoU?y>XJicZI1>!K zF>5_Z2vqM8fm9HEComU^yO^0DOiA8Zc3gWY5i zDh@Ueu!KL((8!#7EdQz}n`x8o_D3L#>qo!exGl65$co9ll$QE|GVFrQIO4wjn!6=9Ba#MVG$1e2pwoX(DVrzmC#nZOyKuIz1h>= zXKxm0ZNOwIL6fKvx*V$E_rW8yup&_||N zs%QSDNykRy=+ZU}FjaTmE_Sj{sqc)P4~-C;?9U0X#`qL6T@dB3k6AU@sGDynMtEx) zg859U>Fq4Fn&`0}Bx6E0hqEE%wt1l$p`+{@7>$uNww+F6#Z0dD&K&&z613J*wW~!1HbM|sMebZkqDJU}(42U3-mj#C zm)2LW$HFtu+5d#mJY95(M6)r%pWQ!f9^PR@YA%MjPp}ItriAkQL*`MzJRY9BsuIe| zj__U)_ATIs#X4a7+2ZB!4f}EFdL%NnN2WFe=pW>U_-ScjuXD8pXE!>3jE|7@+N+ho z_HNt0Dfz3bN#?(^VqXGZJ!!nM(A47M(*KI zQ+ZT+$;gvfksG1L)5fzolYTE3Mo2-OlwvBDqDw3FRE?Ea0yS}534uYLji#BNc`Y}3 zaP{yZKii(mqL}F+SNF!c#@%u+#9eyvWa(pTgbsYk%cmRCzZ^O!uPa`k{KJ%&BlF@a zLCaAZjg3(En(a!KRw#KeR^A*>p@U?Et`@=8;L@{PAhSOZbR$20h3EQxFl`N2nS z=H*5e41crm*A{8c=SSJuD3gDaknIm}Os_-^cel_9UmheS?4GN>hCgqK9{ zvUx4dr233;cgGcElgLYo95%=ra{5t#D++Z)g*#L#!-yskYT&z%I>x~^N6UOy_+Vz3 zPX?2yDZ&+J*4XyId12KiQC}wWU)#ZTQm7`Z<%&^@Cmyp#Jd0W#wfUZ#ikuS#_L_Rn 
z!P_*U8uP+f9$%OfqX3g-#>fnQ;5T!jY0<=uy1Y>|^d>HE=JFOUZ{_kfE^p`Z4leKH z@-8mF$>q1W{5F^0;qtp&eviw$xx9zVd$)1rvwhnOb+tRVLD~r0>A`jQN#rm{Cw+L- zu{XJS@r{s>EbpZvJ@joaseh@IZIyQBW7~yFYQ0_APkb8NbG_qA5@F6FgvxYWLIo9& zM96rRHaa@Wvi)PKHpt9_qn=>3n+^*W@`x5`h_ zIC*j@kvEqw>*9OO<*YU9?z<*ykwmOhJny_sHrlnwv);_XXIgI?F#h;NdNg%I8Gk@D z^ufnxE!?iq|8K7a-+8DLYQr@_-!=DG)yjvYbLhj5&ssU6(4EIr`kS5ckEiqFBg)0l zM<16pQT<(`50J~bA#8N>AG{$vnuDBzM30|UvQK(`-y_~eHxO=W91wn(Na{0ZT|rl0 zAo+}8m=LO8wR)GoKpFa2L(KfqJI2%1`Tsc?`nb~2I89PM!R3BYW0uPtmpiz;ip!lt z-=^k`BA&|MEP9LRt)jPycp&F_oX2sV#(5a$S)504p2T?&=Q;e{qW6g2D|(;k{h|+u zJ}BbU{;-Ih`ccuxM0kf^_diy~H#3S6j1fPE?PTT5jp7jcH|dQ8%{Z3RQJ!>w)qRX! zb$Zdmxks>QNb7Som;Cd_6YBUmzcn-_?sqZt-CVwh%d5HYPXaVv!{vLq+%=?~Bf-Yy zPtY_z*KpauF#5hPYvo!YMD@upE$gSq)uB&n-}25zfpRYRAtI^Y6wz{4a5kM2F%Ex8 zltl31A;USug9k4!92j7aGdC69BY6l$htD?p_=pW&imV%dJT|R<63vNfJLUTf<}n}N zXqoSa^rSv9VGGj#C^)JniTLpfst{)?eSHgaKur?i2_85zM2dDYl0pVa%&woyT$llsI!O(I8Ye~?2D<8dV&b1LIb ztI~HuEZAObQsPoZ9h5}y^SxRvW+@!x9KKCBg-wkm5vJ&qatZ>soRWR&ClTWge=vm( z$4i2I^acOWXZ?eIU>|vk5V^PvF>S~s(V%@|#6PFgzK7J|g3XW)MEx3#Tc~z;gn_!d zMW{+5FmhgDuv3pd=CC|Y1^STp(jf2s?Bz(}4Tj?RP9=UiUHD>~%=LL-9p(`yb4UyQ zB(jX-aX#!z-#H+8rSA4J6;aetHH1VcxRpe9!I_5_HwWgCXI`u<`qQE$ni08b*?10< zbjCE|gDV4lmUD_3k9{u*I?ziB>vte!qR+@g=mm~x8Xp&HDON_MJcoXw10>)UWydcX4Ll|eigL4FGOquzh6gbk<5 zLfAlF9z59rSHr(>WE0qi(CpK^8gOK`m}T6sb2?QXf6Yc{Ac3{$`En^>$Ng^s3Tj<6P^O9(z)LK6_95cIXV%rV6 zCQ+<2Yh$tEf?*9=#_=ADO%Ym?$XTOrzP*hZcEK_aIgd@N&-3F0dQg11+T$X|2am-h z9)6J-bl?vaM<$$oTKuv9$e;_qT@eS=Pa;=t9Aoo<(ZgJPNFry_t8HEAW*i^L3;ATs zt4G%Di)GM@EMq=r$XCuoqh6Xhu?Wu#JQzw`&?LD_E#*exqI+<^TaHkB8~U31+`+Do zQOj)RqeDlYR56$I!1uC$O3S`ScvgfAvexJHkUHit-%FiJKl_poodSUFfk-4FI+C}Pxbz`#inteeiC64Wnixw=24u#Qfo{$ z>R-;ol1swMn4CmdeXL4O%XhS>CFQ8bw5i*)O!!70+ebw8>Au;o>uvB)?c_gEPMT~(IE!Kol9c*xrT%C5P5qnI zIbH_88Mp2I(w#&^Pxf#ol@=$-49+>*9>-Z$$eG`4mypF#$(<-Vm`?_sfOEag2Wzvv zWA#4zD2j<~&(}4HnCbGuDV(*Vp2^Y?2(rHU*?FIUgU%(%n#Wu)cD#$xX|HLKu}8&60vl+ zjr%r>OLohp;Q2{oxKrlol_!=GJ*B;`cQvXj*?U`I|xN2feq=Tz+*0Jp6B@i5Q|BKhX}aO zF-4tZN>Y8bE4bsWiFssw 
z{2)&_K7QcCLqGG7ML#}xW#`n#2isESt`|%qXAU!9KSt7LOQDvKpy&psTr6Y^chP}9 z#xTV=Jmw}5JljF0E-~oemqwG*WqS~*j-5)ds-{D&)OK4YPXp{zcT9ia{qPT_OeIu#Ew|JXF7y6UP zOMrbJ#7?u)b+U*N?nWWvI;RuIhr{z}McAliE-kX&HR#$eN+MifmFQ!dG_cyVmT`5U z1HFtV5xT+dZ{(xHwJh@3!C&TCCv)+WF=Sl^`0N`65*cW8kRAMD1}%= zo4KU@cc4u1_N8WBw|GdDc$q|2o%n+=qB=*N&-Aq(1qR`iDf=WDfQ1 zYCWnx`q63m>|g!I%iGFm^$m;u)7GoRNFrRoB}}oHv`)4cWxsey@DRT+ba^%;$7gJ)alIOR$5&cb zjJ(-*;>0md6{^TFvsT0ieyxc8M|GIz9FuqJrjHylmcu4|cEP^Jq^@YBUhu-%L^InF z%nW*VZLU+3kFnk2$3<`H2EVI&dh z0@L>xE$ZO0N|wPiF5@mPCQ%#%d#+PBs1wW_^rX^9m(w~}kLZ~eCDEJ+f6!t5t`Mgs zY0OK34coy6IdH6TX_TghhN|0^{zY^Dov-DcTI;e+eDyMt6U%uCl6b_5O_vFNSQh=@ z?311=u2%oFi1^3{v7$4H=0wgrcAZ!BBf}VTY2lHd>__kS>Ufofg-QDRMGE~XYLW~oylgQN%4M$XMRH?6KoVj(!q3^Y{z) zDMBCp#6^yrL+lW*b=9ZMu$Z6g{V;q)@Ux>Hi&YExDV?nIiz-0)DkU=7DQ{JBx;uSyjsTLL-5au z;3ZKEnJ9F%MOkf=-JznoAntPX|is zlSWQXbfSyUuwk1H`J#A)#~5<-lL$UM+lOac_8A_b<0o_P79|mw_Hvh7={jDf;O5|} zV}kIKXigMY8+pt*ZMck^N#x2Q?FR~`zflDw@trPoytZIcn?&$fZ7|_GourvD_~=d| z^v5b5^cOjNMvr&FMoZ+FXExC{QU5n6_Qnav1EPVSM0j_vh>YMZ9>8;^khk~D8T1%$ z8TVeb-mUgjk~|r6cCh85JE1-{Z3`K);Z@wI_~L%F|I8;&F!PBCn`DkhoG%}&_Kq;ZVkIDPmi-6-kVfo&9B5}g)dcFBMo~6}I&vmv={FzV|Lk?O}jr zAekh`w>yGiTxU4X6iKwz${x_TbLQAl$=#Zk7<>w#7XRUS9gx8!Jm;9-t(z<|xasvf zt-fbz=1id+)0 zvN`ejVe{c!Rr|{r^N_WRk0vb7aWy3Avp=j5ebU6Lh&BF^KlK2COm zAO5x~iEqkIP>$~u`Tk8kBoVvEYv(e*RXo;?QfJ*6r|BVwFBW7wx3(*ce*G5sTb4YBkugl|*B3?=I} zHA%FYV=*uc5uBazjVwA!-~_=uj9YI~-_A8lKk_z8*x=s6ZcNZt3ykiV82in9Kg)W* zM*r<2{!G+=A%P?uqC&!+I1(@=g!Uxb$}^W9x?Fa)YJKQHC*|WlGW6jg$2d`;$2zUY z*<_49emMqKB#ACn>df&DvoEyhL)Wy3JJZ*6XL_mHIgwA#r2eILG_u5D-{J~+3$ShR z!M-Q;@qxJL(-Jd!h>O1cOX?$we)`DJIw!1==h1pDGW7A!)yueJPU_Ey%0F4E?X*M4 z9>uI_dmYhG5}|kv1_8kXRPEA}(Ou4?irpz}lssC8ioOKSaJ^HBu z!ox38!TIrK1>FLL>ksDYlLFLU_{mp|n4RW4uS@^w*zR5ysK zK|M59hQ`&P1}!LI`d36uW-e2|jt~L}S_pLreMM9W>TB4>LPHmM-Z)D~jyD#!>mqM) zJNy3_I4zUZs_Sq3UCxYij@Vp3~9|4C4pIuyDz(t=7URd=j@S*~?;h(~B|jo`Kjjz%yO!7C!T zJ%VEq9FO2c1Scan6~QYDn8MN121Gg=!CVA)MDVHz?leH#cNw7VyA5c+CxTZ;@FxtI 
zQ!9Y^8jqm*dn4FwKzmmK!-#QU^BLyyeO&a(pjq`Ra3+HJ2o@sX$mNq2aP;!YKqp!L zWWZ6(Cxdp?uK-6gpFEO}XoGw<8=OZ*pDe~{%qIg*Wj-0KRQ(G0u*)OdJ@{n6ZG=w- z+@1K0ke$7`D?rQ5pHBwPP6p0S2F^|f3)PfrPAdXuCyO~d88|x`+!N@|jOgMxL96|;>fbhxS-l|^#ZpVD`$e*nG75GyT{OJgskUXaqN6tpz zgk&)%Bm*ZT14o*HBhA2(X5dIiS<^pL>}2p~4cJUIfcfVx~ieyvjsFL<=+?mf#`EE zKhH&q;D-rx*1Yb3v{hFxu;OhnaV@AAK`60Mgjn}m{ z#Qq5veI~j5Q!e`CIem8iSv63J{qqR^MFd`wtnFXMk$)AzzmDLK1PCE}{cj@pw*}<% z{<{MH{dRT!Lmc^!0(^FH`A=N*$>2X%{R;e-2>xpX|1E<59>M>JfZ+b8=zo#K|6NA5 z0}dL{{&4^d<_}Ki!T@EEOV|JopFA>L^(!zE!Hxo^(7W3}ZDTZoyuHMjVkDoe26;~# zcqZ$|HcQ zBd}_6(YQtzSA65z2%b>p@EFm!&Omk=PmJJ625K8mHek-T7%-;>h<&R^0N)ltDu73V z##17AY6RCuaDxGMZj9h*5!7O+etI0aDS~|lh&{sq)in^a@l1~(w%>q}0|v~gLttb& zj@0om@~k*=vjHQ|HbC2R3>bN?0VB@~Apg=Pm=b6_-y?u;7rj9ALcD&FI4-38TX0BH z-;Ed3(Oy<1yV^c;q1#$Gu(H~jZ>_BzSXpXz&vsVNAGmq?+0(9^eTO>B-PO+0^x@u& z=~LB==~HFK^jvGLTU6h0ITMQPWh!rtB8LRo+^^~IOvUjNkT+1Bbkwed%#;m}gMwd`fTyrn$U zTv~cW@y;xFTdT{>rRll*R$6Oi>I*L`-g@e()%N*zw|!4N|K`h@UsX}GJCa$&iLnfN z^);SxD4Hubuu|rZ_OIV98%VC-V*~3~hZjA(?Hkf`TWh)BvCG>twI_!<=g)V%XNFI; zm(P`WCpaHYx7IE!xm>P*Q;Zd#i|p`9Z{D7@ZgaI8M2pPS>V@U?++E$C&f7Xgba!CR zb~`JDy*IE=w^llbgp^sRHx(pvR%;AmRlG52X~xnb$jEx=US`h z+sjQXHrMJEnX1_*yNmVK-&=EM7cX=dIv2fwJuN46Q)t?cod*{dR&~2*o>}s#yko7k zyfEjCk7~5DbdLu|x;o^%bEno;nirRkwU_Tc)|szW?#ElKj^@~zGG z(uGwkoVqCITkW-ztFu%`$;nmy+$!N^Z2Ww)JHMzhb85#pc<+$8BCz9ldw#WJCxw=l z+D4`i*4TcXl?{j|=6M1LcQ^z@SxV6;0uh<-$Sw7pA3&vM6 zp>v^oqSI}kZO=Em?Rw!x%aFrquJtoUnrBs2j^$#zyV&+3PM#;S{t(1deEq!9wf-no zJ~7J05an1`j?Q(C*O9iXKw5V;dkJ0c1T@MFg;qS=nNP~T<6ttgPN|G~n}4c0P@Bv1 zt)-=U%k0ivDu}A6*_F;dR&^jB?ku}-D#l!M?e5}8rK>8Hz~0s5`2oh^_Pm|$F;8?B ztg*-B0O&1GnO+#xo#J%stmDu6@6@qTFWTf?>v?0^4=%fEcNKUjFH=!A0rQB;u#RtMk8@WM z8iRF9r|a`Io0H;xCw9#DDX!3+nrTyoBV+xX+g0O*GfHB55$w1aGrt!%D)0L(kL7gt zrcGsBWxU57&)jg=-yOsPdsuf8ugAn{z8i#!%jeN*uR@0B+V$#6Q|tc2=2=tckNr z7uFUdXGd3#*NXR++v4F?KIwV~+Ro-m`)G4{VX0N0os&96I)gld8Q@KItFxrA%M$O= z`B&ZX`uW#fSYFI_7LIevI@dbYS!&Na$ZPJ{YIOMUYW*ZIak$l7IM(X+j@j}0wpm=5 zA`d#``W@I)i}mw&&gkeLU>$0%t!FJQ^-E2boa&*aUaVb(cW^%3GRhu4Z 
zFR#=$hLP0})OZGW_BQtDawi5oR-n(5$;IZ9Zk}1hyfN91zT->A7MLv66gSAO$g|-1 z;y}(OYKBFI*Lshaim|ie0X-0Dt?K8aor}35c@?t6t{!XVteF!%W=X6pl6;FyqJ?!}T zuyBrOZx@-dwf-m1VNRlYta&y^T=1x#xJf{PI+d1|`s;Qa)R_pcIyKboH$=T5R$i?QWA9ed=V^*f3l3Mk)`o-zGjlLqfdHQ+N z&nd>9#lf)IH$(M1;kuXa@%pGPdUj~bhWD>CTx*jT$VtCc z&9;`jOGddBFSlGZ6V(krJV%TTy!`AcuM>Um5u=4!9DmXl2`D1Ft2d*;_rbjmVsK!0 zv2fR2lvdjn>{lMKb(?k>^|;-@y{f0XdfNci(U}ys5tW zHN}r>`q;(9jCV`_=-}#H70nDR(Uk*ZVMPXp;$jV-Y1SKl;&8LuoIc*X?~IRtQN0W5 zRnD*d_3Clng}2vw#o-UUa_-D;Zl_wSdNnN>m@4mfV)F7@xO>VApt#i=ZsA`YUlui3 zy*8G?C)L$aJ^O6%?z+n&@!fZ1QO{xCj3bL-A5Lb_f1r;pR%Ka?UTKZ_H0yg3$uAyy zhv+?@=hu}U+*Q42m9H9#!inUp^RMEDKHEO0FKhh>P`?OVk?&vn-fP{+#5&Hn z;_H82oQaQ!v*F=!v^>;1J%@A`UB_DM_Om9D8o1Z$tf+1@{i0I^9_M+c4rVf+J^hlC zx>@F@p72yt^Ww9Q!>y053D2xKW0omljai~#oeJ?eSU1FeH$A;}xL&mlA|E@qIIkZ7 zwf*2Ye%sMQec!bY-_rXEvOHDVK<)A9`MaY>KVK5?QSfu)4>b(miZIS3-|VGoLZ@V zj-_+Hr5_JCzpZ=qld$?5wEUcuH~$@Ly|Z|j8+)0sQ~onQ9AkTr`w4HL(#Gq7GWqJ7 zVM*?RWpH;G{Qg#bsE~E988~at>qk1hceU&4 Ojj>5Q^^;8P(*FU96uG(p diff --git a/static/doc/distributed-process-platform/doc-index-95.html b/static/doc/distributed-process-platform/doc-index-95.html deleted file mode 100644 index b279f84..0000000 --- a/static/doc/distributed-process-platform/doc-index-95.html +++ /dev/null @@ -1,4 +0,0 @@ -distributed-process-platform-0.1.0: The Cloud Haskell Application Platform (Index - _)

distributed-process-platform-0.1.0: The Cloud Haskell Application Platform

\ No newline at end of file diff --git a/static/doc/distributed-process-platform/doc-index-A.html b/static/doc/distributed-process-platform/doc-index-A.html deleted file mode 100644 index 6469674..0000000 --- a/static/doc/distributed-process-platform/doc-index-A.html +++ /dev/null @@ -1,4 +0,0 @@ -distributed-process-platform-0.1.0: The Cloud Haskell Application Platform (Index - A)

distributed-process-platform-0.1.0: The Cloud Haskell Application Platform

Index - A

actionControl.Distributed.Process.Platform.ManagedProcess.Server, Control.Distributed.Process.Platform.ManagedProcess
AddressableControl.Distributed.Process.Platform
afterControl.Distributed.Process.Platform.Time
apiHandlersControl.Distributed.Process.Platform.ManagedProcess
asTimeoutControl.Distributed.Process.Platform.Time
AsyncControl.Distributed.Process.Platform.Async.AsyncSTM, Control.Distributed.Process.Platform.Async.AsyncChan, Control.Distributed.Process.Platform.Async
async 
1 (Function)Control.Distributed.Process.Platform.Async.AsyncSTM
2 (Function)Control.Distributed.Process.Platform.Async.AsyncChan
3 (Function)Control.Distributed.Process.Platform.Async
AsyncCancelledControl.Distributed.Process.Platform.Async.AsyncSTM, Control.Distributed.Process.Platform.Async.AsyncChan, Control.Distributed.Process.Platform.Async
AsyncChanControl.Distributed.Process.Platform.Async.AsyncChan
asyncChanControl.Distributed.Process.Platform.Async
AsyncDoneControl.Distributed.Process.Platform.Async.AsyncSTM, Control.Distributed.Process.Platform.Async.AsyncChan, Control.Distributed.Process.Platform.Async
AsyncFailedControl.Distributed.Process.Platform.Async.AsyncSTM, Control.Distributed.Process.Platform.Async.AsyncChan, Control.Distributed.Process.Platform.Async
asyncLinked 
1 (Function)Control.Distributed.Process.Platform.Async.AsyncSTM
2 (Function)Control.Distributed.Process.Platform.Async.AsyncChan
3 (Function)Control.Distributed.Process.Platform.Async
asyncLinkedChanControl.Distributed.Process.Platform.Async
asyncLinkedSTMControl.Distributed.Process.Platform.Async
AsyncLinkFailedControl.Distributed.Process.Platform.Async.AsyncSTM, Control.Distributed.Process.Platform.Async.AsyncChan, Control.Distributed.Process.Platform.Async
AsyncPendingControl.Distributed.Process.Platform.Async.AsyncSTM, Control.Distributed.Process.Platform.Async.AsyncChan, Control.Distributed.Process.Platform.Async
AsyncRefControl.Distributed.Process.Platform.Async.AsyncSTM, Control.Distributed.Process.Platform.Async.AsyncChan, Control.Distributed.Process.Platform.Async
AsyncRemoteTaskControl.Distributed.Process.Platform.Async.AsyncSTM, Control.Distributed.Process.Platform.Async.AsyncChan, Control.Distributed.Process.Platform.Async
AsyncResultControl.Distributed.Process.Platform.Async.AsyncSTM, Control.Distributed.Process.Platform.Async.AsyncChan, Control.Distributed.Process.Platform.Async
AsyncSTMControl.Distributed.Process.Platform.Async.AsyncSTM
asyncSTMControl.Distributed.Process.Platform.Async
AsyncTask 
1 (Type/Class)Control.Distributed.Process.Platform.Async.AsyncSTM, Control.Distributed.Process.Platform.Async.AsyncChan, Control.Distributed.Process.Platform.Async
2 (Data Constructor)Control.Distributed.Process.Platform.Async.AsyncSTM, Control.Distributed.Process.Platform.Async.AsyncChan, Control.Distributed.Process.Platform.Async
asyncTaskControl.Distributed.Process.Platform.Async.AsyncSTM, Control.Distributed.Process.Platform.Async.AsyncChan, Control.Distributed.Process.Platform.Async
asyncTaskDictControl.Distributed.Process.Platform.Async.AsyncSTM, Control.Distributed.Process.Platform.Async.AsyncChan, Control.Distributed.Process.Platform.Async
asyncTaskNodeControl.Distributed.Process.Platform.Async.AsyncSTM, Control.Distributed.Process.Platform.Async.AsyncChan, Control.Distributed.Process.Platform.Async
asyncTaskProcControl.Distributed.Process.Platform.Async.AsyncSTM, Control.Distributed.Process.Platform.Async.AsyncChan, Control.Distributed.Process.Platform.Async
asyncWorkerControl.Distributed.Process.Platform.Async.AsyncSTM, Control.Distributed.Process.Platform.Async.AsyncChan, Control.Distributed.Process.Platform.Async
\ No newline at end of file diff --git a/static/doc/distributed-process-platform/doc-index-All.html b/static/doc/distributed-process-platform/doc-index-All.html deleted file mode 100644 index a05bf55..0000000 --- a/static/doc/distributed-process-platform/doc-index-All.html +++ /dev/null @@ -1,4 +0,0 @@ -distributed-process-platform-0.1.0: The Cloud Haskell Application Platform (Index)

distributed-process-platform-0.1.0: The Cloud Haskell Application Platform

Index

actionControl.Distributed.Process.Platform.ManagedProcess.Server, Control.Distributed.Process.Platform.ManagedProcess
AddressableControl.Distributed.Process.Platform
afterControl.Distributed.Process.Platform.Time
apiHandlersControl.Distributed.Process.Platform.ManagedProcess
asTimeoutControl.Distributed.Process.Platform.Time
AsyncControl.Distributed.Process.Platform.Async.AsyncSTM, Control.Distributed.Process.Platform.Async.AsyncChan, Control.Distributed.Process.Platform.Async
async 
1 (Function)Control.Distributed.Process.Platform.Async.AsyncSTM
2 (Function)Control.Distributed.Process.Platform.Async.AsyncChan
3 (Function)Control.Distributed.Process.Platform.Async
AsyncCancelledControl.Distributed.Process.Platform.Async.AsyncSTM, Control.Distributed.Process.Platform.Async.AsyncChan, Control.Distributed.Process.Platform.Async
AsyncChanControl.Distributed.Process.Platform.Async.AsyncChan
asyncChanControl.Distributed.Process.Platform.Async
AsyncDoneControl.Distributed.Process.Platform.Async.AsyncSTM, Control.Distributed.Process.Platform.Async.AsyncChan, Control.Distributed.Process.Platform.Async
AsyncFailedControl.Distributed.Process.Platform.Async.AsyncSTM, Control.Distributed.Process.Platform.Async.AsyncChan, Control.Distributed.Process.Platform.Async
asyncLinked 
1 (Function)Control.Distributed.Process.Platform.Async.AsyncSTM
2 (Function)Control.Distributed.Process.Platform.Async.AsyncChan
3 (Function)Control.Distributed.Process.Platform.Async
asyncLinkedChanControl.Distributed.Process.Platform.Async
asyncLinkedSTMControl.Distributed.Process.Platform.Async
AsyncLinkFailedControl.Distributed.Process.Platform.Async.AsyncSTM, Control.Distributed.Process.Platform.Async.AsyncChan, Control.Distributed.Process.Platform.Async
AsyncPendingControl.Distributed.Process.Platform.Async.AsyncSTM, Control.Distributed.Process.Platform.Async.AsyncChan, Control.Distributed.Process.Platform.Async
AsyncRefControl.Distributed.Process.Platform.Async.AsyncSTM, Control.Distributed.Process.Platform.Async.AsyncChan, Control.Distributed.Process.Platform.Async
AsyncRemoteTaskControl.Distributed.Process.Platform.Async.AsyncSTM, Control.Distributed.Process.Platform.Async.AsyncChan, Control.Distributed.Process.Platform.Async
AsyncResultControl.Distributed.Process.Platform.Async.AsyncSTM, Control.Distributed.Process.Platform.Async.AsyncChan, Control.Distributed.Process.Platform.Async
AsyncSTMControl.Distributed.Process.Platform.Async.AsyncSTM
asyncSTMControl.Distributed.Process.Platform.Async
AsyncTask 
1 (Type/Class)Control.Distributed.Process.Platform.Async.AsyncSTM, Control.Distributed.Process.Platform.Async.AsyncChan, Control.Distributed.Process.Platform.Async
2 (Data Constructor)Control.Distributed.Process.Platform.Async.AsyncSTM, Control.Distributed.Process.Platform.Async.AsyncChan, Control.Distributed.Process.Platform.Async
asyncTaskControl.Distributed.Process.Platform.Async.AsyncSTM, Control.Distributed.Process.Platform.Async.AsyncChan, Control.Distributed.Process.Platform.Async
asyncTaskDictControl.Distributed.Process.Platform.Async.AsyncSTM, Control.Distributed.Process.Platform.Async.AsyncChan, Control.Distributed.Process.Platform.Async
asyncTaskNodeControl.Distributed.Process.Platform.Async.AsyncSTM, Control.Distributed.Process.Platform.Async.AsyncChan, Control.Distributed.Process.Platform.Async
asyncTaskProcControl.Distributed.Process.Platform.Async.AsyncSTM, Control.Distributed.Process.Platform.Async.AsyncChan, Control.Distributed.Process.Platform.Async
asyncWorkerControl.Distributed.Process.Platform.Async.AsyncSTM, Control.Distributed.Process.Platform.Async.AsyncChan, Control.Distributed.Process.Platform.Async
callControl.Distributed.Process.Platform.ManagedProcess.Client, Control.Distributed.Process.Platform.ManagedProcess
callAsyncControl.Distributed.Process.Platform.ManagedProcess.Client, Control.Distributed.Process.Platform.ManagedProcess
callAtControl.Distributed.Process.Platform.Call
callForwardControl.Distributed.Process.Platform.Call
CallHandlerControl.Distributed.Process.Platform.ManagedProcess
callResponseControl.Distributed.Process.Platform.Call
callResponseAsyncControl.Distributed.Process.Platform.Call
callResponseDeferControl.Distributed.Process.Platform.Call
callResponseDeferIfControl.Distributed.Process.Platform.Call
callResponseIfControl.Distributed.Process.Platform.Call
callTimeout 
1 (Function)Control.Distributed.Process.Platform.ManagedProcess.Client, Control.Distributed.Process.Platform.ManagedProcess
2 (Function)Control.Distributed.Process.Platform.Call
cancel 
1 (Function)Control.Distributed.Process.Platform.Async.AsyncSTM
2 (Function)Control.Distributed.Process.Platform.Async.AsyncChan
3 (Function)Control.Distributed.Process.Platform.Async
cancelKill 
1 (Function)Control.Distributed.Process.Platform.Async.AsyncSTM
2 (Function)Control.Distributed.Process.Platform.Async.AsyncChan
3 (Function)Control.Distributed.Process.Platform.Async
cancelTimerControl.Distributed.Process.Platform.Timer
cancelWait 
1 (Function)Control.Distributed.Process.Platform.Async.AsyncSTM
2 (Function)Control.Distributed.Process.Platform.Async.AsyncChan
3 (Function)Control.Distributed.Process.Platform.Async
cancelWith 
1 (Function)Control.Distributed.Process.Platform.Async.AsyncSTM
2 (Function)Control.Distributed.Process.Platform.Async.AsyncChan
3 (Function)Control.Distributed.Process.Platform.Async
castControl.Distributed.Process.Platform.ManagedProcess.Client, Control.Distributed.Process.Platform.ManagedProcess
CastHandlerControl.Distributed.Process.Platform.ManagedProcess
check 
1 (Function)Control.Distributed.Process.Platform.Async.AsyncSTM
2 (Function)Control.Distributed.Process.Platform.Async.AsyncChan
3 (Function)Control.Distributed.Process.Platform.Async
conditionControl.Distributed.Process.Platform.ManagedProcess.Server, Control.Distributed.Process.Platform.ManagedProcess
continueControl.Distributed.Process.Platform.ManagedProcess.Server, Control.Distributed.Process.Platform.ManagedProcess
continue_Control.Distributed.Process.Platform.ManagedProcess.Server, Control.Distributed.Process.Platform.ManagedProcess
DaysControl.Distributed.Process.Platform.Time
DeadLetterControl.Distributed.Process.Platform.ManagedProcess
defaultProcessControl.Distributed.Process.Platform.ManagedProcess
Delay 
1 (Type/Class)Control.Distributed.Process.Platform.Time
2 (Data Constructor)Control.Distributed.Process.Platform.Time
DropControl.Distributed.Process.Platform.ManagedProcess
exitAfterControl.Distributed.Process.Platform.Timer
exitHandlersControl.Distributed.Process.Platform.ManagedProcess
flushTimerControl.Distributed.Process.Platform.Timer
getTagControl.Distributed.Process.Platform
haltNoReply_Control.Distributed.Process.Platform.ManagedProcess.Server, Control.Distributed.Process.Platform.ManagedProcess
handleCallControl.Distributed.Process.Platform.ManagedProcess.Server, Control.Distributed.Process.Platform.ManagedProcess
handleCallFromControl.Distributed.Process.Platform.ManagedProcess.Server, Control.Distributed.Process.Platform.ManagedProcess
handleCallFromIfControl.Distributed.Process.Platform.ManagedProcess.Server, Control.Distributed.Process.Platform.ManagedProcess
handleCallIfControl.Distributed.Process.Platform.ManagedProcess.Server, Control.Distributed.Process.Platform.ManagedProcess
handleCallIf_Control.Distributed.Process.Platform.ManagedProcess.Server, Control.Distributed.Process.Platform.ManagedProcess
handleCall_Control.Distributed.Process.Platform.ManagedProcess.Server, Control.Distributed.Process.Platform.ManagedProcess
handleCastControl.Distributed.Process.Platform.ManagedProcess.Server, Control.Distributed.Process.Platform.ManagedProcess
handleCastIfControl.Distributed.Process.Platform.ManagedProcess.Server, Control.Distributed.Process.Platform.ManagedProcess
handleCastIf_Control.Distributed.Process.Platform.ManagedProcess.Server, Control.Distributed.Process.Platform.ManagedProcess
handleCast_Control.Distributed.Process.Platform.ManagedProcess.Server, Control.Distributed.Process.Platform.ManagedProcess
handleDispatchControl.Distributed.Process.Platform.ManagedProcess.Server, Control.Distributed.Process.Platform.ManagedProcess
handleExitControl.Distributed.Process.Platform.ManagedProcess.Server, Control.Distributed.Process.Platform.ManagedProcess
handleInfoControl.Distributed.Process.Platform.ManagedProcess.Server, Control.Distributed.Process.Platform.ManagedProcess
hibernateControl.Distributed.Process.Platform.ManagedProcess.Server, Control.Distributed.Process.Platform.ManagedProcess
hibernate_Control.Distributed.Process.Platform.ManagedProcess.Server, Control.Distributed.Process.Platform.ManagedProcess
HoursControl.Distributed.Process.Platform.Time
hoursControl.Distributed.Process.Platform.Time
infiniteWaitControl.Distributed.Process.Platform.Time
InfinityControl.Distributed.Process.Platform.Time
infoHandlersControl.Distributed.Process.Platform.ManagedProcess
InitFailControl.Distributed.Process.Platform.ManagedProcess
InitHandlerControl.Distributed.Process.Platform.ManagedProcess
InitOkControl.Distributed.Process.Platform.ManagedProcess
InitResultControl.Distributed.Process.Platform.ManagedProcess
inputControl.Distributed.Process.Platform.ManagedProcess.Server, Control.Distributed.Process.Platform.ManagedProcess
killAfterControl.Distributed.Process.Platform.Timer
linkOnFailureControl.Distributed.Process.Platform
matchCondControl.Distributed.Process.Platform
MicrosControl.Distributed.Process.Platform.Time
microSecondsControl.Distributed.Process.Platform.Time
MillisControl.Distributed.Process.Platform.Time
milliSecondsControl.Distributed.Process.Platform.Time
MinutesControl.Distributed.Process.Platform.Time
minutesControl.Distributed.Process.Platform.Time
multicallControl.Distributed.Process.Platform.Call
newAsync 
1 (Function)Control.Distributed.Process.Platform.Async.AsyncSTM
2 (Function)Control.Distributed.Process.Platform.Async.AsyncChan
newTagPoolControl.Distributed.Process.Platform
noopControl.Distributed.Process.Platform.Test
noReplyControl.Distributed.Process.Platform.ManagedProcess.Server, Control.Distributed.Process.Platform.ManagedProcess
noReply_Control.Distributed.Process.Platform.ManagedProcess.Server, Control.Distributed.Process.Platform.ManagedProcess
noWaitControl.Distributed.Process.Platform.Time
periodicallyControl.Distributed.Process.Platform.Timer
PidControl.Distributed.Process.Platform
Ping 
1 (Type/Class)Control.Distributed.Process.Platform.Test
2 (Data Constructor)Control.Distributed.Process.Platform.Test
pingControl.Distributed.Process.Platform.Test
poll 
1 (Function)Control.Distributed.Process.Platform.Async.AsyncSTM
2 (Function)Control.Distributed.Process.Platform.Async.AsyncChan
3 (Function)Control.Distributed.Process.Platform.Async
pollSTMControl.Distributed.Process.Platform.Async.AsyncSTM
ProcessActionControl.Distributed.Process.Platform.ManagedProcess
ProcessContinueControl.Distributed.Process.Platform.ManagedProcess
ProcessDefinition 
1 (Type/Class)Control.Distributed.Process.Platform.ManagedProcess
2 (Data Constructor)Control.Distributed.Process.Platform.ManagedProcess
ProcessHibernateControl.Distributed.Process.Platform.ManagedProcess
ProcessReplyControl.Distributed.Process.Platform.ManagedProcess
ProcessStopControl.Distributed.Process.Platform.ManagedProcess
ProcessTimeoutControl.Distributed.Process.Platform.ManagedProcess
RecipientControl.Distributed.Process.Platform
RegisteredControl.Distributed.Process.Platform
RemoteRegisteredControl.Distributed.Process.Platform
remoteTaskControl.Distributed.Process.Platform.Async
replyControl.Distributed.Process.Platform.ManagedProcess.Server, Control.Distributed.Process.Platform.ManagedProcess
replyToControl.Distributed.Process.Platform.ManagedProcess.Server, Control.Distributed.Process.Platform.ManagedProcess
replyWithControl.Distributed.Process.Platform.ManagedProcess.Server, Control.Distributed.Process.Platform.ManagedProcess
resetTimerControl.Distributed.Process.Platform.Timer
resolveControl.Distributed.Process.Platform
runAfterControl.Distributed.Process.Platform.Timer
runProcessControl.Distributed.Process.Platform.ManagedProcess
runTestProcessControl.Distributed.Process.Platform.Test
safeCallControl.Distributed.Process.Platform.ManagedProcess.Client, Control.Distributed.Process.Platform.ManagedProcess
SecondsControl.Distributed.Process.Platform.Time
secondsControl.Distributed.Process.Platform.Time
sendAfterControl.Distributed.Process.Platform.Timer
sendToControl.Distributed.Process.Platform
shutdownControl.Distributed.Process.Platform.ManagedProcess.Client, Control.Distributed.Process.Platform.ManagedProcess
sleepControl.Distributed.Process.Platform.Timer
spawnLinkLocalControl.Distributed.Process.Platform
spawnMonitorLocalControl.Distributed.Process.Platform
startControl.Distributed.Process.Platform.ManagedProcess
startTestProcessControl.Distributed.Process.Platform.Test
startTimerControl.Distributed.Process.Platform.Timer
stashControl.Distributed.Process.Platform.Test
stateControl.Distributed.Process.Platform.ManagedProcess.Server, Control.Distributed.Process.Platform.ManagedProcess
statelessInitControl.Distributed.Process.Platform.ManagedProcess
statelessProcessControl.Distributed.Process.Platform.ManagedProcess
stopControl.Distributed.Process.Platform.ManagedProcess.Server, Control.Distributed.Process.Platform.ManagedProcess
stop_Control.Distributed.Process.Platform.ManagedProcess.Server, Control.Distributed.Process.Platform.ManagedProcess
TagControl.Distributed.Process.Platform
TagPoolControl.Distributed.Process.Platform
taskControl.Distributed.Process.Platform.Async
TerminateControl.Distributed.Process.Platform.ManagedProcess
TerminateHandlerControl.Distributed.Process.Platform.ManagedProcess
terminateHandlerControl.Distributed.Process.Platform.ManagedProcess
TerminateNormalControl.Distributed.Process.Platform
TerminateOtherControl.Distributed.Process.Platform
TerminateReasonControl.Distributed.Process.Platform
TerminateShutdownControl.Distributed.Process.Platform
TestProcessControlControl.Distributed.Process.Platform.Test
testProcessGoControl.Distributed.Process.Platform.Test
testProcessReportControl.Distributed.Process.Platform.Test
testProcessStopControl.Distributed.Process.Platform.Test
TestResultControl.Distributed.Process.Platform.Test
Tick 
1 (Type/Class)Control.Distributed.Process.Platform.Timer
2 (Data Constructor)Control.Distributed.Process.Platform.Timer
tickerControl.Distributed.Process.Platform.Timer
TimeIntervalControl.Distributed.Process.Platform.Time
TimeoutControl.Distributed.Process.Platform.Time
timeoutControl.Distributed.Process.Platform.Time
timeoutAfterControl.Distributed.Process.Platform.ManagedProcess.Server, Control.Distributed.Process.Platform.ManagedProcess
timeoutAfter_Control.Distributed.Process.Platform.ManagedProcess.Server, Control.Distributed.Process.Platform.ManagedProcess
TimeoutHandlerControl.Distributed.Process.Platform.ManagedProcess
timeoutHandlerControl.Distributed.Process.Platform.ManagedProcess
TimeoutNotification 
1 (Type/Class)Control.Distributed.Process.Platform.Time
2 (Data Constructor)Control.Distributed.Process.Platform.Time
TimerRefControl.Distributed.Process.Platform.Timer
timesControl.Distributed.Process.Platform
timeToMsControl.Distributed.Process.Platform.Time
TimeUnitControl.Distributed.Process.Platform.Time
tryCallControl.Distributed.Process.Platform.ManagedProcess.Client, Control.Distributed.Process.Platform.ManagedProcess
tryForkProcessControl.Distributed.Process.Platform.Test
tryRunProcessControl.Distributed.Process.Platform.Test
UnhandledMessagePolicyControl.Distributed.Process.Platform.ManagedProcess
unhandledMessagePolicyControl.Distributed.Process.Platform.ManagedProcess
wait 
1 (Function)Control.Distributed.Process.Platform.Async.AsyncSTM
2 (Function)Control.Distributed.Process.Platform.Async.AsyncChan
3 (Function)Control.Distributed.Process.Platform.Async
waitAny 
1 (Function)Control.Distributed.Process.Platform.Async.AsyncSTM
2 (Function)Control.Distributed.Process.Platform.Async.AsyncChan
waitAnyCancel 
1 (Function)Control.Distributed.Process.Platform.Async.AsyncSTM
2 (Function)Control.Distributed.Process.Platform.Async.AsyncChan
waitAnyTimeout 
1 (Function)Control.Distributed.Process.Platform.Async.AsyncSTM
2 (Function)Control.Distributed.Process.Platform.Async.AsyncChan
waitBothControl.Distributed.Process.Platform.Async.AsyncSTM
waitCancelTimeout 
1 (Function)Control.Distributed.Process.Platform.Async.AsyncChan
2 (Function)Control.Distributed.Process.Platform.Async
waitCheckTimeout 
1 (Function)Control.Distributed.Process.Platform.Async.AsyncSTM
2 (Function)Control.Distributed.Process.Platform.Async.AsyncChan
3 (Function)Control.Distributed.Process.Platform.Async
waitEitherControl.Distributed.Process.Platform.Async.AsyncSTM
waitEither_Control.Distributed.Process.Platform.Async.AsyncSTM
waitTimeout 
1 (Function)Control.Distributed.Process.Platform.Async.AsyncSTM
2 (Function)Control.Distributed.Process.Platform.Async.AsyncChan
3 (Function)Control.Distributed.Process.Platform.Async
waitTimeoutSTMControl.Distributed.Process.Platform.Async.AsyncSTM
whereisOrStartControl.Distributed.Process.Platform
whereisOrStartRemoteControl.Distributed.Process.Platform
withinControl.Distributed.Process.Platform.Time
workerControl.Distributed.Process.Platform.Async.AsyncChan
_asyncWorkerControl.Distributed.Process.Platform.Async.AsyncSTM
__remoteTableControl.Distributed.Process.Platform
\ No newline at end of file diff --git a/static/doc/distributed-process-platform/doc-index-C.html b/static/doc/distributed-process-platform/doc-index-C.html deleted file mode 100644 index 34a1d65..0000000 --- a/static/doc/distributed-process-platform/doc-index-C.html +++ /dev/null @@ -1,4 +0,0 @@ -distributed-process-platform-0.1.0: The Cloud Haskell Application Platform (Index - C)

distributed-process-platform-0.1.0: The Cloud Haskell Application Platform

Index - C

callControl.Distributed.Process.Platform.ManagedProcess.Client, Control.Distributed.Process.Platform.ManagedProcess
callAsyncControl.Distributed.Process.Platform.ManagedProcess.Client, Control.Distributed.Process.Platform.ManagedProcess
callAtControl.Distributed.Process.Platform.Call
callForwardControl.Distributed.Process.Platform.Call
CallHandlerControl.Distributed.Process.Platform.ManagedProcess
callResponseControl.Distributed.Process.Platform.Call
callResponseAsyncControl.Distributed.Process.Platform.Call
callResponseDeferControl.Distributed.Process.Platform.Call
callResponseDeferIfControl.Distributed.Process.Platform.Call
callResponseIfControl.Distributed.Process.Platform.Call
callTimeout 
1 (Function)Control.Distributed.Process.Platform.ManagedProcess.Client, Control.Distributed.Process.Platform.ManagedProcess
2 (Function)Control.Distributed.Process.Platform.Call
cancel 
1 (Function)Control.Distributed.Process.Platform.Async.AsyncSTM
2 (Function)Control.Distributed.Process.Platform.Async.AsyncChan
3 (Function)Control.Distributed.Process.Platform.Async
cancelKill 
1 (Function)Control.Distributed.Process.Platform.Async.AsyncSTM
2 (Function)Control.Distributed.Process.Platform.Async.AsyncChan
3 (Function)Control.Distributed.Process.Platform.Async
cancelTimerControl.Distributed.Process.Platform.Timer
cancelWait 
1 (Function)Control.Distributed.Process.Platform.Async.AsyncSTM
2 (Function)Control.Distributed.Process.Platform.Async.AsyncChan
3 (Function)Control.Distributed.Process.Platform.Async
cancelWith 
1 (Function)Control.Distributed.Process.Platform.Async.AsyncSTM
2 (Function)Control.Distributed.Process.Platform.Async.AsyncChan
3 (Function)Control.Distributed.Process.Platform.Async
castControl.Distributed.Process.Platform.ManagedProcess.Client, Control.Distributed.Process.Platform.ManagedProcess
CastHandlerControl.Distributed.Process.Platform.ManagedProcess
check 
1 (Function)Control.Distributed.Process.Platform.Async.AsyncSTM
2 (Function)Control.Distributed.Process.Platform.Async.AsyncChan
3 (Function)Control.Distributed.Process.Platform.Async
conditionControl.Distributed.Process.Platform.ManagedProcess.Server, Control.Distributed.Process.Platform.ManagedProcess
continueControl.Distributed.Process.Platform.ManagedProcess.Server, Control.Distributed.Process.Platform.ManagedProcess
continue_Control.Distributed.Process.Platform.ManagedProcess.Server, Control.Distributed.Process.Platform.ManagedProcess
\ No newline at end of file diff --git a/static/doc/distributed-process-platform/doc-index-D.html b/static/doc/distributed-process-platform/doc-index-D.html deleted file mode 100644 index 3ba91c8..0000000 --- a/static/doc/distributed-process-platform/doc-index-D.html +++ /dev/null @@ -1,4 +0,0 @@ -distributed-process-platform-0.1.0: The Cloud Haskell Application Platform (Index - D)

distributed-process-platform-0.1.0: The Cloud Haskell Application Platform

\ No newline at end of file diff --git a/static/doc/distributed-process-platform/doc-index-E.html b/static/doc/distributed-process-platform/doc-index-E.html deleted file mode 100644 index 9e8d3a3..0000000 --- a/static/doc/distributed-process-platform/doc-index-E.html +++ /dev/null @@ -1,4 +0,0 @@ -distributed-process-platform-0.1.0: The Cloud Haskell Application Platform (Index - E)

distributed-process-platform-0.1.0: The Cloud Haskell Application Platform

\ No newline at end of file diff --git a/static/doc/distributed-process-platform/doc-index-F.html b/static/doc/distributed-process-platform/doc-index-F.html deleted file mode 100644 index 1647cc0..0000000 --- a/static/doc/distributed-process-platform/doc-index-F.html +++ /dev/null @@ -1,4 +0,0 @@ -distributed-process-platform-0.1.0: The Cloud Haskell Application Platform (Index - F)

distributed-process-platform-0.1.0: The Cloud Haskell Application Platform

\ No newline at end of file diff --git a/static/doc/distributed-process-platform/doc-index-G.html b/static/doc/distributed-process-platform/doc-index-G.html deleted file mode 100644 index aa2a0b6..0000000 --- a/static/doc/distributed-process-platform/doc-index-G.html +++ /dev/null @@ -1,4 +0,0 @@ -distributed-process-platform-0.1.0: The Cloud Haskell Application Platform (Index - G)

distributed-process-platform-0.1.0: The Cloud Haskell Application Platform

\ No newline at end of file diff --git a/static/doc/distributed-process-platform/doc-index-H.html b/static/doc/distributed-process-platform/doc-index-H.html deleted file mode 100644 index c7faf21..0000000 --- a/static/doc/distributed-process-platform/doc-index-H.html +++ /dev/null @@ -1,4 +0,0 @@ -distributed-process-platform-0.1.0: The Cloud Haskell Application Platform (Index - H)

distributed-process-platform-0.1.0: The Cloud Haskell Application Platform

Index - H

haltNoReply_Control.Distributed.Process.Platform.ManagedProcess.Server, Control.Distributed.Process.Platform.ManagedProcess
handleCallControl.Distributed.Process.Platform.ManagedProcess.Server, Control.Distributed.Process.Platform.ManagedProcess
handleCallFromControl.Distributed.Process.Platform.ManagedProcess.Server, Control.Distributed.Process.Platform.ManagedProcess
handleCallFromIfControl.Distributed.Process.Platform.ManagedProcess.Server, Control.Distributed.Process.Platform.ManagedProcess
handleCallIfControl.Distributed.Process.Platform.ManagedProcess.Server, Control.Distributed.Process.Platform.ManagedProcess
handleCallIf_Control.Distributed.Process.Platform.ManagedProcess.Server, Control.Distributed.Process.Platform.ManagedProcess
handleCall_Control.Distributed.Process.Platform.ManagedProcess.Server, Control.Distributed.Process.Platform.ManagedProcess
handleCastControl.Distributed.Process.Platform.ManagedProcess.Server, Control.Distributed.Process.Platform.ManagedProcess
handleCastIfControl.Distributed.Process.Platform.ManagedProcess.Server, Control.Distributed.Process.Platform.ManagedProcess
handleCastIf_Control.Distributed.Process.Platform.ManagedProcess.Server, Control.Distributed.Process.Platform.ManagedProcess
handleCast_Control.Distributed.Process.Platform.ManagedProcess.Server, Control.Distributed.Process.Platform.ManagedProcess
handleDispatchControl.Distributed.Process.Platform.ManagedProcess.Server, Control.Distributed.Process.Platform.ManagedProcess
handleExitControl.Distributed.Process.Platform.ManagedProcess.Server, Control.Distributed.Process.Platform.ManagedProcess
handleInfoControl.Distributed.Process.Platform.ManagedProcess.Server, Control.Distributed.Process.Platform.ManagedProcess
hibernateControl.Distributed.Process.Platform.ManagedProcess.Server, Control.Distributed.Process.Platform.ManagedProcess
hibernate_Control.Distributed.Process.Platform.ManagedProcess.Server, Control.Distributed.Process.Platform.ManagedProcess
HoursControl.Distributed.Process.Platform.Time
hoursControl.Distributed.Process.Platform.Time
\ No newline at end of file diff --git a/static/doc/distributed-process-platform/doc-index-I.html b/static/doc/distributed-process-platform/doc-index-I.html deleted file mode 100644 index a55aef7..0000000 --- a/static/doc/distributed-process-platform/doc-index-I.html +++ /dev/null @@ -1,4 +0,0 @@ -distributed-process-platform-0.1.0: The Cloud Haskell Application Platform (Index - I)

distributed-process-platform-0.1.0: The Cloud Haskell Application Platform

\ No newline at end of file diff --git a/static/doc/distributed-process-platform/doc-index-K.html b/static/doc/distributed-process-platform/doc-index-K.html deleted file mode 100644 index 2177cb2..0000000 --- a/static/doc/distributed-process-platform/doc-index-K.html +++ /dev/null @@ -1,4 +0,0 @@ -distributed-process-platform-0.1.0: The Cloud Haskell Application Platform (Index - K)

distributed-process-platform-0.1.0: The Cloud Haskell Application Platform

\ No newline at end of file diff --git a/static/doc/distributed-process-platform/doc-index-L.html b/static/doc/distributed-process-platform/doc-index-L.html deleted file mode 100644 index 41e1047..0000000 --- a/static/doc/distributed-process-platform/doc-index-L.html +++ /dev/null @@ -1,4 +0,0 @@ -distributed-process-platform-0.1.0: The Cloud Haskell Application Platform (Index - L)

distributed-process-platform-0.1.0: The Cloud Haskell Application Platform

\ No newline at end of file diff --git a/static/doc/distributed-process-platform/doc-index-M.html b/static/doc/distributed-process-platform/doc-index-M.html deleted file mode 100644 index 461ed68..0000000 --- a/static/doc/distributed-process-platform/doc-index-M.html +++ /dev/null @@ -1,4 +0,0 @@ -distributed-process-platform-0.1.0: The Cloud Haskell Application Platform (Index - M)

distributed-process-platform-0.1.0: The Cloud Haskell Application Platform

\ No newline at end of file diff --git a/static/doc/distributed-process-platform/doc-index-N.html b/static/doc/distributed-process-platform/doc-index-N.html deleted file mode 100644 index 9818f98..0000000 --- a/static/doc/distributed-process-platform/doc-index-N.html +++ /dev/null @@ -1,4 +0,0 @@ -distributed-process-platform-0.1.0: The Cloud Haskell Application Platform (Index - N)

distributed-process-platform-0.1.0: The Cloud Haskell Application Platform

\ No newline at end of file diff --git a/static/doc/distributed-process-platform/doc-index-P.html b/static/doc/distributed-process-platform/doc-index-P.html deleted file mode 100644 index bd3d5bb..0000000 --- a/static/doc/distributed-process-platform/doc-index-P.html +++ /dev/null @@ -1,4 +0,0 @@ -distributed-process-platform-0.1.0: The Cloud Haskell Application Platform (Index - P)

distributed-process-platform-0.1.0: The Cloud Haskell Application Platform

\ No newline at end of file diff --git a/static/doc/distributed-process-platform/doc-index-R.html b/static/doc/distributed-process-platform/doc-index-R.html deleted file mode 100644 index 9ae094a..0000000 --- a/static/doc/distributed-process-platform/doc-index-R.html +++ /dev/null @@ -1,4 +0,0 @@ -distributed-process-platform-0.1.0: The Cloud Haskell Application Platform (Index - R)

distributed-process-platform-0.1.0: The Cloud Haskell Application Platform

\ No newline at end of file diff --git a/static/doc/distributed-process-platform/doc-index-S.html b/static/doc/distributed-process-platform/doc-index-S.html deleted file mode 100644 index 96818c3..0000000 --- a/static/doc/distributed-process-platform/doc-index-S.html +++ /dev/null @@ -1,4 +0,0 @@ -distributed-process-platform-0.1.0: The Cloud Haskell Application Platform (Index - S)

distributed-process-platform-0.1.0: The Cloud Haskell Application Platform

\ No newline at end of file diff --git a/static/doc/distributed-process-platform/doc-index-T.html b/static/doc/distributed-process-platform/doc-index-T.html deleted file mode 100644 index a34167a..0000000 --- a/static/doc/distributed-process-platform/doc-index-T.html +++ /dev/null @@ -1,4 +0,0 @@ -distributed-process-platform-0.1.0: The Cloud Haskell Application Platform (Index - T)

distributed-process-platform-0.1.0: The Cloud Haskell Application Platform

Index - T

TagControl.Distributed.Process.Platform
TagPoolControl.Distributed.Process.Platform
taskControl.Distributed.Process.Platform.Async
TerminateControl.Distributed.Process.Platform.ManagedProcess
TerminateHandlerControl.Distributed.Process.Platform.ManagedProcess
terminateHandlerControl.Distributed.Process.Platform.ManagedProcess
TerminateNormalControl.Distributed.Process.Platform
TerminateOtherControl.Distributed.Process.Platform
TerminateReasonControl.Distributed.Process.Platform
TerminateShutdownControl.Distributed.Process.Platform
TestProcessControlControl.Distributed.Process.Platform.Test
testProcessGoControl.Distributed.Process.Platform.Test
testProcessReportControl.Distributed.Process.Platform.Test
testProcessStopControl.Distributed.Process.Platform.Test
TestResultControl.Distributed.Process.Platform.Test
Tick 
1 (Type/Class)Control.Distributed.Process.Platform.Timer
2 (Data Constructor)Control.Distributed.Process.Platform.Timer
tickerControl.Distributed.Process.Platform.Timer
TimeIntervalControl.Distributed.Process.Platform.Time
TimeoutControl.Distributed.Process.Platform.Time
timeoutControl.Distributed.Process.Platform.Time
timeoutAfterControl.Distributed.Process.Platform.ManagedProcess.Server, Control.Distributed.Process.Platform.ManagedProcess
timeoutAfter_Control.Distributed.Process.Platform.ManagedProcess.Server, Control.Distributed.Process.Platform.ManagedProcess
TimeoutHandlerControl.Distributed.Process.Platform.ManagedProcess
timeoutHandlerControl.Distributed.Process.Platform.ManagedProcess
TimeoutNotification 
1 (Type/Class)Control.Distributed.Process.Platform.Time
2 (Data Constructor)Control.Distributed.Process.Platform.Time
TimerRefControl.Distributed.Process.Platform.Timer
timesControl.Distributed.Process.Platform
timeToMsControl.Distributed.Process.Platform.Time
TimeUnitControl.Distributed.Process.Platform.Time
tryCallControl.Distributed.Process.Platform.ManagedProcess.Client, Control.Distributed.Process.Platform.ManagedProcess
tryForkProcessControl.Distributed.Process.Platform.Test
tryRunProcessControl.Distributed.Process.Platform.Test
\ No newline at end of file diff --git a/static/doc/distributed-process-platform/doc-index-U.html b/static/doc/distributed-process-platform/doc-index-U.html deleted file mode 100644 index 2885c60..0000000 --- a/static/doc/distributed-process-platform/doc-index-U.html +++ /dev/null @@ -1,4 +0,0 @@ -distributed-process-platform-0.1.0: The Cloud Haskell Application Platform (Index - U)

distributed-process-platform-0.1.0: The Cloud Haskell Application Platform

\ No newline at end of file diff --git a/static/doc/distributed-process-platform/doc-index-W.html b/static/doc/distributed-process-platform/doc-index-W.html deleted file mode 100644 index de4b277..0000000 --- a/static/doc/distributed-process-platform/doc-index-W.html +++ /dev/null @@ -1,4 +0,0 @@ -distributed-process-platform-0.1.0: The Cloud Haskell Application Platform (Index - W)

distributed-process-platform-0.1.0: The Cloud Haskell Application Platform

Index - W

wait 
1 (Function)Control.Distributed.Process.Platform.Async.AsyncSTM
2 (Function)Control.Distributed.Process.Platform.Async.AsyncChan
3 (Function)Control.Distributed.Process.Platform.Async
waitAny 
1 (Function)Control.Distributed.Process.Platform.Async.AsyncSTM
2 (Function)Control.Distributed.Process.Platform.Async.AsyncChan
waitAnyCancel 
1 (Function)Control.Distributed.Process.Platform.Async.AsyncSTM
2 (Function)Control.Distributed.Process.Platform.Async.AsyncChan
waitAnyTimeout 
1 (Function)Control.Distributed.Process.Platform.Async.AsyncSTM
2 (Function)Control.Distributed.Process.Platform.Async.AsyncChan
waitBothControl.Distributed.Process.Platform.Async.AsyncSTM
waitCancelTimeout 
1 (Function)Control.Distributed.Process.Platform.Async.AsyncChan
2 (Function)Control.Distributed.Process.Platform.Async
waitCheckTimeout 
1 (Function)Control.Distributed.Process.Platform.Async.AsyncSTM
2 (Function)Control.Distributed.Process.Platform.Async.AsyncChan
3 (Function)Control.Distributed.Process.Platform.Async
waitEitherControl.Distributed.Process.Platform.Async.AsyncSTM
waitEither_Control.Distributed.Process.Platform.Async.AsyncSTM
waitTimeout 
1 (Function)Control.Distributed.Process.Platform.Async.AsyncSTM
2 (Function)Control.Distributed.Process.Platform.Async.AsyncChan
3 (Function)Control.Distributed.Process.Platform.Async
waitTimeoutSTMControl.Distributed.Process.Platform.Async.AsyncSTM
whereisOrStartControl.Distributed.Process.Platform
whereisOrStartRemoteControl.Distributed.Process.Platform
withinControl.Distributed.Process.Platform.Time
workerControl.Distributed.Process.Platform.Async.AsyncChan
\ No newline at end of file diff --git a/static/doc/distributed-process-platform/doc-index.html b/static/doc/distributed-process-platform/doc-index.html deleted file mode 100644 index a7ddf2d..0000000 --- a/static/doc/distributed-process-platform/doc-index.html +++ /dev/null @@ -1,4 +0,0 @@ -distributed-process-platform-0.1.0: The Cloud Haskell Application Platform (Index)

distributed-process-platform-0.1.0: The Cloud Haskell Application Platform

\ No newline at end of file diff --git a/static/doc/distributed-process-platform/frames.html b/static/doc/distributed-process-platform/frames.html deleted file mode 100644 index 1b4e38d..0000000 --- a/static/doc/distributed-process-platform/frames.html +++ /dev/null @@ -1,30 +0,0 @@ - - - - - - - - - - - - - - - diff --git a/static/doc/distributed-process-platform/haddock-util.js b/static/doc/distributed-process-platform/haddock-util.js deleted file mode 100644 index 9a6fccf..0000000 --- a/static/doc/distributed-process-platform/haddock-util.js +++ /dev/null @@ -1,344 +0,0 @@ -// Haddock JavaScript utilities - -var rspace = /\s\s+/g, - rtrim = /^\s+|\s+$/g; - -function spaced(s) { return (" " + s + " ").replace(rspace, " "); } -function trim(s) { return s.replace(rtrim, ""); } - -function hasClass(elem, value) { - var className = spaced(elem.className || ""); - return className.indexOf( " " + value + " " ) >= 0; -} - -function addClass(elem, value) { - var className = spaced(elem.className || ""); - if ( className.indexOf( " " + value + " " ) < 0 ) { - elem.className = trim(className + " " + value); - } -} - -function removeClass(elem, value) { - var className = spaced(elem.className || ""); - className = className.replace(" " + value + " ", " "); - elem.className = trim(className); -} - -function toggleClass(elem, valueOn, valueOff, bool) { - if (bool == null) { bool = ! hasClass(elem, valueOn); } - if (bool) { - removeClass(elem, valueOff); - addClass(elem, valueOn); - } - else { - removeClass(elem, valueOn); - addClass(elem, valueOff); - } - return bool; -} - - -function makeClassToggle(valueOn, valueOff) -{ - return function(elem, bool) { - return toggleClass(elem, valueOn, valueOff, bool); - } -} - -toggleShow = makeClassToggle("show", "hide"); -toggleCollapser = makeClassToggle("collapser", "expander"); - -function toggleSection(id) -{ - var b = toggleShow(document.getElementById("section." + id)); - toggleCollapser(document.getElementById("control." 
+ id), b); - rememberCollapsed(id, b); - return b; -} - -var collapsed = {}; -function rememberCollapsed(id, b) -{ - if(b) - delete collapsed[id] - else - collapsed[id] = null; - - var sections = []; - for(var i in collapsed) - { - if(collapsed.hasOwnProperty(i)) - sections.push(i); - } - // cookie specific to this page; don't use setCookie which sets path=/ - document.cookie = "collapsed=" + escape(sections.join('+')); -} - -function restoreCollapsed() -{ - var cookie = getCookie("collapsed"); - if(!cookie) - return; - - var ids = cookie.split('+'); - for(var i in ids) - { - if(document.getElementById("section." + ids[i])) - toggleSection(ids[i]); - } -} - -function setCookie(name, value) { - document.cookie = name + "=" + escape(value) + ";path=/;"; -} - -function clearCookie(name) { - document.cookie = name + "=;path=/;expires=Thu, 01-Jan-1970 00:00:01 GMT;"; -} - -function getCookie(name) { - var nameEQ = name + "="; - var ca = document.cookie.split(';'); - for(var i=0;i < ca.length;i++) { - var c = ca[i]; - while (c.charAt(0)==' ') c = c.substring(1,c.length); - if (c.indexOf(nameEQ) == 0) { - return unescape(c.substring(nameEQ.length,c.length)); - } - } - return null; -} - - - -var max_results = 75; // 50 is not enough to search for map in the base libraries -var shown_range = null; -var last_search = null; - -function quick_search() -{ - perform_search(false); -} - -function full_search() -{ - perform_search(true); -} - - -function perform_search(full) -{ - var text = document.getElementById("searchbox").value.toLowerCase(); - if (text == last_search && !full) return; - last_search = text; - - var table = document.getElementById("indexlist"); - var status = document.getElementById("searchmsg"); - var children = table.firstChild.childNodes; - - // first figure out the first node with the prefix - var first = bisect(-1); - var last = (first == -1 ? 
-1 : bisect(1)); - - if (first == -1) - { - table.className = ""; - status.innerHTML = "No results found, displaying all"; - } - else if (first == 0 && last == children.length - 1) - { - table.className = ""; - status.innerHTML = ""; - } - else if (last - first >= max_results && !full) - { - table.className = ""; - status.innerHTML = "More than " + max_results + ", press Search to display"; - } - else - { - // decide what you need to clear/show - if (shown_range) - setclass(shown_range[0], shown_range[1], "indexrow"); - setclass(first, last, "indexshow"); - shown_range = [first, last]; - table.className = "indexsearch"; - status.innerHTML = ""; - } - - - function setclass(first, last, status) - { - for (var i = first; i <= last; i++) - { - children[i].className = status; - } - } - - - // do a binary search, treating 0 as ... - // return either -1 (no 0's found) or location of most far match - function bisect(dir) - { - var first = 0, finish = children.length - 1; - var mid, success = false; - - while (finish - first > 3) - { - mid = Math.floor((finish + first) / 2); - - var i = checkitem(mid); - if (i == 0) i = dir; - if (i == -1) - finish = mid; - else - first = mid; - } - var a = (dir == 1 ? first : finish); - var b = (dir == 1 ? finish : first); - for (var i = b; i != a - dir; i -= dir) - { - if (checkitem(i) == 0) return i; - } - return -1; - } - - - // from an index, decide what the result is - // 0 = match, -1 is lower, 1 is higher - function checkitem(i) - { - var s = getitem(i).toLowerCase().substr(0, text.length); - if (s == text) return 0; - else return (s > text ? 
-1 : 1); - } - - - // from an index, get its string - // this abstracts over alternates - function getitem(i) - { - for ( ; i >= 0; i--) - { - var s = children[i].firstChild.firstChild.data; - if (s.indexOf(' ') == -1) - return s; - } - return ""; // should never be reached - } -} - -function setSynopsis(filename) { - if (parent.window.synopsis) { - if (parent.window.synopsis.location.replace) { - // In Firefox this avoids adding the change to the history. - parent.window.synopsis.location.replace(filename); - } else { - parent.window.synopsis.location = filename; - } - } -} - -function addMenuItem(html) { - var menu = document.getElementById("page-menu"); - if (menu) { - var btn = menu.firstChild.cloneNode(false); - btn.innerHTML = html; - menu.appendChild(btn); - } -} - -function adjustForFrames() { - var bodyCls; - - if (parent.location.href == window.location.href) { - // not in frames, so add Frames button - addMenuItem("Frames"); - bodyCls = "no-frame"; - } - else { - bodyCls = "in-frame"; - } - addClass(document.body, bodyCls); -} - -function reframe() { - setCookie("haddock-reframe", document.URL); - window.location = "frames.html"; -} - -function postReframe() { - var s = getCookie("haddock-reframe"); - if (s) { - parent.window.main.location = s; - clearCookie("haddock-reframe"); - } -} - -function styles() { - var i, a, es = document.getElementsByTagName("link"), rs = []; - for (i = 0; a = es[i]; i++) { - if(a.rel.indexOf("style") != -1 && a.title) { - rs.push(a); - } - } - return rs; -} - -function addStyleMenu() { - var as = styles(); - var i, a, btns = ""; - for(i=0; a = as[i]; i++) { - btns += "
  • " - + a.title + "
  • " - } - if (as.length > 1) { - var h = "
    " - + "Style ▾" - + "
      " + btns + "
    " - + "
    "; - addMenuItem(h); - } -} - -function setActiveStyleSheet(title) { - var as = styles(); - var i, a, found; - for(i=0; a = as[i]; i++) { - a.disabled = true; - // need to do this always, some browsers are edge triggered - if(a.title == title) { - found = a; - } - } - if (found) { - found.disabled = false; - setCookie("haddock-style", title); - } - else { - as[0].disabled = false; - clearCookie("haddock-style"); - } - styleMenu(false); -} - -function resetStyle() { - var s = getCookie("haddock-style"); - if (s) setActiveStyleSheet(s); -} - - -function styleMenu(show) { - var m = document.getElementById('style-menu'); - if (m) toggleShow(m, show); -} - - -function pageLoad() { - addStyleMenu(); - adjustForFrames(); - resetStyle(); - restoreCollapsed(); -} - diff --git a/static/doc/distributed-process-platform/hslogo-16.png b/static/doc/distributed-process-platform/hslogo-16.png deleted file mode 100644 index 0ff8579fbd897417b0d6dad6e920f8882138a7c0..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 1684 zcmV;F25b3=P)4Tx0C)j~RL^S@K@|QrZmG~B2wH0nvUrdpNm;9CMbtL^5n^i$+aIn^?(HA4aZWV5ov6ELTdbo0FI&wK{O>*+w4vx20?>!`FrQsdJlnHR>OPy zcd~b_n$otK2Za4V;76L-DzNVtaSB-y0*E}{p()372;bw_^6ZZ}PI-92wGS&j#91PI zKs7DSe@(bk%_Y-7gGe}(^>I=@oY#w#*Bu9GZf3^F5WP>3rn}7Ut74&?PWBFvy`A)a zPP5)V!Xd&78LdA?xQ(9mjMYElVd13a#D+Z_7&Y|xU=_C-srWU*6kiZcC!$nw*)9$7 zn6CX+@=AhmkT}X@VSsa5NKe;HZuq)~1$`#h6R+ZTR#D-3j}vF!)ZOnz+5)dI4jl{{ z44Mr{P!L4~VVJN`K!!XTF*LGrKO?IK8z<8w`3e3jI8lUGNUta*C8 zn(P`s>{pjD=7Kek#B;Fw@hxAK%$F&Q6vg9J^Xf~4by_hu-=A!MJ3Znq&n~srbFGPs zH&&aMXZ>nO`|hf|ljc?VPhR!${AbO?W8x_>CU%PFA&Hm8F7cAsOREdwU~R_;ot1_u z(ruCYB-LPGn!NQdT|ZlRy+(fw^-+`=%+gee_kY4FWHg<*4sZI8+sFJD270UUORdLHO0nA4V) z%{fwsET5CQ>B?eK%uw4yQc~9?*JVo2}ze(;aRcp*ceL#HUJSllrgm5wQKR zQu+C;QrUh^8rFfA`ftFz{YAidi-`aL010qNS#tmY4c7nw4c7reD4Tcy00T@(L_t(I z5sj2vNEA^R$7gqDc6T=2^@fUA2(c`MltuL5<|KW>RWz$&YbU@|M|{$E*8Tu-Ux!w z1Y*Dr&Ubfr&v-nZaaB{3ilRumrjPmk{sZvQEWlW+{o~IH|8)=s6c#X9S5s5d%J 
z4@)&QH5|xQY-)^L1n0pTRu0Lx9`08YTjTwn^6 z0;b1+aQ@)n;Em$q;=7BBi)v0zj&o^g>0Whp^_^5IbxIUP8C@y9;R?*Ouu}rmfxbU= zwtWVNke-m!=`7bYEhWpcI5#)9qp`8E0lr6IQ)ARL3Ui}Af@grj8aN1=r>Cb+prlzO zNfJs*N_tUm2ZL%5* zPmL2??da$TR904gL(VDAQ-Fv_Dk}Pdw*4T(%*f4MKLRg=4ekMjhe2mW zMFsBwg%ftWT}0kxRaIk1k7qJ8*#cKB;Ft{i`zVIs-Nqge;!!Ld7#O&Qqu7e0sJmP) z$MW*>L$vSB&dxp@iA3U9fo)-7!Czlr{|o7Hv{1oyg3xsu%gn@(b1>$;SM-ZaQ`HV=V0s;lr%d8bd;xY zGwNvm3=Iu=tyXIgtJnf@A(2S@M140N ew{UA~tMxaJq;$xaSSi*30000distributed-process-platform-0.1.0: The Cloud Haskell Application Platform \ No newline at end of file diff --git a/static/doc/distributed-process-platform/index.html b/static/doc/distributed-process-platform/index.html deleted file mode 100644 index 85cabeb..0000000 --- a/static/doc/distributed-process-platform/index.html +++ /dev/null @@ -1,8 +0,0 @@ -distributed-process-platform-0.1.0: The Cloud Haskell Application Platform

    distributed-process-platform-0.1.0: The Cloud Haskell Application Platform

    distributed-process-platform-0.1.0: The Cloud Haskell Application Platform

    Modelled after Erlang's OTP, this framework provides similar -facilities for Cloud Haskell, grouping essential practices -into a set of modules and standards designed to help you build -concurrent, distributed applications with relative ease. -

    \ No newline at end of file diff --git a/static/doc/distributed-process-platform/mini_Control-Distributed-Process-Platform-Async-AsyncChan.html b/static/doc/distributed-process-platform/mini_Control-Distributed-Process-Platform-Async-AsyncChan.html deleted file mode 100644 index 03849c5..0000000 --- a/static/doc/distributed-process-platform/mini_Control-Distributed-Process-Platform-Async-AsyncChan.html +++ /dev/null @@ -1,9 +0,0 @@ -Control.Distributed.Process.Platform.Async.AsyncChan

    Control.Distributed.Process.Platform.Async.AsyncChan

    Exported types -

    type AsyncRef

    data AsyncTask a

    data AsyncChan a

    data Async a

    Spawning asynchronous operations -

    Cancelling asynchronous operations -

    Querying for results -

    Waiting with timeouts -

    \ No newline at end of file diff --git a/static/doc/distributed-process-platform/mini_Control-Distributed-Process-Platform-Async-AsyncSTM.html b/static/doc/distributed-process-platform/mini_Control-Distributed-Process-Platform-Async-AsyncSTM.html deleted file mode 100644 index 92f7cc7..0000000 --- a/static/doc/distributed-process-platform/mini_Control-Distributed-Process-Platform-Async-AsyncSTM.html +++ /dev/null @@ -1,10 +0,0 @@ -Control.Distributed.Process.Platform.Async.AsyncSTM

    Control.Distributed.Process.Platform.Async.AsyncSTM

    Exported types -

    type AsyncRef

    data AsyncTask a

    data AsyncSTM a

    data Async a

    Spawning asynchronous operations -

    Cancelling asynchronous operations -

    Querying for results -

    Waiting with timeouts -

    STM versions -

    \ No newline at end of file diff --git a/static/doc/distributed-process-platform/mini_Control-Distributed-Process-Platform-Async.html b/static/doc/distributed-process-platform/mini_Control-Distributed-Process-Platform-Async.html deleted file mode 100644 index 2b56eba..0000000 --- a/static/doc/distributed-process-platform/mini_Control-Distributed-Process-Platform-Async.html +++ /dev/null @@ -1,9 +0,0 @@ -Control.Distributed.Process.Platform.Async

    Control.Distributed.Process.Platform.Async

    Exported Types -

    data Async a

    type AsyncRef

    data AsyncTask a

    Spawning asynchronous operations -

    Cancelling asynchronous operations -

    Querying for results -

    Waiting with timeouts -

    \ No newline at end of file diff --git a/static/doc/distributed-process-platform/mini_Control-Distributed-Process-Platform-Call.html b/static/doc/distributed-process-platform/mini_Control-Distributed-Process-Platform-Call.html deleted file mode 100644 index e9136fb..0000000 --- a/static/doc/distributed-process-platform/mini_Control-Distributed-Process-Platform-Call.html +++ /dev/null @@ -1,4 +0,0 @@ -Control.Distributed.Process.Platform.Call

    Control.Distributed.Process.Platform.Call

    \ No newline at end of file diff --git a/static/doc/distributed-process-platform/mini_Control-Distributed-Process-Platform-ManagedProcess-Client.html b/static/doc/distributed-process-platform/mini_Control-Distributed-Process-Platform-ManagedProcess-Client.html deleted file mode 100644 index 3f2eacb..0000000 --- a/static/doc/distributed-process-platform/mini_Control-Distributed-Process-Platform-ManagedProcess-Client.html +++ /dev/null @@ -1,5 +0,0 @@ -Control.Distributed.Process.Platform.ManagedProcess.Client

    Control.Distributed.Process.Platform.ManagedProcess.Client

    API for client interactions with the process -

    \ No newline at end of file diff --git a/static/doc/distributed-process-platform/mini_Control-Distributed-Process-Platform-ManagedProcess-Server.html b/static/doc/distributed-process-platform/mini_Control-Distributed-Process-Platform-ManagedProcess-Server.html deleted file mode 100644 index e125476..0000000 --- a/static/doc/distributed-process-platform/mini_Control-Distributed-Process-Platform-ManagedProcess-Server.html +++ /dev/null @@ -1,7 +0,0 @@ -Control.Distributed.Process.Platform.ManagedProcess.Server

    Control.Distributed.Process.Platform.ManagedProcess.Server

    \ No newline at end of file diff --git a/static/doc/distributed-process-platform/mini_Control-Distributed-Process-Platform-ManagedProcess.html b/static/doc/distributed-process-platform/mini_Control-Distributed-Process-Platform-ManagedProcess.html deleted file mode 100644 index 5bf8442..0000000 --- a/static/doc/distributed-process-platform/mini_Control-Distributed-Process-Platform-ManagedProcess.html +++ /dev/null @@ -1,9 +0,0 @@ -Control.Distributed.Process.Platform.ManagedProcess

    Control.Distributed.Process.Platform.ManagedProcess

    \ No newline at end of file diff --git a/static/doc/distributed-process-platform/mini_Control-Distributed-Process-Platform-Test.html b/static/doc/distributed-process-platform/mini_Control-Distributed-Process-Platform-Test.html deleted file mode 100644 index 03394ac..0000000 --- a/static/doc/distributed-process-platform/mini_Control-Distributed-Process-Platform-Test.html +++ /dev/null @@ -1,4 +0,0 @@ -Control.Distributed.Process.Platform.Test

    Control.Distributed.Process.Platform.Test

    \ No newline at end of file diff --git a/static/doc/distributed-process-platform/mini_Control-Distributed-Process-Platform-Time.html b/static/doc/distributed-process-platform/mini_Control-Distributed-Process-Platform-Time.html deleted file mode 100644 index 4fc0e81..0000000 --- a/static/doc/distributed-process-platform/mini_Control-Distributed-Process-Platform-Time.html +++ /dev/null @@ -1,4 +0,0 @@ -Control.Distributed.Process.Platform.Time

    Control.Distributed.Process.Platform.Time

    \ No newline at end of file diff --git a/static/doc/distributed-process-platform/mini_Control-Distributed-Process-Platform-Timer.html b/static/doc/distributed-process-platform/mini_Control-Distributed-Process-Platform-Timer.html deleted file mode 100644 index c77445d..0000000 --- a/static/doc/distributed-process-platform/mini_Control-Distributed-Process-Platform-Timer.html +++ /dev/null @@ -1,4 +0,0 @@ -Control.Distributed.Process.Platform.Timer

    Control.Distributed.Process.Platform.Timer

    \ No newline at end of file diff --git a/static/doc/distributed-process-platform/mini_Control-Distributed-Process-Platform.html b/static/doc/distributed-process-platform/mini_Control-Distributed-Process-Platform.html deleted file mode 100644 index 79f142d..0000000 --- a/static/doc/distributed-process-platform/mini_Control-Distributed-Process-Platform.html +++ /dev/null @@ -1,8 +0,0 @@ -Control.Distributed.Process.Platform

    Control.Distributed.Process.Platform

    Exported Types -

    class Addressable a

    data Recipient

    type Tag

    type TagPool

    Utilities and Extended Primitives -

    Call/Tagging support -

    Registration and Process Lookup -

    \ No newline at end of file diff --git a/static/doc/distributed-process-platform/minus.gif b/static/doc/distributed-process-platform/minus.gif deleted file mode 100644 index 1deac2fe1a42e35b994f1b855488f392c50f6a89..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 56 zcmZ?wbhEHb * { - font-size: 93%; /* 12pt */ -} - -#mini #module-list .caption, -#mini #module-header .caption { - font-size: 125%; /* 15pt */ -} - -#mini #interface h1, -#mini #interface h2, -#mini #interface h3, -#mini #interface h4 { - font-size: 109%; /* 13pt */ - margin: 1em 0 0; -} - -#mini #interface .top, -#mini #interface .src { - margin: 0; -} - -#mini #module-list ul { - list-style: none; - margin: 0; -} - -#alphabet ul { - list-style: none; - padding: 0; - margin: 0.5em 0 0; - text-align: center; -} - -#alphabet li { - display: inline; - margin: 0 0.25em; -} - -#alphabet a { - font-weight: bold; -} - -#index .caption, -#module-list .caption { font-size: 131%; /* 17pt */ } - -#index table { - margin-left: 2em; -} - -#index .src { - font-weight: bold; -} -#index .alt { - font-size: 77%; /* 10pt */ - font-style: italic; - padding-left: 2em; -} - -#index td + td { - padding-left: 1em; -} - -#module-list ul { - list-style: none; - margin: 0 0 0 2em; -} - -#module-list li { - clear: right; -} - -#module-list span.collapser, -#module-list span.expander { - background-position: 0 0.3em; -} - -#module-list .package { - float: right; -} - -/* @end */ diff --git a/static/doc/distributed-process-platform/plus.gif b/static/doc/distributed-process-platform/plus.gif deleted file mode 100644 index 2d15c14173d23f664b955cd24f51c82f5f09d91d..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 59 zcmZ?wbhEHbgbBX M^XE!9f*2UA0nx1yDgXcg diff --git a/static/doc/distributed-process-platform/synopsis.png b/static/doc/distributed-process-platform/synopsis.png deleted file mode 100644 index 
85fb86ec84907bcc86531dc82871948ff4d471fa..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 11327 zcmV-FEWp!=P)4Tx0C)k_S!GyNTeqHT_l8Y(cXyX`gGi?cY`Qxn1VID|MJXwjPC)?)F$h6K zMMOd+6hs7sqbPzXbr*U(-*=zy-hcPcUC*=TdiNM(jyd-lv&OpsU|J&v2m2!^0SE{T z54F(O;E2!K(!rTCW z%wV;vdzf1QjBf#e&~gh74F>?Z4a=WLg$KhJ^$5nap>PLbJadS>e&h8+?D`9%QNL`g zEVKbYGXj7k5Q(8)0Fd#*a?VIMFW3*64geVHKzE-&0BG!BtmfuTbO(T`0Jaeg2nagF z{V*1E{Wm{e|AvV~*MEExiC+KU-~R=!2{)|c6Bg`GjQ;iG|FQ`1kAUCTuZtQk34#8{ z4r4(3g7#|{=Z@d+d#}7f!3C=>=26vx*jwA8>@MS>RG@Tt_zt3hie^T z_?0%9VUd=)Fos7I z^ghPh%Jy%YZ|)vCf6EaFPai$Q-!=$ppK!y&wrJs)bNdAuANB!m3n34Tfj{s75g-&U z1A!Pg3bcXF-=!Gv1VmU93G2duANT;{0JugFTqg*|oPXPC|A$2HS3NJd-hcPV3EW`Y zh=1Dr-5Mv{<{zIvz#Ybay&^Vcn^E_`qRfl{{bzYkp)4~$~NAx_VB;E z{?P)PU)DbV{Qi#~0H0@T9czDj06@6MNq8OrpdAz(9qQxd9nPr<&s+~tPQySqaZyfb zNh!%g_5YjeaLxMN*$sv_p;d%b#U$Wpz0Geb0U>E+EOsEQ;I!&= zNC6q(BFFWohy&t- zL?CHM5mJM6p`(xmWDmJOUQi$u0mVUQpbRJ*DuT+OI;a`C4fR4p&?xj8nuk`Puh35f z55*JWF{C0=8)=GkKzbrWk@3iMWInPS*@Wyu4kE{pbI3L14-^JPgW^Pq!Q<2bWsPz} zg`nb5nW!REEvg;Wj~YYGqt;RTXfiY_S_G|(HbmQ@z0gtU6m&ki8r_B-Ku@3-(OVb{ zh8`n;QNS2r>@mKWSWG773g!l;2Q!LUz-(f%SSG9pRuyZCC1S&|DcC~nb!<2G1$Gg; zjU&Zz;G}VSI0sxHE(w>9tH<5Py}&KucJP#VKD;vC6z`6Y#%JLx@m=^4{33pbgo;Ff zM3uyf#Fr$Iq=2M}WPoIbWP_BHl$%tE)ST3Z^fYM!=}po{r1PXd2-E~&f;PdC5J9*= zs3G(aUK2LR$jJD~G{_vt!pSa>)sa0QdqcKOPD3tEZbLrbsZB|wjHfK7yiNI%a+8XNN{Y&qDu61Js-9|yYMB~K%}=dM z?M|IcT|xbTdVvN>!$YG@<3@9arjllWW|0;{D?n>V>r0zK+erJ2cAbuzPL|Gw?j&6? 
z-95TFdL%tRy&=6neHMKS{UrTQ1~vvw1`mcbh9-s=4Br`97&RC@7}FVVFitT3Wa4Df zW%6UX#MHqw%Zy?cW;SPzV!p~ez`Vvn%c8>K#*)s`!ZO8*U=?PyV2x$1V13HE$;Qs6 z&lb#9$o7D3jh&udgWZ=sm;FBb3I`2`8ix-@E=M=VM@~9UO-_H#0?vNUbuLye1Fi_J zGOlM_JKO@?*4#+T3Fgmx>$N#hD=6JCPAiC=8LR|tcUDX*;jHjawc-Aa(!}p@(S{y z@=fw93cLy~3MC3J6=@aC6f+ecDWR3LloFKgD*aHFR}NQhQU0tVrsAhkud;kZ;E2bO z$|DP^+^R&?GSxXXPBj;`QnfjCE_I@Mx%xW|9u0SmYKzbdmB(*}d+O)oF zD{G(9?$JT&=D|u+DJZ zNWtioQNJ<4*wVPj_}x+AqoGH;Ob{kUCOIZE$M}u~9_ug#riP|Drn6=OW+7&G%rWL> z=Ede8ETk;rECwxUES)XuEw`++tg@`8tp%+ktov*zY#eRsY`)v-*k;?#*-6-)vU_6B zZ0}>=>40^xaj16KJg$2@@A#sloMVdPRon; zro?jMrmLZAiR-$Xw%cX5Rd)^dT=x|ZRgY|sB~Mk)Y|mvcRj(Yc6>oL#eD5_MZJ#2a zFTMu8*L=VGnflfE9r)Y&-w413xCGn|qz?28>kOxb4~I`91S8Hy%txw47DsMJ*+jLTq&gXR@@ceibXxRMj9yGtEGpJ5wl9t= zE-`NYl;)|jcqraAzAu3%Avt03wEpSZM3O|m#Ni~#r0k?`XKc@OC9@@;PF^^xf3_io zJS8;cWvWW*wR5O*KIfjL$)pvg?Wen^KhBWM$j{i#bjy5vUg~_o`GX6d7oKIwXI;IB zxfpnH@{;j<`HmaI~Pakhkz+;ck(4 z(L}LU@r@GJlC+ZVSKP0>xT6f*a^OxsWU@9UjK2+LN4pu2v z)m1ZBXH@Ui1lG*eTGaN}Db&@~v({%dAQ~bXR<1ijt)TYR@l+GyI++oAU8_Vo_$j=4_z&e7XOxBI$Oy4voD->JFFb+`B) z-My^)B=?i=A9TlbZ}tTDto3^JF7!F~O+T=EFy3$8|7^f`;L$_9hYtod2fH7sKDs-k zJaqf9;^U4d@=w~I$~|oxmK$z+CjYE`L}8@!xzh8l(IcbxU#P$69n%?mIBq!pWa8Mw z=%n@JtCx;1=U%zLT7K>S`pZ=0)Xwzj8T3s0Eahze8`d}FZ-w68n3JEoH?K4Q^qu9q z=>@li)%RiVcNddCkbTHs;#jI%mR`QQqPOz=CgGy+9whdp4g`BLCvp!8U&;uov(!a2t+bEnRv6HXyi9t`-YglcEo`$K zI8GTZXYLH1F5YE+b^&9-c%dfYc~N>X1MygiCdpZ8N*OKLV7W5+5rusvVP$KTgd_E; zV`@J%*flk^Jhjj1)aX9cTQC5ItVZ(2W=FkE;*aH-)|+*kk6SET?pjmWaNEk+>D${o z_#cmV%sNr-bj$gX%QW$m8{|&wA?SI;%go!uC))SCU%7vKz~jI-L0?1Ap^RZ7;i?hG zB3+__P9{WW#uUa@#oavB8Q+`m==5;nXwvwZiR6j1<0+%5!{;8Q^`_s>XwIxTUvlAM z)|rdpmprp=bM$iM@_6#8@((Vr7Q8HcP;{fXs3iGH;8nY8TBRaov}JqcixtC_ZBw07?YBCLI#1vB=rX<|d6)j~ z?!9;SA9XkN4rDD83J6N{$`!z{xG&lW}=KCd6md=WHe zF)la3F!5t@`sLkMS6?Sg5vR3gcxTbGOK%>(y*_twKH{Cjg64anMViI^4{J-a%g0=3|@n*5+(H4=G;Z`Bm z0XDw2UUnY#t`5ZG&WObDFO_)C zCe0{aEki1k_dNXt+=U-mA1_W_8p^(%Qj|@Mb z9sM+h7-yIepVWIvd=>Y)XzKR#)XeT1jH zI8-@&65hs?W6g0$Tn9b?K9MevmJ{6JljSOT6GbGYHWfM5G<6M41g#z&E8Qx6H$yI? 
z50eHn6Z1ODBi1suSavH8F-{EUJXaTYHjh8AJ|73)7XPq7gt>OirQ5IDz)!g7S$y<#pnvPn` zTCcP(>sag3>W=B<=vx}l7>pa{8`&AN7|$LpGx0noeC)GnyV)so9SefRgyl6WA8Q%w zeVfO&`F8I1(hk7k+3~B6fhW|RD4pIpx4EPekGo2^q1>k2n?25Xx_BviQ+coYJoGK~ zi}SY&kPV~?{2VkK+z^r;>Jw%VE)ao-y@)AN%A4?QY z!X(X~xtpASHaNvFl_z!g+(cSqdP;^mD`$^mG5`i zpn$&+Rk%>pUtCp^dd2Um*){o6wlZ|t=klqF!OHfk>gs};%-W>7nEHr@(CeX%5lwM7 zQg7xp*S7SwzHLLbOLn+*Uc0?`NAB*$d)wWCJsW)~{h|X4gV%@BpPU*_8L1qd8t0!( zdySmVd!st{bK%K{=9Rj&=Ffv)KX1|hFxkC)82{hg(&3(fkq6-NB>?O?0kGBtAd?QJ zm0$~|LIBLj0I*U5i1iA9XzK$|?dCuG2lOlFq=GX}9v}f{nuc(O=>uZH1yBw;!3bD_ zU{(i`gLA_m=mOLPjX+-zbO8W#QsA+O&>1m7Uxak_`<>>nu%o*kx!T2DqomQ{`*59GHMHWa@qZ7S~^!Kl)z@vEz7SZjuAWovinywxMoS2FN7 zEH|1t%4A}H?2754xrD_j%Moi{n>gE7_6iP##}7_;J59Lg5Ifz(-D^B~y{dc!eQ)?H z1`GsQ2d{)Cgfm98MOmHv9&;s5@6?xs(nO0hxa6LcxN|CLdl`M_GqP+i31t7w9nHU9 zkY40hVt!S*RG^%pl2DDR1@+)Ms)_U_Lks^c#r9*J-d)LeEAIFAEIl9{kQ}rbihXiz zxOZfJbZ?wtQtXx5l+ld&8>=~scSi5kK8P(dtn9DO{nh=s_)Emb(M`^+uiKA)7VrA) zEB#tO5ODlSVZM$P@WWh#2Fx+Iz|6u~m`%6|24UXdCqxG`1g0=2kOkd@#-Q&AR(P%P zMdTpvAy(jBM;jT2tUyk{D~~EF3{{U>K(nFk;T(JdLx-`&6l3PF0@xsI7Y>87!d2q7 z@J9GD{0|aKlAELyq`{in5#@A}YP&ZEYQ#XH-V)Gsvv6_^~14ao?j4lj=6k7|w9iW!UZJhhvUlPHq(FxfQ) zq?V>>q`%8dxgeZ1aw#H*HTOZjUjc35y<*QR6jwV-iRB~}tyPXS=-S45n}+?ysv9OZ zzqJ(K(rR1j$hs}xHG4PtzG(M&@2Lj@{VyISJQ5#z^W@U7{hV|l=i6Vte3RLV-yYuK+dKCw{z!laG%#N$3ABJM%p<0O zYA^skKqQbP%m$r-WBwLFh0ujLomRwONMWQ8vL5*f<`CmhgJ?Rm2f718hVj63W7)9r z*mpQXTq~XnpG|@xNg&xFjU_!Gq>|CVvs#J#1w}9=HDxE2J2egUAWZ`85!yYvKKcv> zJ4PYKJ*G+KW|m8=VQlv7TJY|}%00wyKDli~41a=UN19Bb{{JVSQ=?d&3H&&qviwE*<+| zre!9^?4cDF}{Txa*#Kx+jZQvyZXwvVVG@WYFu7)G)>HwaCho zPBE;pGpDX4cqED@Z6)`nTsY^LE}F4-ek7|Lj+#LpTmF}Vfuf?4z^j_2v}GSEI;v7@ ztn0YySFg7=Mcq_r{?^*qM(m*I?Cd&z=li|$-7G!jeOwO;25=992SX5MzsmCeV$vtN*Wk9q%cvGzm6 zlGZYQ`Nc~9M~79`)tR-DzwAEIeH!_EZe4SI`^$~5?i-97Prt=)N^Q<3ePg@o zht*Hi&(|HuI*eO3a z*sFk(4fq>KkN@xQ6^F(cm~$_2K14li9;XkV|9<@!M&f%8Nam8p00009a7bBm000XU z000XU0RWnu7ytkil}SWFRCodHT?u#;Rkr@KbUNvfeG_5`YY-wNfPp{+o{ADgGcxep z5O;8ydCWk3pWowCbe1RjK4lzy;4&jKqk}U-a1=+ud7z@;LLwlFC>S)v1jwFrI_XY2 
zop;WyuIf%_F~x?x|CCgE~7q5lBOq0>MKUdH^|7ARquk zTn+*P5DlHMG@8ELxbaVWHf?&T znHpfF&E_pZ&^rD;1;7qozi0Q$(`V)7{8<+kI>wdbHk%E>!9AN2eO+^{$KB)hHtVU6 z4;0@%KYw`%{kM%aj|)L>`1``u*EM%B_Ep|f_7iHT~t6&rZsneaT;XVt##n z3*O&%0=#!k4Gq$@x_XoAC663)d$?Wm=UXTrha?_sgD)BZa!4dhf)W5g$)o+5f!@!6p= z7>#E6lGpa0z~7?)*juclePn!mT$U>W2F?VqT7?}(LqHHhL#3+DoNXk5_#Pb{(lwSP zZ<=X|iSbjYeFoatR`H}3=!RdX3qeSTbc>FTPC&5WKoW3vT<}n4p!jve)Qtntp05&Y$`N~L&mauhNrjZlt#E%Rdnz*4RdA(~WsS0P~4Cker*^h9K3rID79 zAhx!)2_f*-6tD+E@|~5o_HbR*DQEm#fix64W;xPOIEsuwz3>ej`Mg}wlx+M?%^s;7 zt7<_1|D+24j|zb6{d*Duo)R*nQ%A&N`m}UK6}Gim#oV|jr-^I5{&3u6Y!z0&JjK=N zf~iA{0UNr_&1RH*=FkdaRxmwXu@ih1pW6b!KwO1@&&hNBf0 z=VYU~zns|bF>|Ig{pE8Oi&e4q8Sf>;d>$HnJ*g4^2E{@!BWJXj|MK2>t{)#4iCiKM z_X3_Wd3!22SVWGECF_5t9Wx1ebdVe1IRabo*K&Me+mp(08G`jsI~A7O*rz=A?*I(Ym_y4*ZBHj<`2EIL z@XCfeuGtW8G6RGFlFM<@CjE-OtU#5a;0kB%yXw(N%<3n(~sBeG(H{~)Y9EAyo%kT#Rg2j zpdOnacnjrpoDswQL%S&=xD)LJZ^c?^7~tUKxVSW2U-+UJ`I8c2{Q|sd4FLUcTr-0M zaqMa26wFKpz7U~s3AlNV^qhrHMbm9<`9gTLcVV_VCkYcW$bp+1aV?*4j`n;5NQvl5P$NHC1)DVqF ze?14Uta}S5dTDmrRR#Fn;tPAZ>c6M&cw`%zt17X5(`x+mXPZPMYENh$xHA{IIn#Q& z^ zG}YF_5*3HIuofIEDMeLB1jc8M#;C+D(d52>)gx`#@~i9ZqkAV_+e~x*&R~QFvHtHw zX=O8P?QIyJ9Ss9*B|&g;0hMp z3Alm-uHb+xn7Ts16&!E{`__2XkJh+p1UhOAxPk+&;D9SQ;0g}7f`^~4p*Mp`Hum_uHM8Ep9TllPO>m-^Cs zpVwg1bK6i`-w1z*2vDs7WXVaJJHyU=rk@Vk3#W^iKzdl}7D4^3u#E2B8*>%rGlt8u z5=Bg)^vMF>N2OW-kTeo=C=#;#Uwg6hiz=At%UPznGuZL$9uX3jIcgXzEoL+}ne7De zePX!NLIZ__1sfvpaY5fTR( zUH5HKQ7-^w@TCk-ATqS$+;^2Y-9Yg{p~En8>~LcE&~OCN2SO-y!qgT7qsff0kWR!$ z^D81!lBm$TfXL;}=Y9YJK+SF{!{d*=}ZDsk}pA}{0WdF3_)n|T5 zFNK7P(SF;zrP#jx9qieE2>F-K@p;gyHGt(@rI_!hEt)McpP}lbFn3v=a0JCAI=-Ld z^HfmLKw}#PgVO)j-n&3BpR3@}{)WrPilHHGIK3w22T8R6=u<`rMwjnBh~jFy5zt}A zN81hv!KkMXNNPDnh1mq7H@>uwma1@k3;2!wtQCOj+9tn%uigkWBw{AL|5)BofhX2& zA+XZ302%fCsUzg9CimQPVv`f;C6O8|{n>ML#6sZcPqU_9DPe!$!>g7coyleK6R!5=0O9Kit+4(r(6 ziv6QJ8-P(X4Sa3SakRGjFIv?a0G4_jZD3}d!^RD-cH>&cq5?d2jrKkeAp_;!Ur#;& z9W7Y4e9epUX=T6m-g%gom8l&2YDT>Vpn#D2K2TLOYC9;D1)wkDRn>N#8T3J_^Lk0W 
z2GEDo5^3Wxdgdfd9w7&WOIUcVywJ$#^9sz{H)rNATQUdN%*}+3f?}K#TL)6Cfb&`3 z%&Qjw3IaWJ_$1z;4dDsM&%YQ~=42pUgopbkSWmW!9lu+5e2Bl(Hp~!=)psw#l#5d7 z<59t4!9`Er%bRtn7l4p3WRMY9&31sf7Q0{HC$^-K>G(;07G_Pk5PmWfQbk{$>nD;C z$aX+;iw(co_@<~Qn^p+B=a%_MiWA>XQ&sn1{z<(6(1#*dufHEF>#Fe8m!&8!F2%dw zHlg}-8UFYJZG<8tdn)d^eHPNC3G-m$^7_440RBMV3*u1l6Q_-MckXuK!rmQ$k)#dR$sG z@^U71!@qOSF|2)@pOpG;Qm+AE#NKTmpy<6aRJ-8I$ex7UR10>zRSMI&Dx4*+aC%oe z$>ksZdHCl3@33X-u5M#~!F>8s>bP;(@Z1iZ5DQ57E(pe>^RmdH=2Rkv1Y;;r0f4a|kUQI?AO7tZbEf zJ(*E203jiWBR5FKRnt*$=_L9l06hS)bRb+XpPQ(|6)W>G1u?i-W6WoCJgUlRkTWYJ9y;~2lKhQP~5|72z2_#^8q&npdI^OKWZnM4)jd~lxFIKK%PKOm(9u+`!IG4P>PAtq9@Rh0JE!{0DuH! zkK`y|6ZXDM&ju*fYcM2?dkd?0BQd?AvKl9=rI$l^%Bzo%82pwp_ z3!t@d`N^j}MPee&>2}gr!FRvB)4o^~UCPYDMfxiI>b@c+MsVI_ZG?n%#SdILF9)yD z8iBv~&32h6$j=)^`5;_--)1F7aK==Pycf`JwRRcIa&EjD`NGhX@h9M+TM4YCmA;oJ zrO3=nv3MeD1n(z%`&dZj&7(JU#eehVv~0XE^yJ%^arZ3+;^s6cinJi_LRv*8MlRsh z{Xp^er2%-zvwii|iPQND<~cxwB;)S&_u$&{D%8_7aQMh%>8YP30yAe!z=De>;j*0J zN>6b7(K|VAAJyy)=J$-BZpMp7n5{I{+sN@1<}jm{UYm<6az zC)2KLBDKeY!To$ha&qG2BZqfAotPNM^BbQ^H8u4$*;5z(vZ|_v=c1LgH4&aJ8cR)s zhZ25=_;#ffO9d0sLd30K^&jiDoI6+3R|Htse-FYDw`bL=buUu;*yY6jR@v$9iMtOO z{Jm)a77X@ba%$f%7edh>l!!{woQDqvAyLn?wOiY*$B%zo zv32X~pEWczvH$rLZ56cfy6vr`0a$epDA9d}4E`PkfT>4BU?%e$j!CrfB%e1P1~}M{ zuQ8DZRRHLI>|J6XE5CNbPoY`u^Tv~L_DESt0J@K9biv&;RPgs@1TwMtC4bqg&n_U& z^RqpU@fmCZV8(Krcxd8Db|Y=v9v+%_sqO*ye5%7a4GH|cY5=AL^#T?U?(IAraOf}Z znfd(s?_l?Sx}{(;kM%5!ES&ry9?r8?uz9NYQ(Ynr1^j&q08@d8z|&jaWMSaE-1`Sx z2*lKk?$1KN8*2mJGw(g3`l+riN$dE3Q~;P7LCd=wx?7hW&8J3pu z_e%g|LIn2Oqk!C_wTCQ#s9zKa2tdEcq}@UR0njdQ`-LnZ0R1A9b_)drK)bx{7qWl= z^ovZ|Eff#{?eex?$N~b;FEVMjP(T2*%iDe-`+v|7m{y$1dn*6{002ovPDHLkV1lnB B5rhB$ diff --git a/tutorials/ch4.md b/tutorials/ch4.md index ed1ac1f..b04958e 100644 --- a/tutorials/ch4.md +++ b/tutorials/ch4.md @@ -7,101 +7,108 @@ title: Managed Process Tutorial ### Introduction -The source code for this tutorial is based on the `BlockingQueue` module +The source code for this tutorial is based on the `BlockingQueue` API from 
distributed-process-platform and can be accessed [here][1]. Please note that this tutorial is based on the stable (master) branch -of distributed-process-platform. +of [distributed-process-platform][3]. ### Managed Processes -There are subtle bugs waiting in code that evaluated `send` and `receive` +There may be subtle bugs hiding in code that evaluates `send` and `receive` directly. Forgetting to monitor the destination whilst waiting for a reply -and failing to match on the correct message types are the most common ones, -but others exist (such as badly formed `Binary` instances for user defined -data types). - -The /Managed Process/ API handles _all_ sending and receiving of messages, -error handling and decoding problems on your behalf, leaving you to focus +or failing to match on the correct message types are the most common and +other, more esoteric problems exist, such as badly formed `Binary` instances +for user defined data types which can crash the sender or worse, in the +presence of [_unsafe operations_][4] and unevaluated thunks, unexpectedly +crash the receiver. + +The /Managed Process/ API handles sending to and receiving from the server +process, in-process error handling and message decoding, leaving you to focus on writing code that describes _what the server process does_ when it receives messages, rather than how it receives them. The API also provides a set of pre-defined client interactions, all of which have well defined semantics -and failure modes. +and failure modes. There is support for sending messages to/from a process' +mailbox, using typed channels for inputs and outputs, RPC calls (i.e., waiting +for a reply from the server) and fire-and-forget client-server messages. -A managed process server definition is defined using record syntax, with -a list of `Dispatcher` types that describe how the server should handle -particular kinds of client interaction, for specific types. 
The fields -of the `ProcessDefinition` record also provide for error handling (in case -of either server code crashing _or_ exit signals dispatched to the server -process) and _cleanup_ code required to run on terminate/shutdown. +Managed processess are defined using record syntax, providing lists of +`Dispatcher` objects describing how the server handles particular kinds of +client interaction for specific input types. The `ProcessDefinition` record +also provides hooks for error handling (in case of either server code crashing +_or_ exit signals dispatched to the server process from elsewhere) and _cleanup_ +code to be run on termination/shutdown. {% highlight haskell %} myServer :: ProcessDefinition MyStateType myServer = ProcessDefinition { - apiHandlers = [ - -- a list of Dispatcher, derived from calling - -- handleInfo or handleRaw with a suitable function, e.g., + -- handle messages sent to us via the call/cast API functions + apiHandlers = [ + -- a list of Dispatchers, derived by calling on of the various + -- handle functions with a suitable thunk, e.g., handleCast myFunctionThatDoesNotReply , handleCall myFunctionThatDoesReply , handleRpcChan myFunctionThatRepliesViaTypedChannels ] + + -- handle messages that can only be sent directly to our mailbox + -- (i.e., without going through the call/casts APIs), such as + -- `ProcessMonitorNotification` , infoHandlers = [ -- a list of DeferredDispatcher, derived from calling -- handleInfo or handleRaw with a suitable function, e.g., handleInfo myFunctionThatHandlesOneSpecificNonCastNonCallMessageType , handleRaw myFunctionThatHandlesRawMessages ] + + -- what should we do about exit signals? , exitHandlers = [ -- a list of ExitSignalDispatcher, derived from calling -- handleExit with a suitable function, e.g., handleExit myExitHandlingFunction ] + -- what should I do just before stopping? , terminateHandler = myTerminateFunction + -- what should I do about messages that cannot be handled? 
, unhandledMessagePolicy = Drop -- Terminate | (DeadLetter ProcessId) } {% endhighlight %} -Client interactions with a managed process come in various flavours. It is -still possible to send an arbitrary message to a managed process, just as -you would a regular process. When defining a protocol between client and -server processes however, it is useful to define a specific set of types -that the server expects to receive from the client and possibly replies -that the server may send back. The `cast` and `call` mechanisms in the -/managed process/ API cater for this requirement specifically, allowing -the developer tighter control over the domain of input messages from -clients, whilst ensuring that client code handles errors (such as server -failures) consistently and those input messages are routed to a suitable -message handling function in the server process. +When defining a protocol between client and server, we typically decide on +a set of types the server will handle and possibly maps these to replies we +may wish to send back. The `cast` and `call` mechanisms cater for this +specifically, providing tight control over the domain of input messages from +clients, whilst ensuring that client code handles errors consistently and +input messages are routed to a suitable message handling function in the +server process. + +In the following example, we'll take a look at this API in action. --------- ### A Basic Example -Let's consider a simple _math server_ like the one in the main documentation -page. We could allow clients to send us `(ProcessId, Double, Double)` and -reply to the first tuple element with the sum of the second and third. But -what happens if our process is killed while the client is waiting for the -reply? (The client would deadlock). 
The client could always set up a monitor -and wait for the reply _or_ a monitor signal, and could even write that code -generically, but what if the code evaluating the client's utility function -`expect`s the wrong type? We could use a typed channel to alleviate that ill, -but that only helps with the client receiving messages, not the server. How -can we ensure that the server receives the correct type(s) as well? Creating -multiple typed channels (one for each kind of message we're expecting) and -then distributing those to all our clients seems like a kludge. - -The `call` and `cast` APIs help us to avoid precisely this conundrum by -providing a uniform API for both the client _and_ the server to observe. Whilst -there is nothing to stop clients from sending messages directly to a managed -process, it is simple enough to prevent this as well (just by hiding its -`ProcessId`, either behind a newtype or some other opaque structure). The -author of the server is then able to force clients through API calls that -can enforce the required types _and_ ensure that the correct client-server -protocol is used. Here's a better example of that math server that does -just so: +Let's consider the simple _math server_ we encountered in the high level +[documentation][5]. We could allow clients to send us a tuple of +`(ProcessId, Double, Double)`, replying to the first tuple element with the +sum of the second and third. What happens if our server process is killed +while the client is waiting for the reply though? The client would deadlock. +Clients could always set up a monitor and wait for the reply _or_ a monitor +signal, and could even write such code generically, but what if the code +evaluating some such utility function then `expect`s the wrong type? We could +use a typed channel to alleviate that ill, but that only helps with the client +receiving messages, not the server. How can we ensure that the server receives +the correct type(s) as well? 
Creating multiple typed channels (one for each +kind of message we're expecting) and then distributing those to all our clients +is awkward at best (though we will see how to do something like this using the +API in a later tutorial). + +The `call` and `cast` APIs help us to avoid this conundrum, providing a uniform +API for both the client _and_ the server to observe. Here's a better example of +that math server that does just that: ---- @@ -153,15 +160,19 @@ server's mailbox is taken out of the programmer's hands, leaving the implementor to worry only about the logic to be applied once a message of one type or another is received. +We could even hide the math server behind a newtype and prevent messages +being sent to its `ProcessId` altogether, but we will leave that as an +exercise for the reader. + ---- Of course, it would still be possible to write the server and client code -and encounter a type resolution failure, since `call` still takes an -arbitrary `Serializable` datum just like `send`. We can solve that for +and encounter data type decoding failures, since the `call` function takes +an arbitrary `Serializable` datum just like `send`. We can solve that for the return type of the _remote_ call by sending a typed channel and replying explicitly to it in our server side code. Whilst this doesn't make the server code any prettier (since it has to reply to the channel -explicitly, rather than just evaluating to a result), it does the +explicitly, rather than just evaluating to a result), it does reduce the likelihood of runtime errors somewhat. {% highlight haskell %} @@ -189,22 +200,23 @@ wrapper functions. An additional level of isolation and safety is available when using /control channels/, which will be covered in a subsequent tutorial. Before we leave the math server behind, let's take a brief look at the `cast` -side of the client-server protocol. Unlike its synchronous cousin, `cast` does +part of the client-server protocol. 
Unlike its synchronous cousin, `cast` does not expect a reply at all - it is a fire and forget call, much like `send`, but carries the same additional type information that a `call` does (about its -inputs) and is also routed to a `Dispatcher` in the `apiHandlers` field of the -process definition. +inputs) and is also routed to a `Dispatcher` in the `apiHandlers`[2] field of +the process definition. We will use cast with the existing `Add` type, to implement a function that takes an /add request/ and prints the result instead of returning it. If we were implementing this with `call` we would be a bit stuck, because there is nothing to differentiate between two `Add` instances and the server would -choose the first valid (i.e., type safe) handler and ignore the others. +choose the first valid (i.e., type safe) handler and ignore the others we'd +declared. -Note that because the client doesn't wait for a reply, if you execute this -function in a test/demo application, you'll need to block the main thread -for a while to wait for the server to receive the message and print out -the result. +Also note that because the client doesn't wait for a reply, if you execute +this function in a test/demo application, you'll need to block the main +thread for a while to wait for the server to receive the message and print +out the result. {% highlight haskell %} @@ -652,3 +664,7 @@ processes is cheap, but not free as each process is a haskell thread, plus some additional book keeping data. 
[1]: https://github.com/haskell-distributed/distributed-process-platform/blob/master/src/Control/Distributed/Process/Platform/Task/Queue/BlockingQueue.hs +[2]: /static/doc/distributed-process-platform/Control-Distributed-Process-Platform-ManagedProcess.html#t:ProcessDefinition +[3]: https://github.com/haskell-distributed/distributed-process-platform/tree/master/ +[4]: https://github.com/haskell-distributed/distributed-process-platform/tree/master/src/Control/Distributed/Process/Platform/UnsafePrimitives.hs +[5]: /documentation.html diff --git a/tutorials/ch5.md b/tutorials/ch5.md index 3f268b4..f3e0fdf 100644 --- a/tutorials/ch5.md +++ b/tutorials/ch5.md @@ -12,4 +12,25 @@ and monitoring their lifecycle as it changes. The ability to link and monitor ar foundational tools for building _reliable_ systems, and are the bedrock principles on which Cloud Haskell's supervision capabilities are built. +The [`Supervisor`][1] provides a means to manage a set of _child processes_ and to construct +a tree of processes, where some children are workers (e.g., regular processes) and +others are themselves supervisors. +The supervisor process is started with a list of _child specifications_, which +tell the supervisor how to interact with its children. Each specification provides +the supervisor with the following information about the child process: + +1. [`ChildKey`][2]: used to identify the child once it has been started +2. [`ChildType`][3]: indicating whether the child is a worker or another (nested) supervisor +3. [`RestartPolicy`][4]: tells the supervisor under what circumstances the child should be restarted +4. [`ChildTerminationPolicy`][5]: tells the supervisor how to terminate the child, should it need to +5. 
[`ChildStart`][6]: provides a means for the supervisor to start/spawn the child process + +TBC + +[1]: /static/doc/distributed-process-platform/Control-Distributed-Process-Platform-Supervisor.html +[2]: /static/doc/distributed-process-platform/Control-Distributed-Process-Platform-Supervisor.html +[3]: /static/doc/distributed-process-platform/Control-Distributed-Process-Platform-Supervisor.html +[4]: /static/doc/distributed-process-platform/Control-Distributed-Process-Platform-Supervisor.html +[5]: /static/doc/distributed-process-platform/Control-Distributed-Process-Platform-Supervisor.html +[6]: /static/doc/distributed-process-platform/Control-Distributed-Process-Platform/Supervisor.html diff --git a/tutorials/ch6.md b/tutorials/ch6.md new file mode 100644 index 0000000..015c176 --- /dev/null +++ b/tutorials/ch6.md @@ -0,0 +1,350 @@ +--- +layout: tutorial +sections: ['Introduction', 'Unexpected Messages', 'Hiding Implementation Details', 'Using Typed Channels'] +categories: tutorial +title: Advanced Managed Processes +--- + +### Introduction + +In this tutorial, we will look at some advanced ways of programming Cloud Haskell +using the managed process API. + +### Unexpected Messages + +The process definition's [`UnhandledMessagePolicy`][policy] provides a way for +processes to respond to unexpected inputs. This proves surprisingly important, +since it is always possible for messages to unexpectedly arrive in a process' +mailbox, either those which do not match the server's expected types or which +fail one or more match conditions against the message body. 
+ 
+As we will see shortly, there are various ways to ensure that only certain
+messages (i.e., types) are sent to a process, but in the presence of monitoring
+and other system management facilities and since the node controller is
+responsible - both conceptually and by implementation - for dispatching messages
+to each process' mailbox, it is impractical to make real guarantees about a
+process' total input domain. Such policies are best enforced with session types,
+which is part of the Cloud Haskell roadmap, but unconnected to managed processes.
+
+During development, the handy `Log` option will write an info message to the
+[_SystemLog_][3] with information about unexpected inputs (including type info),
+whilst in production, the obvious choice is between the silent `Drop` and its
+explosive sibling, `Terminate`. Since in Cloud Haskell's open messaging architecture,
+it is impossible to guarantee against unexpected messages (even in the presence
+ of advanced protocol enforcement tools such as session types), whichever option
+is chosen, the server must have some policy for dealing with unexpected messages.
+
+------
+> ![Warning: ][alert] Watch out for unhandled deliveries, especially when using
+> the `Drop` policy. In particular, unhandled message _types_ are a common cause
+> of application failure and when servers discard messages without notifying their
+> clients, deadlocks will quickly ensue!
+------
+
+### Hiding Implementation Details
+
+Whilst there is nothing to stop clients from sending messages directly to a
+managed process, there are ways to avoid this (in most cases) by hiding our
+`ProcessId`, either behind a _newtype_ or some other opaque data structure.
+The author of the server is then able to force clients through API calls that
+can enforce the required types _and_ ensure that the correct client-server
+protocol is used. 
+ +In its simplest guise, this technique simply employs the compiler to ensure +that our clients only communicate with us in well-known ways. Let's take a +look at this in action, revisiting the well-trodden _math server_ example +from our previous tutorials: + +{% highlight haskell %} +module MathServer + ( -- client facing API + MathServer() + , add + -- starting/spawning the server process + , launchMathServer + ) where + +import .... -- elided + +newtype MathServer = MathServer { mathServerPid :: ProcessId } + deriving (Typeable) + +-- other types/details elided + +add :: MathServer -> Double -> Double -> Process Double +add MathServer{..} = call mathServerPid . Add + +launchMathServer :: Process MathServer +launchMathServer = launch >>= return . MathServer + where launch = + let server = statelessProcess { + apiHandlers = [ handleCall_ (\(Add x y) -> return (x + y)) ] + , unhandledMessagePolicy = Drop + } + in spawnLocal $ start () (statelessInit Infinity) server >> return () +{% endhighlight %} + +What we've changed here is the _handle_ clients use to communicate with the +process, hiding the `ProcessId` behind a newtype and forcing client code to +use the `MathServer` handle to call our API functions. Since the `MathServer` +newtype wraps a `ProcessId`, it is `Serializable` and can be sent to remote +clients if needed. + +------ +> ![Warning: ][alert] Note that we _still_ cannot assume that no _info messages_ +> will arrive in our mailbox, since it is _impossible_ to guarantee our `ProcessId` +> will remain private due to the presence of the [management][mgmt] and +> [tracing/debugging][dbg] APIs in distributed-process. Servers that use the +> distributed-process monitoring APIs, must also be prepared to deal with monitor +> signals (such as the ubiquitous `ProcessMonitorNotification`) arriving as _info +> messages_, since these are always dispatched directly to our mailbox via the +> node controller. 
+------
+
+Another reason to use a _server handle_ like this, instead of a raw `ProcessId`,
+is to ensure type compatibility between client and server, in cases where the
+server has been written to generically deal with various types whilst the client
+needs to reify its calls/casts over a specific type. To demonstrate this
+approach, we'll consider the [`Registry`][1] module, which provides an enhanced
+_process registry_ that provides name registration services and also behaves
+like a per-process, global key-value store.
+
+Each `Registry` server deals with specific types of keys and values. Allowing
+clients to send and receive instructions pertaining to a registry server without
+knowing the exact types the server was _spawned_ to handle, is a recipe for
+disaster, since the client is very likely to block indefinitely if the expected
+types do not match up, since the server will never process such requests.
+We can alleviate this problem using phantom type parameters, but storing only
+the real `ProcessId` we need to communicate with the server, whilst utilising
+the compiler to ensure the correct types are assumed at both ends.
+
+{% highlight haskell %}
+data Registry k v = Registry { registryPid :: ProcessId }
+ deriving (Typeable, Generic, Show, Eq)
+instance (Keyable k, Serializable v) => Binary (Registry k v) where
+{% endhighlight %}
+
+In order to start our registry, we need to know the specific `k` and `v` types,
+but we do not have real values of these, so we use scoped type variables to reify
+them when creating the `Registry` handle:
+
+{% highlight haskell %}
+start :: forall k v. (Keyable k, Serializable v) => Process (Registry k v)
+start = return . Registry =<< spawnLocal (run (undefined :: Registry k v))
+
+run :: forall k v. (Keyable k, Serializable v) => Registry k v -> Process ()
+run _ =
+ MP.pserve () (const $ return $ InitOk initState Infinity) serverDefinition
+ -- etc.... 
+{% endhighlight %}
+
+Having wrapped the `ProcessId` in a newtype that ensures the types with which
+the server was initialised are respected by clients, we use the same approach
+as earlier to force clients of our API to interact with the server not only
+using the requisite call/cast protocol, but also providing the correct types
+in the form of a valid handle.
+
+{% highlight haskell %}
+addProperty :: (Keyable k, Serializable v)
+ => Registry k v -> k -> v -> Process RegisterKeyReply
+addProperty reg k v = ....
+{% endhighlight %}
+
+So long as we only expose `Registry` newtype construction via our `start` API,
+clients cannot forge a registry handle and both client and server can rely on
+the compiler to have enforced the correct types for all our interactions.
+
+------
+> ![Info: ][info] Forcing users to interact with your process via an opaque
+> handle is a good habit to get into, as is hiding the `ProcessId` where
+> possible. Use phantom types along with these _server handles_, to ensure
+> clients do not send unexpected data to the server.
+------
+
+Of course, you might actually _need_ the server's `ProcessId` sometimes,
+perhaps for monitoring, name registration or similar schemes that operate
+explicitly on a `ProcessId`. It is also common to _need_ support for sending
+info messages. Some APIs are built on "plain old messaging" via `send` and
+therefore completely hiding your `ProcessId` becomes the right way to expose
+an API to your clients, but the wrong way to expose your process to other APIs
+it is utilising.
+
+In these situations, the [`Resolvable`][rsbl] and [`Routable`][rtbl] typeclasses
+are your friend. By providing a `Resolvable` instance, you can expose your
+`ProcessId` to peers that really need it, whilst documenting (via the design
+decision to only expose the `ProcessId` via a typeclass) the need to use the
+handle in client code.
+
+{% highlight haskell %}
+instance Resolvable (Registry k v) where
+ resolve = return . Just . 
registryPid
+{% endhighlight %}
+
+The [`Routable`][rtbl] typeclass provides a means to dispatch messages without
+having to know the implementation details behind the scenes. This provides us
+with a means for APIs that need to send messages directly to our process to do
+so via the opaque handle, without us exposing the `ProcessId` to them directly.
+(Of course, such APIs have to be written with [`Routable`][rtbl] in mind!)
+
+There is a default (and fairly efficient) instance of [`Routable`][rtbl] for all
+[`Resolvable`][rsbl] instances, so it is usually enough to implement the latter.
+An explicit implementation for our `Registry` would look like this:
+
+{% highlight haskell %}
+instance Routable (Registry k v) where
+ sendTo reg msg = send (registryPid reg) msg
+ unsafeSendTo reg msg = unsafeSend (registryPid reg) msg
+{% endhighlight %}
+
+Similar typeclasses are provided for the many occasions when you need to link
+to or kill a process without knowing its `ProcessId`:
+
+{% highlight haskell %}
+class Linkable a where
+ -- | Create a /link/ with the supplied object.
+ linkTo :: a -> Process ()
+
+class Killable a where
+ killProc :: a -> String -> Process ()
+ exitProc :: (Serializable m) => a -> m -> Process ()
+{% endhighlight %}
+
+Again, there are default instances of both typeclasses for all [`Resolvable`][rsbl]
+types, so it is enough to provide just that instance for your handles.
+
+### Using Typed Channels
+
+Typed Channels can be used in two ways via the managed process API, either as
+inputs to the server or as a _reply channel_ for RPC style interactions that
+offer an alternative to the `call` approach.
+
+#### Reply Channels
+
+When using the `call` API, the server can reply with a datum that doesn't
+match the type(s) the client expects. This will cause the client to either
+deadlock or timeout, depending on which variant of `call` was used. 
This isn't
+usually a problem, since the server author also writes the client facing API(s)
+and can therefore carefully check that the correct types are being returned.
+That's still potentially error prone during development however, and using a
+`SendPort` as a reply channel can make it easier to spot potential type
+discrepancies.
+
+The machinery behind _reply channels_ is very simple: We create a new channel
+for the reply and pass the `SendPort` to the server along with our input message.
+The server is responsible for sending its reply to the given `SendPort` and the
+corresponding `ReceivePort` is returned so the caller can wait on it. Of course,
+if no corresponding handler is present in the server definition, there may be no
+reply (and depending on the server's `unhandledMessagePolicy`, we may crash the
+server).
+
+------
+> ![Warning: ][alert] Using typed _reply channels_ does not guarantee against
+> type mismatches! The server might not recognise the message type or the type
+> of the reply channel, in which case the message will be considered an _unhandled_
+> input and dealt with accordingly.
+------
+
+Typed channels are better suited to handling deferred client-server RPC calls
+than plain inter-process messaging. The only non-blocking `call` API is based
+on [`Async`][2] and its only failure mode is an `AsyncFailed` result containing
+a corresponding `ExitReason`. The `callTimeout` API is equally limited, since
+once its delay is exceeded (and the call times out), you cannot subsequently
+retry listening for the message - the client is on its own at this point, and
+has to deal with potentially stray (and un-ordered!) replies using the low
+level `flushPendingCalls` API. 
By using a typed channel for replies, we can avoid +both these issues since after the RPC is initiated, the client can defer obtaining +a reply from the `ReceivePort` until it's ready, timeout waiting for the reply +**and** try again at a later time and even wait on the results of multiple RPC +calls (to one or _more_ servers) at the same by merging the ports. + +We might wish to block and wait for a reply immediately, treating the API just +we would `call`. Two blocking operations are provided to simplify this, one of +which returns an `ExitReason` on failure, whilst the other crashes (with the given +`ExitReason` of course!). The implementation is precisely what you'd expect a +blocking _call_ to do, right up to monitoring the server for potential exit signals +(so as not to deadlock the client if the server dies before replying) - all of +which is handled by `awaitResponse` in the platform's `Primitives` module. + +{% highlight haskell %} +syncSafeCallChan server msg = do + rp <- callChan server msg + awaitResponse server [ matchChan rp (return . Right) ] +{% endhighlight %} + +This might sound like a vast improvement on the usual combination of a client +API that uses `call` and a corresponding `handleCall` in the process definition, +with the programmer left to ensure the types always match up. In reality, there +is a trade-off to be made however. Using the `handleCall` APIs means that our server +side code can use the fluent server API for state changes, immediate replies and +so on. None of these features will work with the corollary family of +`handleRpcChan` functions. 
The difference is perhaps an aesthetic one, as the
+following example code demonstrates:
+
+{% highlight haskell %}
+-- two versions of the same handler, one for calls, one for typed (reply) channels
+
+data State
+data Input
+data Output
+
+-- typeable and binary instances omitted for brevity
+
+-- client code
+
+callDemo :: ProcessId -> Process Output
+callDemo server = call server Input
+
+chanDemo :: ProcessId -> Process Output
+chanDemo server = syncCallChan server Input
+
+-- server code (process definition omitted for brevity)
+
+callHandler :: Dispatcher State
+callHandler = handleCall $ \state Input -> reply Output state
+
+chanHandler :: Dispatcher State
+chanHandler = handleRpcChan $ \state port Input -> replyChan port Output >> continue state
+{% endhighlight %}
+
+------
+> ![Info: ][info] Using typed channels for replies is both flexible and efficient.
+> The trade-off is that you must remember to send a reply from the server explicitly,
+> whereas the `call` API forces you to decide how to respond (to the client) via the
+> `ProcessReply` type which server-side call _handlers_ have to evaluate to.
+------
+
+#### Input (Control) Channels
+
+An alternative input plane for managed process servers, /control channels/ provide a
+number of benefits over the standard `call` and `cast` APIs. These include efficiency
+- _typed channels_ are very lightweight constructs! - and type safety, as well as
+giving the server the ability to prioritise information sent on control channels over
+other traffic.
+
+Using typed channels as inputs to your managed process is _the_ most efficient way
+to enable client-server communication, particularly for intra-node traffic, due to
+their internal use of STM (and in particular, its use during selective receives).
+
+In order to use control channels as input planes, it is necessary to _leak_ their
+
One way would be to `send` it on demand, but the +simplest approach is actually to initialise a handle with all the relevant SendPorts +and return this to the spawning process via an MVar (or similar construct). Because a +`SendPort` is `Serializable`, forwarding them (or the handle they're contained within) +is no problem either. Combining control channels with opaque handles is another great +way to enforce additional type safety, since the channels must be initialised by the +server code before it can create handlers for them and the client code that passes +data to them (via the `SendPort`) is bound to exactly the same type(s)! This means that +there can be no ambiguity and therefore no unhandled messages due to runtime type +mismatches - the compiler will catch that sort of thing for us. + +[1]: http://hackage.haskell.org/package/distributed-process-platform/Control-Distributed-Process-Platform-Service-Registry.html +[2]: http://hackage.haskell.org/package/distributed-process-platform/Control-Distributed-Process-Platform-Async.html +[3]: http://hackage.haskell.org/package/distributed-process-platform/Control-Distributed-Process-Platform-Service-SystemLog.html +[mgmt]: http://hackage.haskell.org/package/distributed-process/Control-Distributed-Process-Management.html +[dbg]: http://hackage.haskell.org/package/distributed-process/Control-Distributed-Process-Debug.html +[rtbl]: http://hackage.haskell.org/package/distributed-proces-platforms/Control-Distributed-Process-Platform.html#t:Routable +[rsbl]: http://hackage.haskell.org/package/distributed-process-platform/Control-Distributed-Process-Platform.html#t:Resolvable +[alert]: /img/alert.png +[info]: /img/info.png +[policy]: http://hackage.haskell.org/package/distributed-process-platform/Control-Distributed-Process-Platform-ManagedProcess.html#t:UnhandledMessagePolicy + From 0ea265f4fd10fc746dc525e664ecbc68ad703815 Mon Sep 17 00:00:00 2001 From: Tim Watson Date: Fri, 28 Feb 2014 17:11:55 +0000 Subject: [PATCH 
010/108] Tutorial 6 improvements --- tutorials/ch6.md | 292 ++++++++++++++++++++++++++++++++++++++++++----- 1 file changed, 261 insertions(+), 31 deletions(-) diff --git a/tutorials/ch6.md b/tutorials/ch6.md index 015c176..1d46084 100644 --- a/tutorials/ch6.md +++ b/tutorials/ch6.md @@ -113,8 +113,8 @@ Each `Registry` server deals with specific types of keys and values. Allowing clients to send and receive instructions pertaining to a registry server without knowing the exact types the server was _spawned_ to handle, is a recipe for disaster, since the client is very likely to block indefinitely if the expected -types do not match up, since the server will never process such requests. -We can alleviate this problem using phantom type parameters, but storing only +request types don't match up, since the server will ignore them. +We can alleviate this problem using phantom type parameters, storing only the real `ProcessId` we need to communicate with the server, whilst utilising the compiler to ensure the correct types are assumed at both ends. @@ -216,7 +216,7 @@ types, so it is enough to provide just that instance for your handles. Typed Channels can be used in two ways via the managed process API, either as inputs to the server or as a _reply channel_ for RPC style interactions that -offer an alternative to the `call` approach. +offer an alternative to using `call`. #### Reply Channels @@ -225,9 +225,8 @@ match the type(s) the client expects. This will cause the client to either deadlock or timeout, depending on which variant of `call` was used. This isn't usually a problem, since the server author also writes the client facing API(s) and can therefore carefully check that the correct types are being returned. -That's still potentially error prone during development however, and using a -`SendPort` as a reply channel can make it easier to spot potential type -discrepancies. 
+That's still potentially error prone however, and using a `SendPort` as a reply +channel can make it easier to spot potential type discrepancies. The machinery behind _reply channels_ is very simple: We create a new channel for the reply and pass the `SendPort` to the server along with our input message. @@ -245,7 +244,7 @@ server). ------ Typed channels are better suited to handling deferred client-server RPC calls -than plain inter-process messaging. The only non-blocking `call` API is based +than plain inter-process messaging too. The only non-blocking `call` API is based on [`Async`][2] and its only failure mode is an `AsyncFailed` result containing a corresponding `ExitReason`. The `callTimeout` API is equally limited, since once its delay is exceeded (and the call times out), you cannot subsequently @@ -257,13 +256,13 @@ a reply from the `ReceivePort` until it's ready, timeout waiting for the reply **and** try again at a later time and even wait on the results of multiple RPC calls (to one or _more_ servers) at the same by merging the ports. -We might wish to block and wait for a reply immediately, treating the API just -we would `call`. Two blocking operations are provided to simplify this, one of -which returns an `ExitReason` on failure, whilst the other crashes (with the given -`ExitReason` of course!). The implementation is precisely what you'd expect a -blocking _call_ to do, right up to monitoring the server for potential exit signals -(so as not to deadlock the client if the server dies before replying) - all of -which is handled by `awaitResponse` in the platform's `Primitives` module. +If we wish to block and wait for a reply immediately (just as we would with `call`), +two blocking operations are provided to simplify the task, one of which returns an +`ExitReason` on failure, whilst the other crashes (with the given `ExitReason` of +course!). 
The implementation is precisely what you'd expect a blocking _call_ to +do, right up to monitoring the server for potential exit signals (so as not to +deadlock the client if the server dies before replying) - all of which is handled +by `awaitResponse` in the platform's `Primitives` module. {% highlight haskell %} syncSafeCallChan server msg = do @@ -277,8 +276,9 @@ with the programmer left to ensure the types always match up. In reality, there is a trade-off to be made however. Using the `handleCall` APIs means that our server side code can use the fluent server API for state changes, immediate replies and so on. None of these features will work with the corollary family of -`handleRpcChan` functions. The difference is perhaps an aesthetic one, as the -following example code demonstrates: +`handleRpcChan` functions. Whether or not the difference is merely aesthetic, we +leave as a question for the reader to determine. The following example demonstrates +the use of reply channels: {% highlight haskell %} -- two versions of the same handler, one for calls, one for typed (reply) channels @@ -315,27 +315,256 @@ chanHandler = handleRpcChan $ \state port Input -> replyChan port Output >> cont #### Input (Control) Channels -An alternative input plane for managed process servers, /control channels/ provide a -number of benefits over the standard `call` and `cast` APIs. These include efficiency -- _typed channels_ are very lightweight constructs! - and type safety, as well as -giving the server the ability to prioritise information sent on control channels over -other traffic. +An alternative input plane managed process servers; _Control Channels_ provide a +number of benefits above and beyond both the standard `call` and `cast` APIs and the +use of reply channels. These include efficiency - _typed channels_ are very lightweight +constructs in general! 
- and type safety, as well as giving the server the ability to +prioritise information sent on control channels over other traffic. Using typed channels as inputs to your managed process is _the_ most efficient way to enable client-server communication, particularly for intra-node traffic, due to their internal use of STM (and in particular, its use during selective receives). +Control channels can provide an alternative to prioritised process definitions, since +their use of channels ensures that, providing the control channel handler(s) occur +in the process definition's `apiHandlers` list before the other dispatchers, any +messages received on those channels will be prioritised over other traffic. This is +the most efficient kind of prioritisation - not much use if you need to prioritise +_info messages_ of course, but very useful if _control messages_ need to be given +priority over other inputs. -In order to use control channels as input planes, it is necessary to _leak_ their +------ +> ![Warning: ][alert] Control channels are **not** compatible with prioritised +> process definitions! The type system does not prevent them from being declared +> though, since they _are_ represented by a `Dispatcher` and therefore deemded +> valid entries of the `apiHandlers` field. Upon startup, a prioritised process +> definition that contains control channel dispatchers in its `apiHandlers` will +> immediately exit with the reason `ExitOther "IllegalControlChannel"` though. +------ + +In order to use a typed channel as an input plane, it is necessary to _leak_ the `SendPort` to your clients somehow. One way would be to `send` it on demand, but the -simplest approach is actually to initialise a handle with all the relevant SendPorts -and return this to the spawning process via an MVar (or similar construct). Because a -`SendPort` is `Serializable`, forwarding them (or the handle they're contained within) -is no problem either. 
Combining control channels with opaque handles is another great -way to enforce additional type safety, since the channels must be initialised by the -server code before it can create handlers for them and the client code that passes -data to them (via the `SendPort`) is bound to exactly the same type(s)! This means that -there can be no ambiguity and therefore no unhandled messages due to runtime type -mismatches - the compiler will catch that sort of thing for us. +simplest approach is actually to initialise a handle with all the relevant send ports +and return this to the spawning process via a private channel, MVar or STM (or similar). +Because a `SendPort` is `Serializable`, forwarding them (or the handle they're +contained within) is no problem either. + +Since typed channels are a one way street, there's no direct API support for RPC calls +when using them to send data to a server. The work-around for this remains simple, +type-safe and elegant though: we encode a reply channel into our command/request datum +so the server knows where (and with what type) to reply. This does increase the amount +of boilerplate code the client-facing API has to endure, but it's a small price to pay +for the efficiency and additional type safety provided. + +First, we'll look at an example of a single control channel being used with the +`chanServe` API. This handles the messy details of passing the control channel back +to the calling process, at least to some extent. For this example, we'll examine the +[`Mailbox`][mailbox] module, since this combines a fire-and-forget control channel with +an opaque server handle. + +{% highlight haskell %} +-- our handle is fairly simple +data Mailbox = Mailbox { pid :: !ProcessId + , cchan :: !(ControlPort ControlMessage) + } deriving (Typeable, Generic, Eq) +instance Binary Mailbox where + +instance Linkable Mailbox where + linkTo = link . pid + +instance Resolvable Mailbox where + resolve = return . Just . 
pid + +-- lots of details elided.... + +-- Starting the mailbox involves both spawning, and passing back the process id, +-- plus we need to get our hands on a control port for the control channel! + +doStartMailbox :: Maybe SupervisorPid + -> ProcessId + -> BufferType + -> Limit + -> Process Mailbox +doStartMailbox mSp p b l = do + bchan <- liftIO $ newBroadcastTChanIO + rchan <- liftIO $ atomically $ dupTChan bchan + spawnLocal (maybeLink mSp >> runMailbox bchan p b l) >>= \pid -> do + cc <- liftIO $ atomically $ readTChan rchan + return $ Mailbox pid cc -- return our opaque handle! + where + maybeLink Nothing = return () + maybeLink (Just p') = link p' + +runMailbox :: TChan (ControlPort ControlMessage) + -> ProcessId + -> BufferType + -> Limit + -> Process () +runMailbox tc pid buffT maxSz = do + link pid + tc' <- liftIO $ atomically $ dupTChan tc + MP.chanServe (pid, buffT, maxSz) (mboxInit tc') (processDefinition pid tc) + +mboxInit :: TChan (ControlPort ControlMessage) + -> InitHandler (ProcessId, BufferType, Limit) State +mboxInit tc (pid, buffT, maxSz) = do + cc <- liftIO $ atomically $ readTChan tc + return $ InitOk (State Seq.empty $ defaultState buffT maxSz pid cc) Infinity + +processDefinition :: ProcessId + -> TChan (ControlPort ControlMessage) + -> ControlChannel ControlMessage + -> Process (ProcessDefinition State) +processDefinition pid tc cc = do + liftIO $ atomically $ writeTChan tc $ channelControlPort cc + return $ defaultProcess { apiHandlers = [ + handleControlChan cc handleControlMessages + , Restricted.handleCall handleGetStats + ] + , infoHandlers = [ handleInfo handlePost + , handleRaw handleRawInputs ] + , unhandledMessagePolicy = DeadLetter pid + } :: Process (ProcessDefinition State) +{% endhighlight %} + +Since the rest of the mailbox initialisation code is quite complex, we'll leave it +there for now. 
The important details to take away are the use of `chanServe` +and its requirement for a thunk that initialises the `ProcessDefinition`, so it can +perform IO - a pre-requisite to sharing the control channels with the spawning process, +which must use STM or something similar in order to share data with the newly spawned +server's initialisation code. In our case, we want to pass the control port from the +thunk passed to `chanServe` back to both the spawning process _and_ the init function +(which is normally de-coupled from the initialising thunk), which makes this a good +example of how to utilise a broadcast TChan (or TQueue) to share control plane +structures during initialisation. + +------ + +Now we'll cook up another (contrived) example that uses multiple typed control channels, +demonstrating how to create control channels explicitly, how to obtain a `ControlPort` +for each one, one way of passing these back to the process spawning the server (so as +to fill in the opaque server handle) and how to utilise these in your client code, +complete with the use of typed reply channels. This code will not use `chanServe`, +since _that_ API only supports a single control channel - the original purpose behind +the control channel concept - and instead, we'll create the process loop ourselves, +using the exported low level `recvLoop` function. + +{% highlight haskell %} + +type NumRequests = Int + +data EchoServer = EchoServer { echoRequests :: ControlPort String + , statRequests :: ControlPort NumRequests + , serverPid :: ProcessId + } + deriving (Typeable, Generic) +instance Binary EchoServer where +instance NFData EchoServer where + +instance Resolvable EchoServer where + resolve = return . Just . serverPid + +instance Linkable EchoServer where + linkTo = link . 
serverPid + +-- The server takes a String and returns it verbatim + +data EchoRequest = EchoReq !String !(SendPort String) + deriving (Typeable, Generic) +instance Binary EchoRequest where +instance NFData EchoRequest where + +data StatsRequest = StatsReq !(SendPort Int) + deriving (Typeable, Generic) +instance Binary StatsRequest where +instance NFData StatsRequest where + +-- client code + +echo :: EchoServer -> String -> Process String +echo h s = do + (sp, rp) <- newChan + let req = EchoReq s sp + sendControlMessage (echoRequests h) req + receiveWait [ matchChan rp return ] + +stats :: EchoServer -> Process NumRequests +stats h = do + (sp, rp) <- newChan + let req = StatsReq sp + sendControlMessage (statRequests h) req + receiveWait [ matchChan rp return ] + +demo :: Process () +demo = do + server <- spawnEchoServer + foobar <- echo server "foobar" + foobar `shouldBe` equalTo "foobar" + + baz <- echo server "baz" + baz `shouldBe` equalTo baz + + count <- stats server + count `shouldBe` equalTo (2 :: NumRequests) + +-- server code + +spawnEchoServer :: Process EchoServer +spawnEchoServer = do + (sp, rp) <- newChan + pid <- spawnLocal $ runEchoServer sp + (echoPort, statsPort) <- receiveChan rp + return $ EchoServer echoPort statsPort pid + +runEchoServer :: SendPort (ControlPort EchoRequest, ControlPort StatsRequest) + -> Process () +runEchoServer portsChan = do + echoChan <- newControlChan + echoPort <- channelControlPort echoChan + statChan <- newControlChan + statPort <- channelControlPort statChan + sendChan portsChan (echoPort, statPort) + runProcess (recvLoop $ echoServerDefinition echoChan statChan ) echoServerInit + +echoServerInit :: InitHandler () NumRequests +echoServerInit = return $ InitOk (0 :: Int) Infinity + +echoServerDefinition :: ControlChannel EchoRequest + -> ControlChannel StatsRequest + -> ProcessDefinition NumRequests +echoServerDefinition echoChan statChan = + defaultProcess { + apiHandlers = [ handleControlChan echoChan handleEcho + , 
handleControlChan statChan handleStats + ] + } + +handleEcho :: NumRequests -> EchoRequest -> Process (ProcessAction State) +handleEcho count (EchoReq req replyTo) = do + replyChan replyTo req -- echo back the string + continue $ count + 1 + +handleStats :: NumRequests -> StatsRequest -> Process (ProcessAction State) +handleStats count (StatsReq replyTo) = do + replyChan replyTo count + continue count +{% endhighlight %} + +Although not very useful, this is a working example. Note that the client must +deal with a `ControlPort` and not the complete `ControlChannel` itself. Also +note that the server is completely responsible for replying (explicitly) to +the client using the send ports supplied in the request data. + +------ +> ![Info: ][info] Combining control channels with opaque handles is another great +> way to enforce additional type safety, since the channels must be initialised by +> the server code before it can create handlers for them and the client code that +> passes data to them (via the `SendPort`) is bound to exactly the same type(s)! +> Furthermore, adding reply channels (in the form of a `SendPort`) to the request +> types ensures that the replies will be handled correctly as well! As a result, +> there can be no ambiguity about the types involved for _either_ side of the +> client-server relationship and therefore no unhandled messages due to runtime +> type mismatches - the compiler will catch that sort of thing for us! +------ [1]: http://hackage.haskell.org/package/distributed-process-platform/Control-Distributed-Process-Platform-Service-Registry.html [2]: http://hackage.haskell.org/package/distributed-process-platform/Control-Distributed-Process-Platform-Async.html @@ -347,4 +576,5 @@ mismatches - the compiler will catch that sort of thing for us. 
[alert]: /img/alert.png [info]: /img/info.png [policy]: http://hackage.haskell.org/package/distributed-process-platform/Control-Distributed-Process-Platform-ManagedProcess.html#t:UnhandledMessagePolicy +[mailbox]: http://hackage.haskell.org/package/distributed-process-platform/Control-Distributed-Process-Platform-Execution-Mailbox.html From a4d571ddb22177e6d5aaaf0ac287700ad80f1de0 Mon Sep 17 00:00:00 2001 From: Tim Watson Date: Fri, 28 Feb 2014 17:24:30 +0000 Subject: [PATCH 011/108] fixes --- tutorials/ch1.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tutorials/ch1.md b/tutorials/ch1.md index 65a0c1f..a933919 100644 --- a/tutorials/ch1.md +++ b/tutorials/ch1.md @@ -25,7 +25,7 @@ If you're installing from source, the simplest method is to checkout the [Umbrella Project](https://github.com/haskell-distributed/cloud-haskell) and run `make` to obtain the complete set of source repositories for building Cloud Haskell. The additional makefiles bundled with the umbrella assume -that you have a recent version of cabal-dev installed. +that you have a recent version of cabal (with support for sandboxes) installed. 
### Creating a node From 51c7ff433285b2066c54878b1f3a54835b6cc59c Mon Sep 17 00:00:00 2001 From: Tim Watson Date: Fri, 28 Feb 2014 17:26:14 +0000 Subject: [PATCH 012/108] Move things to try and get sort to work in the menus --- tutorials/ch1.md => 1ch.md | 0 tutorials/ch2.md => 2ch.md | 0 tutorials/ch3.md => 3ch.md | 0 tutorials/ch4.md => 4ch.md | 0 tutorials/ch5.md => 5ch.md | 0 tutorials/ch6.md => 6ch.md | 0 6 files changed, 0 insertions(+), 0 deletions(-) rename tutorials/ch1.md => 1ch.md (100%) rename tutorials/ch2.md => 2ch.md (100%) rename tutorials/ch3.md => 3ch.md (100%) rename tutorials/ch4.md => 4ch.md (100%) rename tutorials/ch5.md => 5ch.md (100%) rename tutorials/ch6.md => 6ch.md (100%) diff --git a/tutorials/ch1.md b/1ch.md similarity index 100% rename from tutorials/ch1.md rename to 1ch.md diff --git a/tutorials/ch2.md b/2ch.md similarity index 100% rename from tutorials/ch2.md rename to 2ch.md diff --git a/tutorials/ch3.md b/3ch.md similarity index 100% rename from tutorials/ch3.md rename to 3ch.md diff --git a/tutorials/ch4.md b/4ch.md similarity index 100% rename from tutorials/ch4.md rename to 4ch.md diff --git a/tutorials/ch5.md b/5ch.md similarity index 100% rename from tutorials/ch5.md rename to 5ch.md diff --git a/tutorials/ch6.md b/6ch.md similarity index 100% rename from tutorials/ch6.md rename to 6ch.md From 42d5a0dfb56a9baea8699a63075d5fd9f24ae6e3 Mon Sep 17 00:00:00 2001 From: Tim Watson Date: Fri, 28 Feb 2014 17:31:58 +0000 Subject: [PATCH 013/108] Ooops --- 1ch.md => tutorials/1ch.md | 0 2ch.md => tutorials/2ch.md | 0 3ch.md => tutorials/3ch.md | 0 4ch.md => tutorials/4ch.md | 0 5ch.md => tutorials/5ch.md | 0 6ch.md => tutorials/6ch.md | 0 6 files changed, 0 insertions(+), 0 deletions(-) rename 1ch.md => tutorials/1ch.md (100%) rename 2ch.md => tutorials/2ch.md (100%) rename 3ch.md => tutorials/3ch.md (100%) rename 4ch.md => tutorials/4ch.md (100%) rename 5ch.md => tutorials/5ch.md (100%) rename 6ch.md => tutorials/6ch.md (100%) diff 
--git a/1ch.md b/tutorials/1ch.md similarity index 100% rename from 1ch.md rename to tutorials/1ch.md diff --git a/2ch.md b/tutorials/2ch.md similarity index 100% rename from 2ch.md rename to tutorials/2ch.md diff --git a/3ch.md b/tutorials/3ch.md similarity index 100% rename from 3ch.md rename to tutorials/3ch.md diff --git a/4ch.md b/tutorials/4ch.md similarity index 100% rename from 4ch.md rename to tutorials/4ch.md diff --git a/5ch.md b/tutorials/5ch.md similarity index 100% rename from 5ch.md rename to tutorials/5ch.md diff --git a/6ch.md b/tutorials/6ch.md similarity index 100% rename from 6ch.md rename to tutorials/6ch.md From c163bfb9bc7eb66f124b414f3a4678146d242311 Mon Sep 17 00:00:00 2001 From: Tim Watson Date: Fri, 28 Feb 2014 17:36:41 +0000 Subject: [PATCH 014/108] Try this... --- tutorials/1ch.md | 2 +- tutorials/2ch.md | 2 +- tutorials/3ch.md | 2 +- tutorials/4ch.md | 2 +- tutorials/5ch.md | 2 +- tutorials/6ch.md | 2 +- 6 files changed, 6 insertions(+), 6 deletions(-) diff --git a/tutorials/1ch.md b/tutorials/1ch.md index a933919..edb2ac4 100644 --- a/tutorials/1ch.md +++ b/tutorials/1ch.md @@ -2,7 +2,7 @@ layout: tutorial categories: tutorial sections: ['Getting Started', 'Installing from source', 'Creating a node', 'Sending messages', 'Spawning Remote Processes'] -title: Getting Started +title: 1. Getting Started --- ### Getting Started diff --git a/tutorials/2ch.md b/tutorials/2ch.md index 10db1d0..c16d302 100644 --- a/tutorials/2ch.md +++ b/tutorials/2ch.md @@ -2,7 +2,7 @@ layout: tutorial categories: tutorial sections: ['Overview', 'A Simple Example', 'Master Slave Configurations', 'Other Topologies and Backends'] -title: Managing Topologies +title: 2. 
Managing Topologies --- ### Overview diff --git a/tutorials/3ch.md b/tutorials/3ch.md index bd1a2ab..ea2e385 100644 --- a/tutorials/3ch.md +++ b/tutorials/3ch.md @@ -2,7 +2,7 @@ layout: tutorial categories: tutorial sections: ['Message Ordering', 'Selective Receive', 'Advanced Mailbox Processing', 'Typed Channels', 'Process Lifetime', 'Monitoring And Linking', 'Getting Process Info'] -title: Getting to know Processes +title: 3. Getting to know Processes --- ### Message Ordering diff --git a/tutorials/4ch.md b/tutorials/4ch.md index b04958e..0f71a76 100644 --- a/tutorials/4ch.md +++ b/tutorials/4ch.md @@ -2,7 +2,7 @@ layout: tutorial sections: ['Introduction', 'Managed Processes', 'A Basic Example', 'Building a Task Queue', 'Implementing the Client', 'Implementing the Server', 'Making use of Async', 'Wiring up Handlers', 'Putting it all together', 'Performance Considerations'] categories: tutorial -title: Managed Process Tutorial +title: 4. Managed Process Tutorial --- ### Introduction diff --git a/tutorials/5ch.md b/tutorials/5ch.md index f3e0fdf..45c28cb 100644 --- a/tutorials/5ch.md +++ b/tutorials/5ch.md @@ -2,7 +2,7 @@ layout: tutorial categories: tutorial sections: ['Introduction'] -title: Supervision Principles +title: 5. Supervision Principles --- ### Introduction diff --git a/tutorials/6ch.md b/tutorials/6ch.md index 1d46084..1269745 100644 --- a/tutorials/6ch.md +++ b/tutorials/6ch.md @@ -2,7 +2,7 @@ layout: tutorial sections: ['Introduction', 'Unexpected Messages', 'Hiding Implementation Details', 'Using Typed Channels'] categories: tutorial -title: Advanced Managed Processes +title: 6. 
Advanced Managed Processes --- ### Introduction From bc67edf1d997014235526488975e6bd0b0d059ce Mon Sep 17 00:00:00 2001 From: Tim Watson Date: Fri, 28 Feb 2014 17:41:34 +0000 Subject: [PATCH 015/108] List tutorials explicitly, since github's rendering doesn't seem to sort them correctly --- _includes/nav.html | 12 +++++++----- 1 file changed, 7 insertions(+), 5 deletions(-) diff --git a/_includes/nav.html b/_includes/nav.html index 0ae3ec2..dbb968a 100644 --- a/_includes/nav.html +++ b/_includes/nav.html @@ -14,11 +14,13 @@ From 8920df8fd2990d0900fb9c0e43571e33e6a1c1b9 Mon Sep 17 00:00:00 2001 From: Tim Watson Date: Fri, 28 Feb 2014 23:40:48 +0000 Subject: [PATCH 016/108] wiki changes --- wiki/contributing.md | 25 +++++++++++++------------ wiki/maintainers.md | 14 +++----------- wiki/reliability.md | 2 -- 3 files changed, 16 insertions(+), 25 deletions(-) diff --git a/wiki/contributing.md b/wiki/contributing.md index 01d8ab2..f6bac0a 100644 --- a/wiki/contributing.md +++ b/wiki/contributing.md @@ -24,11 +24,10 @@ We have a rather full backlog, so your help will be most welcome assisting us in clearing that. You can view the exiting open issues on the [jira issue tracker](https://cloud-haskell.atlassian.net/issues/?filter=10001). -If you wish to submit an issue there, you can do so without logging in, -although you obviously won't get any email notifications unless you create -an account and provide your email address. +If you wish to submit a new issue there, you cannot do so without logging in +creating an account (by providing your email address) and logging in. -It is also important to work out which component or sub-system should be +It is also helpful to work out which component or sub-system should be changed. You may wish to email the maintainers to discuss this first. ### __2. Make sure your patch merges cleanly__ @@ -47,7 +46,8 @@ local branch. 
For example: $ git checkout -b bugfix-issue123 -## make, add and commit your changes +## add and commit your changes +## base them on master for bugfixes or development for new features $ git checkout master $ git remote add upstream git://github.com/haskell-distributed/distributed-process.git @@ -70,9 +70,9 @@ conventions page [here](http://hackage.haskell.org/trac/ghc/wiki/WorkingConventi 1. try to make small patches - the bigger they are, the longer the pull request QA process will take 2. strictly separate all changes that affect functionality from those that just affect code layout, indentation, whitespace, filenames etc -3. always include the issue number (of the form `fixes #N`) in the final commit message for the patch - pull requests without an issue are unlikely to have been discussed (see above) +3. always include the issue number (of the form `PROJECT_CODE #resolve Fixed`) in the final commit message for the patch - pull requests without an issue are unlikely to have been discussed (see above) 4. use Unix conventions for line endings. If you are on Windows, ensure that git handles line-endings sanely by running `git config --global core.autocrlf false` -5. make sure you have setup git to use the correct name and email for your commits - see the [github help guide](https://help.github.com/articles/setting-your-email-in-git) +5. make sure you have setup git to use the correct name and email for your commits - see the [github help guide](https://help.github.com/articles/setting-your-email-in-git) - otherwise you won't be attributed in the scm history! ### __4. Make sure all the tests pass__ @@ -171,7 +171,7 @@ import Data.Blah import Data.Boom (Typeable) {% endhighlight %} -Personally I don't care *that much* about alignment for other things, +We generally don't care *that much* about alignment for other things, but as always, try to follow the convention in the file you're editing and don't change things just for the sake of it. 
@@ -186,18 +186,18 @@ punctuation. Comment every top level function (particularly exported functions), and provide a type signature; use Haddock syntax in the comments. -Comment every exported data type. Function example: +Comment every exported data type. Function example: {% highlight haskell %} --- | Send a message on a socket. The socket must be in a connected --- state. Returns the number of bytes sent. Applications are +-- | Send a message on a socket. The socket must be in a connected +-- state. Returns the number of bytes sent. Applications are -- responsible for ensuring that all data has been sent. send :: Socket -- ^ Connected socket -> ByteString -- ^ Data to send -> IO Int -- ^ Bytes sent {% endhighlight %} -For functions the documentation should give enough information to +For functions, the documentation should give enough information to apply the function without looking at the function's definition. ### Naming @@ -214,3 +214,4 @@ abbreviation. For example, write `HttpServer` instead of Use singular when naming modules e.g. use `Data.Map` and `Data.ByteString.Internal` instead of `Data.Maps` and `Data.ByteString.Internals`. + diff --git a/wiki/maintainers.md b/wiki/maintainers.md index 284d35e..e39dfed 100644 --- a/wiki/maintainers.md +++ b/wiki/maintainers.md @@ -117,17 +117,9 @@ What's good for the goose... #### Making API documentation available on the website -Currently this is a manual process. If you don't sed/awk out the -reference/link paths, it'll be a mess. We will add a script to -handle this some time soon. I tend to only update the static -documentation for d-p and d-p-platform, at least until the process has -been automated. I also do this *only* for mainline branches (i.e., -for development and master), although again, automation could solve -a lot of issues there. 
- -There is also an open ticket to set up nightly builds, which will -update the HEAD haddocks (on the website) and produce an 'sdist' -bundle and add that to the website too. +There is an open ticket to set up nightly builds, which will update +the HEAD haddocks (on the website) and produce an 'sdist' bundle and +add that to the website too. See https://cloud-haskell.atlassian.net/browse/INFRA-1 for details. diff --git a/wiki/reliability.md b/wiki/reliability.md index 9e9cc05..26e99b7 100644 --- a/wiki/reliability.md +++ b/wiki/reliability.md @@ -28,8 +28,6 @@ child processes. A supervisors *children* can be either worker processes or supervisors, which allows us to build hierarchical process structures (called supervision trees in Erlang parlance). -The supervision APIs are a work in progress. - [1]: http://en.wikipedia.org/wiki/Open_Telecom_Platform [2]: http://www.erlang.org/doc/design_principles/sup_princ.html [3]: http://www.erlang.org/doc/man/supervisor.html From 40e4c4a4d4393444a5488e8e03590305788f23d8 Mon Sep 17 00:00:00 2001 From: Tim Watson Date: Thu, 6 Mar 2014 18:40:35 +0000 Subject: [PATCH 017/108] Further tutorial improvements --- _layouts/site.html | 2 +- img/OTP-Diagrams.png | Bin 0 -> 55967 bytes img/one-for-all-left-to-right.png | Bin 0 -> 41971 bytes img/one-for-all.png | Bin 0 -> 52744 bytes img/one-for-one.png | Bin 0 -> 27208 bytes img/sup1.png | Bin 0 -> 10466 bytes tutorials/5ch.md | 156 ++++++++++++++++++++++++++---- 7 files changed, 137 insertions(+), 21 deletions(-) create mode 100644 img/OTP-Diagrams.png create mode 100644 img/one-for-all-left-to-right.png create mode 100644 img/one-for-all.png create mode 100644 img/one-for-one.png create mode 100644 img/sup1.png diff --git a/_layouts/site.html b/_layouts/site.html index 78e87f8..b6ce9ee 100644 --- a/_layouts/site.html +++ b/_layouts/site.html @@ -21,7 +21,7 @@ diff --git a/img/OTP-Diagrams.png b/img/OTP-Diagrams.png new file mode 100644 index 
0000000000000000000000000000000000000000..a103f92f1dad22d5765f63b2ae1727519a9948ee GIT binary patch literal 55967 zcmZ_01yCGa)HVnMgA7gvcXxNU5H!f(ZUI7oph1GWgy0@DFt|e?ID<0~G{IeiTX6g5 zt#7w>tM=RKf&#h0@J8V$0?uSYr(>*DZ9hWeI>#g2WT^HphTHiLSl#i=U&jGZSNb9^NM&H>)uM-C}!V zSy=+D&%$!1uHgUuh;{^R#~&`X!zDnX|Lb80`UxvU2`vi#-;e)JhWziRih?O9#j*eQ z-zo}<-gbNa|Fa|}(7S;>yBgD0>+%D!iZ2wDyV3q9HcH6^HPnO^M_jiL- z?{li##1q^4K=RG%i&P*7gX&?>Nmh}pJ}c7fBaa@uW4~-t$7=W`wR``iW`cHKnx9k|N*=0HHB7(Al~ ze1mIp9+q46Ti31RC%{B0o-*l&;~e}cVVPy^iPxOni=CdRjVRUMmJlj?$GFgaS9i4L zV>lbot~49J#PpRyFg+vBPq9L`oZk0hyPhOWk>|PrQu01Xwh@qAc;V*Md5b!sRHTKDq_*OYg``g2Zne0N+r^Yx(ruT) zQBPP8770kA!~S=fHvfk)WuvZltzgbv3y|J0OZ(hO#>%%FmFhMjsKJEV_Ou#L8ogrd zZK@}a1w@Q&4ea$J3II=thAnRNMRAQYt7p>AnFgh$9_Lt)FB=&XCQr+>Cb+zO7E(;_ zY%$CG>{Ms!KQ)k?nfadQ)X7?M|K3ePRCf^C17QN8HM?!SK{lNI{==pQI1b7or^0LvJpNU#ArIL3Y~lkReFB16b=Za4(x0jj!F0r~Mk&ta}6 zmnfM1@wYK_KGq_(E!#cQYM~uzpNVX7*rcEDPobN z#S(GA_e=A+e|-3JK}G_WS;sUd){ND2Dx^F7fy@F=NX3JFk$!pfXtkLsv-xRA zv#pg3^Tk~Lid1QT*JtoxyF`P&^Mk}91D8tNFfhEK=5q!dZX@j_vl$PF<+G*&Q;T~l z2i|U=x8C1#K=0~QA)Ebxtdh|hBJ}M&HT{vwiB4|&+)wF@EG$FENPOJrQAjarM69G_ zJ=uZ|$=HFyC^?fP2d3Tz$5~;nDA-VIM9`4_|JxQL_$r0z? 
zbr9?+)6y>K`O@d5^3_@uSg2EFjmwQ(n`j+Ry6jdz0Z?>Wy)c4oMbbm+Z3tA9pKGtc zAwRv2J{p~Gc==xM7rL8ITvo~cM*eOvg69J>3nfrOuJuRNtXLM<@_*^Ofw!#1XH4UK zQC?MTWi0<>__0|Bp25QtP}?C_({{n09|9X4N=)PK7Y6pk8Fb4VMbWGX z*LC|#?W>8MPw_+U|AmqfTFv}k;op1yHN7=zOXjn)I6E+1 z$D(>(hlMp*4}cuYKD-20t8a|u%SvQWC%Uw8rmh9OtdO5>eF`S^4pRsk1QrOpta5s< zNN5G-Jci}C8mO?XjdR)#-&oLjy}!{mk_e0%3u0FM*!Il4@4ZzGDbu3Mx0OSWiMP7* zZkd0^Nx9fmO}oqwdN%sleyM-gJ{?%@rANp1Np%t6Hg|#2@Og~8sNggy>LkGM{@?zV zfTT}EL1V{|aD0P_MBNZnRT4g*%%oPV)-nz$w3jaTuo>CnE}!ym;aO(!I9Z`L%l?$Y zLgRNpTFHi@X#WUN9r#$~A7^++dOo=q1)n6zPs=t)PgGsOQVVfcd7%UNyUx_M+wNCN zKy^=IFE#~uREpkw_3s!P8G1?N+@&t#)HN>#Jp@a*q|n#}FjUKw<*=C-hl{z>>IHVa zPh0jk3TnNEcO{Fsm`f;oqP&J`Jq}vpIsTemq()~DPrPmwh%4Slf zl1gQlG5e`X-R)85DOLGRll6OCE3$REGTA4dO8-rap99ZqT-iP4%?x1eT zi(7_^fyd&A}Y{dI6GGvtBC+0$k3+TU?Sm@8Rti|C=hckAR(>rQ&c+S zHcnTpFle7M%;7%w5bB)9eeriiQ}l~|dO67xO&M>v%6c-}LWdzhslqkad&9TEa|z(^q&;irPdgB4;Wbhv zAv|Ic#I5m4f%S^J9~Y>Lcz5a0$w;|Pe@>p1pYnI*fosx5meN+bEb||y?SI5kJbe9K zV`g%(GrA{`cW{vH#Bw4XCl2bV)jcUE02ICdBK!;bMDnLtHsYnmY4`(E!jS~~mN8Gd zn}xqKUxrLjAkFS?E%H%!Uhbcb?B~HTFggTqN z{0n55Nwc4lu{+w%-7f;{m{-V(DwVoi#vid@7h6J6V9nMQP4K*}OaDjXQ(0x+xoDmE z4_2_{SRWnBV8ZN$E1T|h6hXgpA`(4xss zzV&g+RK7A>V}=kuVkKEDL7}=5wV554CriYm@NU6V!&0whY+OB7WX9Ao1UMUA6ms0z zRvNW$|LUdY#TT4Tc`+L+^c=wp}A~ zFD8rdsKXdoXd2a~h95QVk`9b4-;d{rHTeJGFaI1^9}I3=j2L(~wb}g+778RTiZ!Tv zU4YHqj}CnAxR|fDv!;|EK>MdwboRknqg=a4)9r3^bmBOc@?uJID z{6^s4bM&3DiNklJ^answ_j*b9i!Ixo6ZDhRfQRb^N!_G5x$idt-ZW2OB%RN`S+5?j z_qe_>{4y`g*;3UPE6oIpi)D2DyZF)tvy$X1p?t+V_)2R7m07ot=4ykre7-fBk!0dk z;-W!!ahcfko>F^Mk`dCy#f9K~FxJ#tnaIp zNjzQmrn|qs+0PhA?+!rX1>7*hzXD?EdH!SDwh2t32FK`dmRQvZ*zCX8f}(ho4u-L*WO0=!K}yq{_ij0=!Anw zCMpRwDB>XZCQ9wkC}~xhxv{E$I^Ty8N9TOeugDPR_cXd-lVJAU>;5yQzC?fY4cu!eyaBv5V@ZTKAMH zlZ+t`$FCspeq_!#masMXGE(C{V_&BYST^m zklz`lA&nN-Z}k=z3$dO0u1H)tEI*LArD7E!tIThZ z2j!2xj&dmsBB~)TVUse$^4R;jj)lVRdajY zPETWmD;hKo81)92y==Zses|Ec7O#Ckq!uzh3MC;2m2tjiE_VVbeG_&`?r(r4X~x(p zA^@gS+NTS87nWBFGpifp1Bji9p1GyEP_KBA4jLb`S|#*-_b`5(F2g?fgR;M#-1@U? 
zS!fg~bFYx&H7DDu;GTU!F%V;xc_LeA6eI&A@I@&wMAJ5IivDB2n9h#<$yL)TiG3OSSXi;%hlV{?p z9uiw4WFwxBLOyZS80KlpXbTmEIloqThHROS0>*(f)j2+><5glyxL(KH~|ZmnnS?!bVB zv!(a~e9O_)CrG6}*SF@~QM}UtEq+{&@316*RSkH7<9&3R@1xBWJ106M9u*pn-a;uf zGZ7J>3ijn_jQd0jj3^+;W_7N}q%Uz!xnPFFs9>-^O1JEoUV?CnEWqC#3s_xktV z14x;yx@C!!xU+d!DcoTSzY;P>BmN?YGi6^8O7r90CHh-wYF+_pAc)5CoffBmEm@tb&#Cr8mw&?VDx=HH#|~d~&PEJ1Ry*`4ReM`<_HfXSz|4eQm;-Nb(y?_! zB`&Gpgyr_mG&T>gC#{*sEkxc<;a|=A`autOk?vEsdyTI+9qH;dp1}!xV%UOjI3Uek z&vt3~X&FthnEJ`2nR}6IXEfsm2X_RCl8_RYi|qS#!lud{$bWZoe4Vs)JpuR@fk@wQ znND;4%)oPMQeHAsu z{6gO!3+D`Rmfz{c#f%ox;=F%E;Uife&uzS1%j6TXJ!;|VX>dK;A$j|g?5H+;x?T-7aJ@MQOU`6TprZWnoZVtAzMj>F%8&<$ zMdo_nDSl;p_`ApWt%~16{zI|P{&z^7<+{}QhXpgr&mdMFI{QwRL5w<^D{8R@nKOYt zldt|&kovZighH2-f}OJ1Hg@>^x2&B0e#V7P{yIsAj4U#^yPro!T@r)JC zdI=Pf(Za-e$87#FoJceuGpfnVvn%1D#d~x4I(}<%oRT9XBs_xp4x+4DBd$>SIe}2G zHqR-$ezbJ?&`<1Z*4O1$yL?^5ZY@=5J!QxNJZFCYdF3dWm|7l^-@0%SWi+i-X&0{3 zPsF1OJ~N2?Co*c@%q^(;6w-tF=Ag;A0u#v7bfzkuD%zeT|^Jbm)!g{opI zS*&Hh)W8}nYxaNiJ7g_E4NlOdRDRIZTYyVG2+3xFb)Z`KSk1ZBN_F1Fj!R5z1-{I zU3?E>d@^f#uigI7W5^;zZ(Brji7Tx_|KD2~$H45MamQ^4h6r%_gHC{cyO;CA>2o2X;suk^MNIaZEMekkBtnZI0R7D03_+vG-jHJF zO0O;^Ap{wSi5rH@;H~^>+UwF<1xu^l*sO2#dVeCg-|(_!Y{i*~>sLy6=MD16FcTp$ zXfhuD#gn--U8f4uC|ePJqyY*zvz7!I$BRen;F#MMP?*# zcK*`b@b_yqvh&P9?UTThI`wCVF5~oIyT623Situ>q0L-;nH6?+aaemC^oN1-WOeSx zOFqi8FHJhxA}%L=;CXt@PC$B=8fyh>f#jR&tp41^Ji5`<6=GhLf$VdMV$X7ZNUOVT zz80$YwjyGpdCdb%je1{Y_+h4^n)88IG5!cE4uThv9*|1^>0q{g6q%@fyOMNsHR5k^ z@TSsIS|-{1oax^J96yLVn#5>{zCHY77TXAaRqS)VDI9PiXLYknkr-!6PoRdpL$^*7Zopl@io#Yf#?}Ae&vwuVGDM9R^q|;l<#E zy0!08y<3RxagH_@cdyLxzZmiAF0{_*0GnvdY_JRSrY#u1UwT!^uZcZj<>0WexnukM zMa*EZZhFGkGc0GJxp9KMQc@SlfM^qMkq1&p)L5_dD)rLREXeU3kwJ+2XLWtjbVEi4 zR@kmJ2ySp62~Px=KTehmvw#LsqOPz9(1@;6F1DDywx0_QH~?xQ5`DpVQ73%Fyx zMg@WmnmB$=H=`Mp=6$Z9G|HzP9oqKS|Ge}V(D2xI?EdCKfXmKG6`m<*VVg=IM`{qA zs1+=Hb0Jt4JPA3ptG(n&f}Va%OEnVnh!?!eCW3T(p*Y^%v}Cj6me=)JDrZZvW|cWX z%+b=8^xqp<+z56T9W>nX7WvQ~bT|_#~j|vUto=Vh0qL!7Yh@;mJ 
z?B`sIkm62PG2l))#%gTQ_1&9(;=haBnQn2hsBBMa#zk8XFn^3J?&VIYgUI?uZsK6O zx?$tZMdDB}@_8r!uF_KKI&Bl2X-Kr;Wj3u-PZl9b@t6=ZKxhY+zH|QmD5JJZk(kM3@$NzFh8EmI3GPv8>=AW+{t4JmrWQ#a{B>N7~19I`f*tF$d zyJ4j0N6NN`a->o@do1_d2*dBjlx9crgwPw!*sILet_&INKOHz_?7qL>hGJ`vCLOCU zM`^1e6UD=AdZO5E#_s>M6|_9MPFcfdoV|K!ZmQA)ICP*7Vp3IF|3Ghe%kEyaYdeHsyU)wE#l^#v%Vgo*|=92D>mQ9oYV$u>2V;++@8`&CkwmFRC z_L9NQOsh(4_Pfz#3IF~DQG>VHtmHsnAM>^OM}vvKVmW6F+-XtplqSye75Z~+zdE$t z_kOpShFkLJ66gJ|b~eB8q*L!*r6IvS|tqW!3X$pPtp>g^V1#(vrbCm~^Y6u$Qxk5Q0ivp^wP zSBS7lqE{?7zWTslKIiTrRuv1b%$L!D+7b0%Bs7oA=Hrg8+4a7kDS7+fORrV0(Y1(2 zi6)@980{a^jG|fb+i!=#d9}QcjQ;`A3a`kpc8*RD?OSpEW~ozE0@79TIqsFS_-+09 z2|JQA*o{(Q5r3h415d2degQlJct)Qu@D%zB6{3zJ=t7FR5DE+Z zpGt#_eSzmhqZXcYl&Cr7=V?1$BVp}p7wLD+=~PLkQ66g52cNbZ%=eTLY!_6kP`(SR z3M%A*v1={GDEUsFQ=gEz@MD{H2@>cWA)o=*U61GM6)a7^bQyC1D;QOijPLjrLDOb- z<2qR{V|ETJ-@pEdtJfvc+7QB*g^?1}nbZ9xH7FUTsORzJ10tjhbJLJo!T~k-#_zt1 zrEgfp>R$lKrkoxq{b3UE!<%z0qv4Okeu?aj%`^lkrOawdlnGj1-M(si+F%-|iPxAnd1ZM@=x?X>Oa&ic{o z{Qwwrx+}o%3)i@FM2p*a;f2sZKD(`4b(xwZD_G0CL=+%lI;@zM)K4+zlA3GcH4?A) zQ0zGqil&g&nC_C>ZoqtF#P$b1m$);UuGa2#j4EAnp-sfS&7ex{kB}b<1V0TE>T$wS zei{IC1uz{RrlG28JF$B2u3v~p^Id9*u7&ovoEN1>oSszR&YjsOW**AK3G?(br{*l> zYL}`B9^;KD3i_{k8T#*HilL9n=-+V_LLBXGWhe5aY!Upxf3rcQ2P*efPbumw#;~*x zvAaq>OlO^s2yT%0>OeS=eWy}&L7wQ{NpXa`@spjqdzenK?+h`dUElc&*KNuL+xBW# zz{B>BIrGLa^}47d9R}s9Lvucaw28sq2RK`RW~0S&;z^EBYB8bz>ldP+NC5PaRpZ1ZxmoSu9a~ zz(+fzQq3%f@#O{fRzi1`sn5PMdgD3?&s>MhN2&ST7Tk*7X+*e!79BqIf{wFeLuFtC zQp!|07;3IO(TX8lWk0Q@oWH~;{3iGd1gfg?>)^+`sbW3;<1D5I515?T=YDM3uLPUJ zUAh}u7hhAnM`^sPQ+7^WGVHz@lWV%N>V`ux~6EYMP2Y1ZRnf)BGpm;vAPY?pyjY2 z>DV$xQE$B4*r7qIjhZHW3fa}N4q>#XEZ0ry@bi?+| zSPsrkm$rT8EHRIg<3^zmzoSWQnyfV(JKl{DwjS2BR}p}yRBT^muFD$$SS+Ig>b{A% zG)vR3tB{iW)SxK1mKgRuu&u?D3#qnw9Ushe3I_a=A>J}AxqKEliia&~3!ze>z);X} z=V7$^pX{((T-Hw7y-t?dmYSVYMsxzx#e&9@djDLgLQf)?EcRcISa54!*GMrwh)}1e z6?qSba4GHl9xPQ!nfZ2`Nelv$6rkuL{c06?hC zp#Ulxb|ik}I;Ls4Wq1sqM;WN?RiuNkwyESh+P?#|{y4VO*q?dwJVHhyut?yM)rar) 
zPRts-hs@cGnty_oxfy>r{!U`ot1#>Wo^C>x3t3ZnEX>(;%PfgdQF(3=&H|p|t26)ubiz(O|3TBUM~!TRK4aadQFsip!dPhQG8w7F^pZiVv?gyHwNIZ5mUxi!5`P&tpD11E@iVaWfNhL8@<_LrA_>YeNYVhvX-fTHaZ6JaxPXPwT8nW(Z z&*Q8?oNjE*J*k_jWT~6%%T3WykM%k3#mbDpTXKXzXH#iVn{L$?sggh~RrLpKUUG{P zW(lsMr>xJ8r;#!Ld~@PlU2dqmVZe?L%eGE*(ZF6GlP%z58;9`@^C^=_ zC5g0$fb@dA?Y##Nje>LqB*DL*V*UF-(8x(xJEN^m>^4`TrfTNXJKNZd##LunJo^=V zsE2glefgDY*UN>E{X1nEs!ILp1yw%&+(3K4c2W$D2hNh| zav3)b&oNHjVvEm6HLsSS(|hO*DF6jIs)+N3pc-rs+WE4&9#x?}HPbhNU!$QZJXwxh&kl0J30^d(#L zsCs7vm4v`(q*XPZPDb_WzTghkRDQ1M0s$+%ou0Y2Plxw*-bj!RL$c6cWPjs4b*oAZ zYm8eyQZ+LbT^k`e>bxk{c;zFYmlFHRgT-m4tqq_vyzQ^MW`tga!wxw;oVt&E9c<&y~FWW@k_G zmtkG2en_e}iS&`pT)Qq4CtM~(=Ev_anH}ddp_4nWm9|gQC2uJnYA%|5@Ds(H-={Ox z*XGJe+*)NZxzj-a{_0<`*HOqYT|y@Wofm(z&=6@_j#T~*y8IGP81SR{bWZ?{R9*#z{Y-pc=~CTf0W* zAkbR@0=?as#xkt#nE)~if>>oj^I>6v`a5R!e!529k?d++!H*$phhub@^a5Rv7iwQx zEduWbPLw)!vmJ|lFL#kgcvxascT=OvVdnMd&plZrj+QBVmg>IvKAR#WpVLj%nT-4+i1gM(E+B4v7s6ueYOH)e- z?AwGm5*Iw)AJ*oniY#7)(R0&CE~e#hl_%3hw| zI4-^w1U{C-J7yhbdwJ!(7BH9R-47`tctdA{_Vtoedsyq1fN=w4j$NUpJD`@vS62$^ z1SzAkt@UQCYqOL7u?FGl!C3T*!a*us9=D^Wl>&{I8&NcHnxIFk^S&y;8@jVT^A2T< zGX`E{8XMYCPQgeNR}?%2X?B_|vW__KyGSB}b(9?{|DMsGd-8)7^=iHp)GSG*2+J65 zh5zf-K8}T!r^DJoxnRtxN9@J#DLyF2y}=LVrFqH3T*erYRj%lD{lMxp7VVF>1V6Q( zxAo?)C~iUg30^u`Wsm4_Zs#U$Xm}7Rj^(R(=;Qt&Hg3@UK1-z|tUL2O%DymR5-kb; ztYoFqfX8gAb#7GY_l7%oPUoLQ6dcTVjl{)kI%yioNUs@)_cA zd2Qj~c2$;HTT4@H+&^tdyhykOA=-3*E%n(^)7;L3divY_)sPrXKNx9Q?frOPP6)Ko z`SQLBg;iG_us9>#A8yoWH-a&vO~z^cqwD^llIs$r=i39!n6n;O#jkR}-X*OI<>=Hc zgg|56LvapJ7yt$px(G;}K8%7+*HQU1RJ#@5AM|%UF%!w<7BPr%UYBU(vLmWXGZDX& z6P^+hMMWH}R;!JbAt1t!M{}zMq|~Lo12Qe=N4riPDwiL8X(bh{yTROwz{)B@3hUF9 zzUyy!L`ayn#1iWpAr2Z?`~{B?{~5Nwo9|8$yJ0NVj+J>H zTAuFyX299rw$0$zgIuxV>iJQT-eGAwk2AoR>D5*bd$pxF9jp@yEMm^O$1;svas7|w z^0?-nvSpI0FX-{4J&jIOkHMH}F=tG!3RUwPdEUIs0jt;~Mp+PUtUNTaK@?C6rI=)dg0|J{qg3%?XOOnY+ zi31H&b*78+a39ijtA%$XSgLp=tMyv_t@BzFE?^n!CmQ)&@4NWZKW)IWR5xVN8rR4Q zy~;&#pHckk34Ges9y+LD(74o%sf5YRloE~a3)Mfr0kCt`PXHdau~Ga$r?`!lBwGpC 
zfSjYI2fTJxZBV``VGG$-T1}$tdU^IU8T32toHH1E9}u}Ykf(t2&jl^q5)X*MV>bqf zVkr38eb$TR^@hsdwcPz=$GL6q?fH=3D`s`k$Wpc!oAn{!vljlk!dZA=CCm1FUAOmZi@Tu#VT+u%G|{IuS&J>%m}OR$vv5A3XC zG{hkh+|o#HpVYoOW)_b+Xm;d9ksKl2dL&!6-g}i2H3+snlo1D5tTx33YE||y_<)nvzBvZcIx|!gvdSYF> zV(2p0JGg8gEwXJYMb&hSlE02r+_S97d1+Q7q0$z)GSpb^PX#dL+?edP#-clr_&u+r zY>~REP4KXXtYY>NkEsQlsAG#a%oZQfN9zGP>v7k6 z5J|SKp48_AmpAd0Y^)ueYmlQd2wIJ95Y+9suJ zUDQtYHI|a)E}<=mE@}X5%7!MSzs#n&bq7!5*hx3}STBHwp$AWp)uXI%{j+x_C8S!n zjSmfZijwr$xCKvUDcrt_3@1QSV!gMF`U10NLTc7h}Y^ufJ$)L|MHiY%7?PApOa({tI|WDoty)m*Ncu^cA}>|7S8P{MS9%dR&_5GF7{ zS|8N2K$vNJ7Dp#ctbnLm3q3dj);%TbXZ&nLUaY4Wa#WHR+|*5m7s0MN%+S#IH4Hgx zFdLA_JjZS#k4xR`zb88>r#*}?s56k5c_tAhH@*FdLP}R47)B8;IuEg3u2etI@wU|E z3>%mtP;;<+Z3)75^F1GU>6!aGRkTw-MBUhE__!7^sTB;{AbJxs^f^}X+k+Z+Yrk>3 zf==kz9-z60fiL4i^kl>Np0W?vi^HVjo4%>;OUd`bAf1;u=D)vr`IZ+Y-RIPFnY<+f(xddU_HwqS$yCn9ZIipr7n% z?4YV?oDv6+jlXixtIPOJdEfZW2*W$@sq3k{KO`6vpl^;%DLP5c zI#UGjp%3QzO7!K??hno&$BxpvD$hX1<`i?{APsZ8A@Vt*Qgekn(K}t|l zHqD7x5SaTBk9VUdQpy=#pgT-==JTd1?u#MvZhq-Qt^i2#@vQIKi3-e#ww(laZRp{r zNl&aVR*lHt#PHwm1R*r@*?I4Jaki4^D}c!|M@cjpX(MMSG=Mui65u01E%-k1-|^BM zZi#$~Fqf3s4ZfdQ&NyP-yGebatHgV12Pc{jYG3y`F(X+L*A4e;z>fEqJB zfUqaYI(q&K2k(%;0GQ7ZqE6sXU0saWh-w6 zJ5A=F9x#M(4|1&tKG}GCn(Grpjd0^g8Z|gU^(yrpnj869pUXkudTEx~f-!idr0?kr zp_8Fe>-^sXd4DZgz9LeHWT6(pZM7WV)&RkcDeBj&0P7!{?PL@@dYgNBm)J)4#DX4UCa(ZbyTyql5wF;qp@;F~o%*Vi+C+P~qpIi!!<%MQlX5eAo zb=LKz3ifH!pC_cgP=Q#f1<`b#e*#FH?A}cETH}s`HT&JyeiK+=#;cJAIYH`6OJ~bu{e?}lG_uq{#ixIcq zks9N>rt{VV_{&yDy_LQ5-IB2hqWcuZDu;KyF;GTM%H}zEq&d>Y( z7cL^AV+EhA1f9%3(ZYkZ+^$_xHxcGa)0*z*&kj@H71JFjk|+(~=XU@7-8bOdyS@Bs zluTt$8ySab#{EB)8C0(BtGQT+a0sQNl_(A>OTv_4-#SDHg#E z?k7B%N6U=2hX>KF8ACfXMqt@^Ys-Y!zT|AFv8^z=Z^;C_x#FKPf$N%ENR;XSkS>};>1&)d0L3m3eC ziK@m(Eq(Mt^;X2(O$>K3Gv`xB9=!p-h*9wL*PC+s!<5W#V%UPd4_AjQVd!{X#MQE|gkxvE2|1?c zGKp=)2{f2tnR++g93lp#Q7>};XeDzn8zoEYI%^^sXx*yGVWwnu=DpqO#E}H&4?7;E zF8@1#aN;kg)sxOMEz9lUq**x#Bl){Ng{1j$qJ$xsVdvSRW9faY--?k!4qf9`wl6l&qX@E?q_l!igZxx?M2rHL@gq 
z2CFvxdlZGZ%+F0ai52))BZK{8bZWu`T82C*&bGG*xcOiHX*R(7cT01nY&=^JCzSwW z&S+{%4@4^tcl?{NMj2`QTJOw%6c?GOtL1N_j)j}AXUCI;3P-|P;6HJVUJK}96h55i zA=qQ`0y=T@B*Vd*Xn{XZ+E2S=nUAO45#hIJ50Isx&($A;>T_DdVp-e<5}JwD5b6xn z%CS|?qQG5zrfmdzZ9a?&n?k}N7sR;jIYV4XQe|z|VxGsIGqyL)9oTY~sT4)w!w6x9 zEH1W~NYZG;d;t=v-^B<^(ML;+E5sAuMT;arRWQA$5%*+WM^UI4Zv7TQO`6QzKq zpn*lg5I9J3a`vzLf9~ls3jX(#v@j1hUX-vt`3MkD0E+{iMKh1`?CD94tBK`2GgIu} zZ5g%D@yj^|obK7ezpDJKA&X4Y?Gaz4%}^91Ax_~EYgO=0dewBh@L5p+kSZkd*x zsNrhF8HD!09yjc*^!TORIj(20SZjz?K{tT-cW>?KjD+kwf~MFxUW_oI5-Z zfEelPrQ})W#nuqzRnCG5*33ZJgF_=NIseCP-#!KK-wZ)PYM0^_^33_c9#=b0G?)Yn zB`OA;qb@EnAnL?*&r(}dd^!NJfB%!(XGd*lRpVM9(~Mp)Z6r`#2GQxT5A1mHhvQ^) zZyYtTv1B@$gsUe&AnJIj1q8ezURqv4k1>Wc)+~EqmP9 zcmp4O+W8N|UulS*_=VitU+(_e$+FKQ*wjMB?*4mAs^Z2^fry#e)H&=Poq+e`1)?x*%RrOD|DfKjiL*6A8Z#pvH1& z>jfudpGsxNxXv=fY@`>_YK}GSBXG@MyNT}0FM?ge`kMAP^Ta`hAlLXgM2G%9o-6L> znaSt+U`&iPc2G}wh^WyxK5f~wjguN7kPolD#@xlykhYr3qaohYXeR_f-au4CyoKG{ z-UzHwgcn_2BNegL3>KT6DK7lCm=q{mf|>Ni!`any#f6F`nJv2fXM1|$5(E~}O4It> zQhN|89fj_Y3al_yO$dR~lo6g5)pkN!mi>2oCjWRO!=f0PKKxOQ#3rM!?YjRU+A!gn z_tD4!t=L-1v)cHFd;>VPgP6yhbEVyTqCod#ZAv4AK`DM15l4X{=q$NPkiHi#V>g1b zMBO<2pGdGQE#l3Owe2m;yTW`&Womr>Hl*HW2$5_HjTajmiT>7$=`^tb$K1F$~N;=>_Y& zl@UWjEp+!u4rE&nN(Hd7vAH9PUzES4^7GN#t;crPNJI-ollTb4DBE=MignA5^Mt=lNrvE3>Aoh_O1Ogf62s>{=Sh23> z!xHUJ_&vcGyg=!@?N@%gt0|h&WQ47h>zhjc?wmBc*ml>xkse~AxDM+X8=PS+^^>0e#ezr~Qau zDV`K6I}}f~gFq3))+Ey7!c0z_S?Gr79mZ_H2Yl-e)^_&g>@>~$j)q3#tIn0m>73qYi-0J{_6ywG$*|zw zpguxvwyV!y9AU}lbm_cG$8TR}#ZLhDz=&6MqHw9m35FIF5e`9x!o#^*myDH&SFAV7 z9trE$!lrwoZYOjU`buBPiQLlTU7OW7x20?bvH$bqF}kdF&{gB2pits3`|jn^{hG5S zCIeeK@Qq7Gn<198Ugz{HTgb`+GdO3^hb(>z?|b8pY5C0eo@fGwVuaV7U?|=t+2)rN(p~yGUF6=6i#x;NT9!Xop{LgM zeD7$VizBqLMJr=BDB<~+R`Cllj)U|0Q4|c>o^XBXzp)Gu&Gb8I_Xs4p33)*jiiBFU zn9arSqBhI!AWyK!caG25U`xLdgciq%02VDuxX`;~zf3xys6_kQxuM;yTgipLGS)r8-RE*b5Z{rIrqPov-lkMxZD{I+GxDkkZ`=Kl0j?O5s*x!|S+g8!7a#fwtzwj_dcAFiG=~ zQb>Lg?g z7r3dw=0Usu^M_6;EH0Nut0@W~d`~?^7|UJ{!+E2NB=l#1%wHdan3o(L0J56A3J0NS 
z_lF9cOJC{ZpgdAWUg8e5>yFE~d=;kc>@?Zz47g>SyDxIKn~hJ^NDeY`8RMFUPs_!k z#L|%5U)kRis1?BLQpA6Z?=Q9jT{`_>j}j)A6xH$vcyJxI-dhG68=JB^GESfN9tPhQ z^5E7b<0XH5m7pGJyCP&#&+iu|e?}bOe}+laTQZ-EfJg$4elH}9JEQ~cT>HHFurA}X z)h0@dTRJ9wwxASv>T-+K=09``m-O0b#0*gbx|GjWLP0{PIrDJm{R;&+VGo#ODwe2vTN9VPL>I&mo$|JzB{Vz$+gxy~j zaQ|q;TkW^TR^z8YySmcSu}ivN$iiZ9aW4AR1phT+fLk-piMddXxqI;wff?+g2UD*v zj`wp9Ji&kRZwHQFs@Eqdt0J@MvGzePQ>Oeq^2L_}N=miX#F zuBw$t+c3(7C?>Jg@gJ9KnF%alG-#gX`}n82J|c9xyO*W)LhQe_I$y8BVM^IdOHwL; zQSHrpd~nSjt12R5PL|)J%;W7T!&G~#LelNdlrD}-6cChJ0oSy2#3N?Rqovw9pJRl( zF3}4!-2c|X*#V5thzWoTRTaCc^f+%;aq>L^Qeo%GK`j0gRLgT4co4EIKnPwA(a*1p zc1Mo&=z>)g@v^rr!b+ak%FlZZFX#w`HYBHB=_&C+zC8RVcV%8T`RlO$@Ej?>dpS41 z_4onjv!w#0l#@S8=qmrRZX)+=MC35kem!g4!SMJZN*J|eUvs@2ftYtWi*)5K8q9%b zEapId#X_ha`{@|g@ruKsd|wtg`6P1IW$bl-<-Y(^%zA5Rd~mw*7^3x|pQ7zj|L*x} z0=34-FvRHie0#CrO*o(ELoEIIi=fQIr>gN?D`J_(DdCgTg>~H7|9g3r>JfT6U9Pn@ zy}Q0UnCe*|jnj3nd{^G+%KFBG*8j?YRgjOKf`@*pT}e15!8OR>$!UJd2g1sI;Lt=d`I0l zFaP@s;{ajpKYS-EA(3=%cFg`oPHk0>Fw0#9)n4rSs#HOIaqaKurD+>b10^ftG`uh?DQ{GV@Sa?zTw31rqe3 z?;{^MMzx*3^*g}q{qGYg9x~e++q`b#@eDmh;k5!DXvYu@sip0U z+$O59oO}i^!d5+Y3|8oFq*rtWSX*dm|KE3>RMN>i6`D~GNYwifo9tP%Fvd^q~Yk|%4!bu&0- zmG6L5fOR2&qUtsmoG7vqBz}6(tNz`TeZPUnie};M(YZ3#s$TNRs>>~IScvJz@Lp4< zS~X=JtBTX?jyAX3>Gjw3CqvE+Q;mm>*B4972luBW_KkQ}wbq91CLB8}vW1tEzEmEK zKCFpar7On2fwx2^ILRMm{c5Bzj}M{wa%Nh?BKyrabQ+2_3uQFKtA@?4*0yx{XC=R} z7?^VYU~Dm$nkc*%>`+m^n2Zeii%vA3LU1;>w?ta3-&Cz$qCUpB(KdMYaR5K>E!MNM z(g!?Tx|VsVs=SHorfIz@@e}`w^I(Q(9O25JU2pkbpRXYBv6Kv-x}g#2sl#}r?<6~; z?zW}V9Xs%uRyKGAs zf5#|aQ6&-dt_v%`Qu+u){95hwYh<}oN`2tXEcL25e8=FLJh%m^9{d7ZVz6bG7-Vhz zAq1z(0N&$!*h+%NLh0<%;k}zlZkv%B*6`(hy3B=U$iu17XIW5%5J%u87{mT)Fff(3UC~8U^LuzEPy+E01wwv1e+XYe5-hX4#2z)VoAOLbIB6d@n!$?+ z^yd}01kFEo_g)b5&ZBsAFth1Ll4!7zIXsWwgyMhsEF$Dt*?K#D#Xd+Z;}znJt<3MZ z+&q^hpb8l>|HJO)C<=TG0p*(xM9;ZjWkPtIHBpF1`(+sHzhI|RYzn9I|A@c(ye!7D929Z8 zNnj}f_;4|uA7KW2&Av}}C5b`BR-Fm+m|MSpb^(TAqp2K_z2owD1-<&=R2=MlnW>I^ z-2i<+ue$%-dItn#-()r&+N0WTXAN}eeRBU5m;{AC9h3f3(4i=gDR7g*ulk_jFUux9 
zq39NG{KD1}Hy%!~^A%PIRf_@6bgGrBG-&-97x_F6QL0L)l_A9!GnJ7Q{ zENGpJZpby|>B`r=j+qFa`Yfw0S8$hFyg^?W)qkLTVWHYLa>~{R3{eT&whYVEwT6jH z+Pp3->aqCl+3V!j-9Ab0BB~WC*6ovR2k4LNmx(u?C-rZ>Kk1BUjI|DO=$NBF!OV&! z7ow+@7Ov@?5P5Fr(g-b+PQuhAjyuD5k{Jz&y zQqbDB$>XBlTCMQsf4GaVAhtWS%%JVy}DguJojb*dv0 zjY7K!t&;FvPOamHAskVDjz^%ld1JeY3T7)Zu|6L2t%#fJP)gjQAx*}f#{crTV0p8y zekec_bHbvfBC?YW5MYwzCs&JxW;X88XiI-SN1%w`_Hw;Y4xva(3kR*Kb@ROSCi{)YYKtye%)K2 z%k3`&T!0dId{8`-0NQG>gZg2qGD0GyhxYIy`^)A_r}Q}wX}c4B7tl$|5NO^@Y*rQt zQrJ7eJBIV%=ACdXn1CSpdo)E@e7t3o(|WN8tX{0!U8_|B)@d8FlQm*76-hHjQcxJ< zvz%Cg(9$yot|3h4I+JHPt~G1G87g#Rr7bvbWtXZ>J`;0Od$8~`T!$ce7fRG$=Lhp@ znfEkj zA*35&d|hYNY7;R#=lBvD(5`>+wW51Gb4K8C?OVg#7$s(EfU(7wTbZ8ds)G5LZxSZl2Isk&AkC%bPvP&Odw-eMu_ZFkO%2i zZH1jlm+t3^Z`M+4HwLZGzHbg>Bf(#HJ=KV3cZrv`N}_M7;oWk+Ja78e&#?xs&?R-*!zfpN-P_ zmFe_kJv!M!?5mHoLFz||#+vg_pCrYU`{hQlh-l9|&EO=7lKEmK&+ZgW^H8CY;ZHEP zHADVHhiiN5-MG4Gh$9`(nrB3n!B&p*@R5pvV6chUqV72)yBY*b7U`pZb&Z@|3;* zvC(FxJbF~evt!kF0M8=TTB*in+r_rdA~pws09zi->oPAi6s}6BICL~ss4MbQba=?d zKOSx#wqrvFS5J!pLlZlB4|E$N;z}4*h8RR#HfA{Q`;;n?e=n2v&W5>f1=65-@3;Yn zE&_4$vGa}kf>hhums(uB9F`oAI7;uzqPJeusHj(2rruZU^8 zM6`U{Wr#kSaJI|$b$4dMJk2@+jDbc+1F(=!$W^UyXG*o84%b#KM@UUc<%j2s0{s#V z<;6IEalb_urG!hXE9}<~g=W`y3o_?&N=hEAV$B~vHsu?_pnxSD1@n8otdZR;%M=Vi zT+HC774LklWMyHaI7*It=i_WqK z*b>S65H&t8PY|G+P-21YU`p`3`(&Xs=jEbp9bvj0*!p(vj|{^XgFa+dO%W>E`FF7mMK#W&$=+pb=( z22Kg4x@g8HPGq&e-7$J$w@C2LviS(TP-4hm|A?OFx&#$QM1_4Ilq&7&>nIO+MmJEW zE$N2BX)FBWr#0wEtw6p-8GGtDM*BP@9SORFX`$D%%v5lmwbzqwqO>DV^LGf3P9Wv2 z9PJH0!J?9z+=i{0Q_m`N&)Nd_+xg|?yU_BAI6dgTFug~Ifa3UpPraiCdY`f%*qJ&a z`EcR+;a5F!xUWa<;&}>}3U&RnUU}(l!z`CvddhnR_8Qa>Y!{jQw@vx>#rgnSg}9tB zcj>JCbrA4sUv8h-a^<5&662e2hB;y2^Y2tYh%H>6y^*YSwYNX;iU@`trO z3!ic{?c8FUpS)_~`^9dQ9|dB-ef?=V#yo@NC{}P2y0q3?&*MBV!PfAAudOH|=$L=! 
z{6*^J;+IqP+Yc0Gyf?=2si{*^k6#rsOwsgDeN#-GitO_julzuB-bp0kT>anRd5}VV zuO$fk@n|pFgT2>-O21#-)!-c2>(@FZtKF5B5tm^F@QpM})Tr2?{OAQgyA_#pZz7 zHVfizqFO02ON{ADmnOYLXQipS*qsiW8I@uZILvjyy&Z4s^SQnWKlidH3BKWXf}T*dVGr( zE;@7SaSgHzbx*<+ZdyJe8}wME=whtsD2|fP05+tW`xvbIT%niJCPZ)3&H4(oy;Z~L zi^af5HTQ#i)5cW)@j=s4x9DQ~{W7noXZ9I|GA!ELwwI4j+X+39-loV||2uEKH`>?P zjE<<^)YoZ+L_eM3(iyc|)el~e4+=v&VM@br`R$1ejUIDMv-{8|&od`Oc(%2?7Cg@IYu0tX@9W#hIE8GJiR?$ z%fnH}L)i0nB*u{|+{PU-=yr-J-mGY8M@o+!|4r^Ns3y%pcf)#P`UOq#5S1olQMws- zgMnvZRC!IFL5Dz_rU88K5_;Wq|2B;j`lq&ubAnT3bWhx>cl7j9>`dPxwSg_%=kf3f z`ckCgo-~cs(n=8JL%V#Au%9u{R zG1-?F)H3}lYPXdI=JYmZ4`CKam+b+Fb}Kgi+0VPFGth*X`G8t!qXA9vETFDek_d$* z0ZSDhx)KlP3)bGmXU$5hj_l`I%3OY8P~ex$?-YN@3CKH_!axQHZ$*4gCdyvsSyx zB;@AzN6L>97jI=&0>>qDMP8rf+%0H~+QBRa4BD&}1mw3oxS5JjxJ>)RUl~Y~Xdg$w zA1GlJQ+fb<8nwuH#hf&|DM_mKyi`A2stMiY%3EwVEqqyF_r9I3suIan+xlnl0X_}Dg{6g|t_kI9gqD9$G)fXHJqgWL=~%dlt~B0Ae9kPYn8yhSbTShet<*A% zz57co+ayohd5D!CuF}8m5Kk+f1uxRP)9lN9bZNT4e80&MF2E)Iy%YiQkw9xTgF|1| zOOW3L$Dnrmqv=1-G3JKCvZR+?g2C@zhJs(o2qrV8iW64V8AQw3pEPQ)9g0$=h9Ug4 z`di@uKv_z3JmRtbh(D*3HGxo-^sE+_JGEOS1+Lp-5sM`YrMI|CYtlaNjVbn z<^$uW?10@T=<-NRieGF0=qBzR3Fsn=Q~cul6W=LDMNSOIrx#cunVIt1$z@4xxUcHv zsyh;3vCK}`)VSjLjmxZgyH0VX$mjpBlBAfwLL(;fjiNfQWg>5RwD`V}wO%DxU|eU6&VCiASGB?c54SoHIYr=Xq7!MT z^IPyKDk>&!Sgz415k%lBs*^{4&J5g7yhvsGbnFPPWkm!YLbSk1;#Y7#MDxj7jjC<~ zGebxOZfu5RfV=|C(gI$7);HbZ?=O&&Gn)M&5&kU8aW-ykBqrs(;C^s&HL79bKW)oQ zrPG6#%8i~(y7k8?h@dK-_6|$4CrB2pBb;dG8Ui6)1VVdiBTj?fVFVw>*=42Ux?A94 zAQajFbi6Ciz?J&2;@JK$&yS{{#y)GQh%E`G2QnGr5;JX(9AXxeMmv>+(fS%{5N zF4xC;xD8v`O?PR&f8@6Ie)8asM<1jKw>Al;5JN?Hm=wocc1dR*>4!4C1{&zBF|IH( zLl;eBbR;H>2(J7^Nc~HHzQZml`gyS2!XiQH2iA+0QC;%nY`Y6!wrGNV%Ax>2Cul$T zD00AiH?4xGf}c(i9THG1`ooqw(u1=EzR6T|L~%W5iV72pGB88Y)ybKXSZc$d0Rau` zCN+&cqpM~#m8-6R6@5TE-@A14^uc@SEySf_CD!(UTm4BA;()LB(}NzNBXwNZ`LWr? 
z43cck=p@g<@^-csOVwMey?T!03r`ob>cHC} zf6o#VnI~7pk@GQBOB0v9Q8w5a>DH)leaTP3;27^7EDnp4<)(P&V2wWfoY27<@i1BY z9lH|uHVVmsPjvd7P$|W4;Db%vrtMq(Q7^;A-CARay3!`7q7ov@9b&m-{@!J8vQ2} zF|Ic~Hu$_#P8eoBd*QDs8i!n~HmVYRf>egs7(xSx6V4o8Nm*J+ANTPZakGzlCtx}@ zZR|)=G;(zTSvxvj38(3E z;f^1>r1=>MpzX)f9!+Gd~*0oASX5SH!y=wZ#)dH5Y>I6L(PZlb&gjW8FpHws-$?h!q zR_f5*=Ii{qD74$CUo*R%c&X_Liynk%EhHo>eBNzwlpJZ=*G%^5mmzp%G2rh=_X2zb z9$n7lUH<7ayNoXb=+4ZHsR&r;B%q@ey-opHf9`01LVG7Lxjq@i{nl|bvX0ipM|{30 zNoqH|IBd61LdtxueH1-hH<5k3Boae%4gYsM8IWOP)IrGFa{7&<;hH&gH!E+!*$0%p zupx3+nSMQnaCmzq`e2nraF9>h!7LPoT_j>LQBxX0+p!(>?DDzQZSqCT|JfA!?NAqb zA9Ri>ub0mK@+!X?)!7(_sP`9&o?5rox~_^S{h0^7~&LWk_6la#Tya z?7YX3DA0K#akJ@OnQ9vdJqPMdS~^e?STn^7N{y1`&ajFxc7IK1LB8d%v>oG03rCfO zgNZWG1fvEo5eq|XtG#I9%l(duWU5U<)R&o;U8LL;IO8jw^)`nj;Mrvv*LvUcL~Yq0 zE=$eZ3`{fnJ?+bR!yyEWA(4SRiQyy}WJGMTY!l7LZk}qqEH^rZgEJ<1u090f>0%V& zl6$}5BcADh=DgIvBQc!JL<4s_3USZy!X%6)QT0V_-_2ASR}8tZWIA%kk%73(!fhH> zm&bXLbgZAq4})m>D*an5=(~v9^KPg@u}UG=86N9#gdu?ZV_hMEC3kudc^|f#dE!z` zpsJGJh4Rq^&O|_cA^N`{6c5HL_$cSV82S4?k~0K#pW~1Vgei<~URAb8>1d{_hX2h5Rp-_5jJ}9&4-S!U?_66L2*H{8rtyO<@R~0 zf1w$6)8a|gWE%Oy#rC&-wi!qObdYA)k!ec5{MoE|OD3JvAEo&GYL8YLnH3ho?zVjH zE_3AFPdk$7frb+R4gJLn4fkz6bpfSz3Dhbg z#h!AI&HHERUW*Zdg?Q7PcvhA)d#3i2|3%)3OWJ>U82}`_4$U9<U07v{cM;Sqokj1@e}FP`n@8cO}soZWJ2-u`*&ap8lF{Zme%{= z^vTNNMEYha!gOW%&rxxOPE%$}wbuDdD|d~nKH`WEw`nHTmi`1gx(`M%!Ao>T7VVey z?@2@nSiC=%Xfk9{S$-#gER?u0(djbE{CCjdKf^)=(}k0TrM3d`==U`Eu>n1a-_4oc zXHEZkYo0LF>efjE0=s?`#}5Gx`^+QfY}0g@y<*f$4MmUUWRq>?P====D^zSt=}Ffk z6?+i#>8G4w=-k2|)iy5$I>S})olBSMFX4ieKtAVp(%Ib~_tK~TB4mGp-4*!RkJ?>3 z35+OqXgUIWS*6xLYAA`lKen@y3gTN7Y3ga&uU})j)&B@-keug*wOu2W7QV9r3HTF zOjQQYm*WgXSog^mm0c%l)zNho~=bMdx zrprg5C+C2TpE0YcXa#|FL!ZqnqlKW?eT^}@b?a0ksuaMD>8f=7`^yif>V_oOlmN3u zl>+r+Wn-bv<&*pf++hNmf(qh}@nt&yg(bfEa(v8oOrh8hxA`P^d}4Roag=6O)4d-7 z7CqZlmf!Y+9HF~Q@0+IOMSc$KJX#mJoc2Z;*Zz(fN%xlajzANYss{YX*z-MR$kBUf z6!7k%I9%YW&(trWRPM-3WHLJ3It)4j&8=mQ$i*!8fzmwpeb`wdB}I9BLyH*ug2Y>$ zd3$01X(6Z0dPn_kJCLw_?gSv}!mt!2X}s}y+dnd4&_Q%`Geg5|h3(v@2biM(ozSvT 
z-}>M<=mmBZ!bH~KsAgE~oRX$xC}J|PK=4`4HPI;>tY4*$Op!czor;R+2hs<~9HddN zY%mq;AE4cvoszMK5%1cqP>jjlf4U5Gun9iCk1mghvR670eM>}9B^3;jk=Fabbd4Mz z*VGM_9jgQ|Xg-_k1D+5GXnB??hbaR={c>fme zcM5&s0boSWCsXyXc!O&HW%&s~^ZI}L6-65$7g!ls=aN+~5h2*UHrTZ|9l$ce`pPX6 zU#z&ky}}@glNf<*LxEE&>I5dL#z`4J$kz!ka-SOJhVifJIz#MN+=t`V&SE=W%omx< z6EE%-yG~tYH!Z(nA`as#cbE1F?;TIT*VYP3Npx4+YmtzOAh=Q3TbrTLZVCD?(hczI z?2o=8pxOlzUW$=K8n!dY?M1|u}mRs zY&l=onH?Fxa44k|%EG+}^x1mvnpiKc!+LrZ&QKm3!XNLnWDXj7kEj1HS0^U4Z_9R} z9J6{*<>Ld;+>NKl25<-|%NJYXgB~t=TqAyWNm7_H=p8;L935ugJZJ_yjjd6Ynse#Y z@k{Z09O4tk8$cHOIEJNiBjfYZk%Mv_(i$*De!_D+EXT$>h%+c%O8)aocz6C^=KoMM z>{>Qcl@au%;y;ICT%(y=s5`EOL)a?~PcY`2GadjNk#+>EK4J2PS!5UtF*Gfr&2_KK z<`La^x=;!uD9_1`%+B{n(`RHH)~OcMkHeZy zr6HPuIlI29aGwwl-?zx404+oO&)h$x-7>T}9fgZVwR{%)+>9m>RhHW)j;%;4R? zK*Lpi2b2d27T?0kPD_uRI(z?M2aT$p4~ozu51ELzQ80-e_1<6Z(A$(`EGXSp$9!MNFiu00b8kv!Occ1jy*QLaG?2MT{JtvAhbOznFuhvHxf>@Vh(FA zo6hzvfS+B6l+U~mPGq?%b``K|n;l#P*X|!Jf%k{WUM`SN8+todd~(d+lkH%+eFEkh zEElVp@>Mi&>ci;Gd&1t9UJdHg`>+cqgbon{I$wy;>A!63`xRNfC&)i;bavs{AF&{D zGS!-n*N24ztKA_yqUCWsJ)a2Ut;JD5T}NnS>NXnm>-EiG4PYt|03o-;Jox<;!b=xz zpx~CDWVcw$mx$1c*0MePDd989Px3b(18Xun7>wIDO^NKFJOjVg$NUvUA@cPH%Nm45QL7?+#(-RYYI+0bKUM zv1FMym7^JLV$Hrb>WpH&n~=r zfZJ6WuYWg)fEx`F_>=fid@;if5{{Fty0-_BPII4w6cEKE){NWbs&U&Kw|-os0^9KM zouAA@K`l?RSIGp>!$!wE*}9|YY;md6Z)eM#CJ#`YW~86x^_Cw~2O3G;TJ}6&1w7f_(-t9og%6E~%BS zTq~`K3HbConDo3B{uHB_w-JU_FplG7;3UQ#=NcW)J1)5;f4R>Q@9MOYrmaGxt>%iR z_TFT7d?Ds~)a_-2grcvQZ3jz0M{Hx_q_Xa-R$Vsu_`Uv-aT>xX_5=hsKO{>=VSWtN zt2GWoy6U4=9u+U;$%|mDQmi=s3b{7bIKhx&xAjO_o^97`stn3^I{-($>rp4JwsM;u z=5X8I-x}@KKBTU34^zBSX`XtkHrnne^r=+e`>A~kgQY7t+6#YZXgsc&%5|P#=)twp zuQu9cc5+x#PavzlKI*y1TQaOqhO^cym{A2oaDVJ(Byvq>;~&3U_p(n@y#WwdttHXa zq4!>=V#-KENH6}_NPy);j5t4|H7YuOUFjOwBY9KDa%s8LT3ojJx=>v0e#L#N#68pQ z;CSfeS&&%cE{gtRp75x6eZwMKGZ(iI98LZ5@<_Q`74|f|gGt7u^t_MXB>KdNb7Oix zdsynm347P1BSkAW!&=L(=UeC)zWRP?MmS+Soe^h1r-++`6j&pejhI!$K`P!sz?;|P zK4w!~A<&Dvf6v=Zcre#2c|NUF!?pqGX57&@;c<)qYd3p66xM746qjRX)+AtcvIQxh 
zGj(XSkE7?FB?iPWQ1L~+-lR3)&bke&sAhCMFZB>y27fcb@f%NH0?3_H-u`yv;RY90 zWfqZ)Ey{LnOZN0fapaef_H>%Oz!Nhe#Tcxezhn9p;OtnLJlCglRf$G}fschFe3o3G z%Y@`%?3i`%RqOF!_j?A3@fk!>^?Hn{+&Z`jU8<>{LHg$WMsD7D;2_uzBBtmU9Z6g# zACi}DH+^Mm=nq2|60j~e#T^p zjLr$oGR;pq@s<4KeakE6&~nQCaO~S4yMMbfhhIV2BTUTHuzFn%&Hg)E1V2}mWKDsS zC?kYX!5ns2^bZ4{KH-PW0+?Kcw>=;lk{marHa}#!qs0IV8+>lMa{FQ|%NuS!)eWrv zxkX{{+#!EiMD?E&hj)-$=kHy^ojji&%mQy=_>n6(m)4}IW9=?mamrik5#6fPja=+~t z=Glg+4h9WgNwp{$H#x^a4j;)m6if1ulV;j*QmY+6*_}F7R87keSNf9S;wgt2aPh?b z^_>dTZBN_f;S_ygIA7$U*zRG*47mA{se1Q`B!qsbUKzGg>uhel+^T|aU}iIO(v-Hl z?oB{jC(EH4rYc+&FO?8=4Kaz835R>9bc*Sk_jYJ7=!Gla}CNtXp&?c z^VgOIA6w=FHTe%>dc)EbB*9V%l7|Cqp?BVc<}EniGRe3o&>OLo+9$Ch?megLc)=pA z`z&|Ej|xoJlXq17=9kJqSkew;Uuzb$-sUQ$DwIH7L+I)2F{W)zZ06R>Zuw zxO7kDvn?a7G!)&B+F+Rw@~&YtWL4*{H=s};VmEit(_ZjgFz~RZX#rM^vNwmLM&X49 zX&}}Awy<%OSd*qn7SLI6o=)XgmqK?|qh^^0BrQn(T&VN{-!1kUk0je~$?*o70SVEo z*5&SCC^!tkJth_HN0O<$O<+Q2z1{T=VkRYF_r!D;3k8z*vvdp_$yo|Et`7m*W$N!T zRj_}!9~7N;1JiEg=qw)?6QoGnWnS*aed8NkYF*DN71>K2wG1r=W8j=u6FjpjGI*7%Q57e|L;C^0UzaGnCFihGlrs>DM>Mxff;pd*j6uCnEf?aI} zeaGb8BQYXB@K+H(m(@pNBJ#@CAjcK(56H#!X$UcWDqB)2$C~8Br9xH!!ZR{>xuFMoj&1oI=9yEzwDfWlnK!k5MJAScPFfXgl8PI``;yXN8S6vVTu|fBSuw_fK z{ob{fQWHsd63lGuHND|Vf9OoWQH1n z*}wdW4saARleGB@$)?MEVw5~DwIfH6%d4njPopZZ^Qcg znVS%N(v&~L97>U`IEtUoAqj&C54ZDNMA2Z*ScW11K8Oq-N^)L~@&dP~Vj!>Oexigt zAv=|$fn3LQG~dQ3imW})ZXuvy5z-X8g|$hUqv9^|FLVXqoTy>xD+`T$pzZ1R8S3M6dz*AL zUtsp?eHfc+gC9`_y;B9eZ;YF1My71EAGN1PYw|f9Gwm^frk1u=?b0zCV zB^ZXM{xgx~D^N_z3y6hB$G_HC?ZD|0X}8*{^73%EqCp2*D8?_lX8h_)Hual|RMrs^ z-VD-?kw09Q^Pru>rDAD_WYn$Kc!5o%LQh<-c6$?)5{Mich2v<5Z6;?8sXL!1&U3h;JfvlA$urD% zB!-$24&as*?23|9d;MtZ7L}`>L68etZ%d^%=aJu(8v8qEep52WPpHj7O`vp4dow3O zM1dG%%`(yM)Z|{oZqgqmtlj3Nmh$zx-!9S3l;=bV0AC|f|nkWE-GdX8>}<%n?Db3w0cnq7S= zUpf>INECfY8!;vC4-({@%s3Yu?1LwpJ%#lb96ao=!@j2k6D=Ef%lBce4^_6xxm`={vm(p9OKQ^N)jKk>rb9p^Ik z;Y2H9zxJxY*%{MGxIUf%A^R-5sdU|T;AUY_5N#9AIFHGD@|+SZz;#9?*(I*~MH6FG1)fu#jj 
zJ)^h&=s3eon&Y-t1~KcDqzy;TRd7Pg$Ck^gCHBep=ca|^Oh*!_Z@lm70l{^VR3E2yhF~dhkNss2p#5LOIFM(wKQR+)MQRcRo7% zMg?vV@i?HXAvE?UhyZsQ6_HuQOsJl}p5O3qbK34PrV7~pC3N=68*o(UFMgr7OuJanHyPUUF-th;8R=j5)=Ff^9Z zF0UwXDVjmwB>Hl7-v>C^jHYVK(YtU&Jw|06oTS{fW=BW&p7|cWhbjd2{-=9B8-t9q zb8ll{VoZrOZ;Kq`l07)X-xG}7f{1xF_?A6Zg-KjMuF8$j+0TDXd9K17QdzEW)3Y9i z(4YHM$E1R^L>omXkZ1f%BR>~M%Kn2Ou*)dBnoz!KBv#hX_4gHnI{kf-=sLBUKFyBj zMC&2K{M!N3ly7j*9hFg{xvmjP^NHSB95Mc4XMc%EYJPF@OR1YFKQ(4|+U)!(ouuh)b@+L^jBG%dtY;&$ zJVf#Q-^8K@@R6+p!0M`lYQs!ody0DOh-6++X56rX(v4&&!RP)U-|t(?u2nw6F*b9G z0t&cZsdJ#p)f$gO)%#*ktXj|Wq@hYOratXsT}-6fH{R1fD{C1hdUo z5&njnS>Zgf`y92v{7ZYUmf&cD$Q?<4${3M{Bt^ube!gc!P(VN6a5^6fkH2ZfXnV@oSb*b=3}#f2(Cxk@=`5&qGpBCkw?$am z&Y#D6tZNtTlZ?-}$)!HJl?RNb^NGjYX=eir3`FG3f}39P*O7yQ3J2puz;F$+@jm)A zN5XbD#m|3k%*=Ja7pnu#Nfg1vL4+##4E^IzpXV9{Yl$u!Qza?kxLBDP0X z*V?dZ?#dSrbZyU+?JTxsUd@H5dhnU#)%%E`DFWQYHP24Bt34+3>mt79B^3d`v#kOp zz4F|N7Gc9m%6dL?^Q!)>c&g)T(}nuVEyRa7k#T7KMzSHrSL@9!s`ggJ3Efl!*MVJScRf`vh8M>ER4*_@M!%53gdLi7&J{PqO7Q|xWIpdEr!Fw`6F)L$)^9jzS zburWAzUCFhbQpFs zTh>t*o{hb%o%^@6eJjjM!Jpul^JrMax}Bf5TGfe!}=`ei#2tIUR9E|DJ3$awexf1xQ>3D-RFP($=~PK zU_GNK>2n{w>N+yyZrf1l0_yj=hxW9rMeGj@#JIKgd# z>+Scu=f^qc&3*2#d72)ktE%_jwX16FwN%y&lgJfZoLOLf)xELy2}#`Xw5(}`fy7$x zIVC1T7(?XTj?oQU%MPPDrta()SZaB5>~(j4@g8XW(x}iGPQfrgj6f9lq_=$M(z5Y0 z8d6GJ^eij|&2XJRc4y6zAJ!u0(SJKzm~>*6X((W=->8?5dyJNxolONW z7bHD*-iB1Zf6$5|ieaM4_@&0X|Jq1fKsLP72p-fNYDx{0f7B^_JjGCqUsz3{T5i;s zr(jJU0qFgz5KuF(mQeB8-XN#a2y3-DCav&%6Fdp|_@V7B*;&WNz7p+pNh6qH6$u&Ee?)s&39-H2PwtBVIK2()de=U|o z=Gex~uw5!>f@hM~)X+#;@w+E7cCqgdRdG6fBrnw2*>^esB!(WGYYQrg%Ui?W*H~RQ z(Hwle+TL5M%DNya_!Dy}cOFNPzM}w5x_LtCjEPKdJXB00>ZMLVRyNjo`>6kpNynsN zj_J7WD?SKFM$#0i1Q@2udl$T?pE`+#EKqTom8IU*ngA8-tY7-2^+|YDmi9h$?q<%Y zMiKl{{#i|O)kF?YF{NAVMj?O_8;!4BtFY_he>NlFJov*fl~7j~Z- z*HW+Ci;=&#a{OfyqAwlpG^@6vYp;l$b@+kd5Xk2`pI1=<9R_oe;+SiCdz2QamrmBZ7piBiS&MV^BkpY&WW^Uh`a9%${&ZHm2QDay(evmxK z$(lrL0dN>`6Q)h|{AQW&_|)cR{tLo74Xb8cD~Aewv7*>~lgbFN0sOUC{OA0zth( zIg`-cl&AL?b5Pgd*PA{LGJ=FdY)zJf6eYNv4yAJN~c 
zEa;JIlS(?j0cI73E|R<2QK-74j^>(ad*MfvJt$5+`qR@80z!v|=J3LHB(wHV+Z3zA z7;0r7I-mR#hyBl$MGAq--&M{d5EXM`YzNUHTnt&-KH{{YH`Yw}ERl=5oj zc{7F6JF7lha+c7$>K8o>8ScEWfEsq6A!2VEZnH0AOP^U?VGjD{1C{n`8o$5m%Wi>H z%!6&`Ei?b@+QlY^j3?q48j#NYFf3giJhMw1N&fYU{N+8W+vq9vGl<^r{ z+3#i?|7@ph{V!U4H62>FRu#mjQ{B+Hj#r>ROZxg0NjHpo6N zjL=RbxPI3XSNpV9tG8IuDuV%MNb5T8Xel$6m*@<1xA6{VK){QLnApUBZzauG+MI|;rO8t6 z_7Cbj@$r;|+?&kehIm~GcSq|Lk7Q#B;@>ip7pkmbeO)C|lrmMPHB?|Cdx7FXW7g6S zkK;sj5b0C3XcQuS7(5@W(JKaB`wP)Ia9({$c(||w5JaNajV(1+Fc|v8(rpc%+Ia$`#iTOy}X@jZeCimAgN>ac_vzq&$MSjcl%5XV`~4mm*R=}h-+6k z!?e3H`E8dqdSb(26Qo(GD>k>P6JHRqAiqnC{mUAZqccaU8_idOgl>-kdP-hqi^O9+ zikj*UsU(-|^k%Cm<8#rRlsSZ%cAs~M-kFiUjmtiV^!BShE1tqS@j_-v<`YQ-jAHc8 z2c+?)gr?N5Bm8fJ+N+~q%k`K0eO%a!Aj57!IA zn5)4vKFBitk13a?eIXrG?rWD)!>fWaCwIo;*01=oFu9+=MF9+HMvy8QKT@k2?0|5Hk-LV^ZSuoA*)hZNo|d@4TJq3 zGWZiN1Rkcxc@(;dnswVv3;whu(a?`n**-_5=fL5d;0NpE4?pyWr=-nwwj{=^_o2Y2ipQ-rdb9T z{BlPs!#-%Q$22YB&~(k@jc%1TU5f;t)V@oT;z**G6+N!Td~@+fsWUk9i)D^$KEhS8 zES-$3s(CW(M>bpEtnzu+_)%rj35%x>yGAHI@Wg3SB#sIm01ok>#nyB?wx#s+cdKmi zA17tEI!>ZMvTqOT3_`G=B$Ld!T?^0Ve>e(Fuaa0{g)l)PyUitj?e-46?!f~BPiF-S z!MS#BsXKs?4cQtsb;@lJ&wS5foK#@zUfOcn=9|lrPu_*#fz9x6Md1j2Lyu>Y;Z?j{?Y7~^!G#qOFHY#=~z34!Y5E_kI zS0kN_;eWIvYyf^m=JIJPVyFON?YzFq1LAzNzGdoX;M(FYX~lapSLM7jC&9N!{J|5FJCxa*!#j|0;vV)R;$Hnt->z9W4< zbhY+pcVb~5o(Xc_XAo~7PzI5}aY;&Zr5*nMD&2H9iqLW?=VztoEDijf{XM!srC8V1 zyX`l4N22-I5cfTUo%a`uh?%O)+qa&u{arcN*}BY!+onm{klsj6>-& zo=Y?QtiJMWAT8BQ9*-4G=rQelrc7fm&-+o4L97zXwAhMXtSV8I8dnSZacth808uMr z1~u^9IghU>z6MOinIMPRxXBwWcwr|e~|Bd^D*Q9RPTk>^$&Ab%YG4o=?)~4 zS4a1u=EM1mKp&viifQ|+XDWX@;)3^xb|zD}ELNvqe6Z<{L{JN2HHJZn z1L)6~;=SaVuZjm)39S249di}UZ~5TJ13LCUh@Qb^mzS4UyKI;z3sETvT*oKKK3(}2 zgHa}Y8M_{j(a3CJ%=Vr-0@9S^`6Q`DYA+u3*eLdnbL)aQxCAa9O4XrSL}Lc642DrN zSX_HlOnZ%kM7(Fi+;l>@a|$r-%~0~~F-?O6yjDwTP}*>zeoB_N%dujWAHoZ0l4in{ z$%ry{_Tx@dV&>${57gY~(WV2Jl!t?GCKKgH*=fpPOQ%bNHAYu9>yYS9mrUiy~Cas+SCj)?xgE zW0zn$(j*=JAr&?Tv4#ECl>>qP14fGvsWSx!bafQFpw=xq^J=PfZuGBbdwRFS6hi3M zXfoJJ#8bRtJYQNqW^0*p!M&z|TW9{_3MNC_ZZ+(}ryFZNo%z;v2g70YIKz0+gQ+0+ 
zEgK!t$b??Ek>19tS7i74XLkZaleRkvZT|9^Qu5jwvj*r!D2~pLQzx?hb6myzOUX45 zdGd%X^aGb+utPdAf343BI+ReYBHYQ;bnr;Qs=hoP#{27L#E_U4M5LpX*sF7EwNzhC zZnY8|&6aomf?qyIUV49YBCSOy%j=7LM(IG5+hO(-L6JZ;>HNDd#s)F|mb^vWwg4pv zS6*u6VRTl^+QV-y*zfeJJxWAV)1FNRrv)hX#p)0*?|tlw*N`|7@+@0jf;kF~ePT`a zUhYwA?2C~;F1uBlvU#(arTq@QRDRP;{E+lQ{o*v(%m{DUhMM*Q8vVStbKkgUp>~lk zi)}o4+1j8vXa3$wOWj#4DUsb>y(z2l!^g+aDe_tS5h~h|eK1uPd!FeIK7~x5DcR5- zyDij!@!=C+`RNa;vAoUE)evb#Stk%cu}^}DAwII zH2O$CM$nT~>}>Ji6>Cg{SV*3exF@~7=zPHE@@4VLw3MaSd$-xYrA?v8S&jYENNh7? zlK!=qJUVWuFYZKeLnHj$rV8Qd?JdjlLyvSIgKC71DuH!`3)|qW=&@OHnt6z@z{%#X zp5kHh=&@L162A($6hyhQd8uKs=+UHIgIPsCMLTKx?i>Iqpc27LgUkULF9dO7-}m!t^c<=2IzM9$Nr*CZ{nknFEAb}4D(zJkBV zT=BV3v|<@8zf)MIVZG^!;hEu~DAd34R9`KpFObqMUuAa1?>o^{lUOy(N?9?+MaNGp zBjCzgUEHKS{O98dO1OFNQ!x*Dzt2>PR6}C5Nl3_;Rw# za9Uq3{pZ;N-NEJC(~9Zi+7pg>)%KrRC9mGw4BG=}m9AIYVBiwrxKWi>Q3 zFmpr;K0RU!8g1i~tPe*K@%4IYp5-?(0_>;rR69q`ZF0xF=R|MxQ&Orr^=O4U*Bx7q zYU$i)1S)1#3{r&gnBH_{SFR;sOWXq73kT=#t6trf83?E~${HAT) zSK?W0H;uP@ToI<-uPjvJXVE40b_(Gv;wW_Mpk;OH=Yr+JN@Zs7y{KI~qt3=-sj{AtOzh{-B@v$Fw1tK-65FBkX{FOcYiW3BJz}%`Q@w$kyS+y z7Yasm5feH_%knXsNTc79E;=O)de`&p8JZ9an7G1uidNp} zf1Y?=sdkIxXLB#hSfYMvFj`zXdGziCuU$NJ+gah}fkx~2U*G2bA+Erlw^XPvKTBa< zr@>f-h_<9PSutF9aq7(<{Pv|{m)A>5(GYMkHEp>|GkoRONY+|n

    oG7J6*=2NA0D!o7(MMY;lV9Q+{;O*S1WU({XhV&&l4i zW)pa;-Irgtj`tvHn0gWk`mNDK`G zqKa~N*nT#L|Egc@J63eY`y`ITwn@L@d?6cG5T>ATTVg=9=ZFTY>o9)HW&Hc;0;|sY zvdR(FFA*QV2tRF<@wc4FWfrnHINUTFUts5X_mBkQ_?!(Sk`*~xbW4RbjK3ST%G1q% zx?A!e9xf)PMf<_SRYg7Mbh6src|Dw~Z_ypiX!H$PX|^}`#w77WX6HXt`eZt^TL7~y zKA^ji`QdkLw(r%V(|F77L2Dz=bgpgGY?+#ZUEA01QS22!HPAc&^i5^8nq~Feo2mjh z;TNP4GOdzTD!%hS9=HgwPw96WaEoCiR8Z4toFsIu+y{@Q)hTvrcPg-2YffRj zaCY$;BGm5iYGc<}E!XMzOzeL^hj)K2o5|WD(N$D3r6!3J=Fh5@U4ixkZpXEnyKFg# z`cncX8PVJA@>=3vpDna#K-z<-ur_g}rv@kMI>lxwrHaJg4;TNH^{9&cyhyQK-(iPw zN<0!QIKSOv#=OmS@M3f%@^BuEUDqV@+-h2AocIe(R^32wE2BS_kD^Ma$mPUIYtLs& zc$#^dW()t7t>MRTHOQ0;M!ogTw}_2cp&K~EJf~3kv&-vozO0$S^1KV~k93T9sr>ua z0uz`1tr9+$%mlC6D;x`q*^eqzgL-U$<}FQJNGFfUxCWBrt%n}@6w+s5Lz78L3hDI&~Yf8m+G zxqM{SUDZjVI>tC8=H1^T-44OX4JwM>qhA8un3#YM*y>aGQ~erv$9(6N1-WG1ofjzh zeGjQ0S$LbLSf`A;Wcb>iwe?iS@6QVZ+?Fg4oa(EjiE{kE@V-yG3h{n^G`_>~%9vJ@ zGcSDPpjXIIblaO0jNk;A-^uskKb{@9O;PQogBvOX={nc;FTc_I_LT$~u)Qr4Gqj?FbW%Np!k%KqK zpi7Y22A0cK1jYmBQvP)sr#0gx0at7>6+@zK=P!C^SEADw0{)JLl@wVydUgz9eAVi6LNff{&AVnny&!Oc%K?s0}sED z4;?&w_zI(ldE^v%pGbbr-XT&1W2SCO#FVp1NJvYWRyz&6QNcY6vLy5o5$nEv3mj6T zTba@#8RlEC5BWdEPPcvz@vjyi^l6pdYK*N|PL#^u-B0Vav4tZT>6OrR1=Ucr606Y7 z^d9rZ{Zf_kp*#F?KJX)1QE0BOrYo&zSEkgbP9qfJU(Fp)bx=QNy^S1ANUF5(w3ZK) z0z^$kBZQ4D3tf+bOsD(b!|Jm2^>cDcw!&zOUHbuaacj}J|FG~ zvAK$~bd|eIR=Li(R#>Y!D>Iyqc30R*`5J}h zWPbc6w}IiRrmP^OhI>^waLS$+cTptC=2L8~a)H)$ zhOJhCjD#LsDQ?Fux!z57Fzp5Oy7(D(ZQwb57yEsF)>nyKp*XAVmrP@*vN}!v?j$RT zA>5@d&4weZ(L&0-EX3vQ+Kj#v(;d6+$;?zQ|G4hSB+Z5WD6Y8Q85kS()t0}NdTyN8 zlvz8^K+1AAt(Mb%QD^nRspi4=fO21z+_2W=(@*LaQ{)K;hLM_9Gb@9E5T~D&Yz4<4 zv$C04^CjzpZ?5wdU9SO1EFfE>C;05szT(h_#{0#?EhpHglSTCSj*#qgzG0Uie3MK(Ww1v=1GSH69?e65P*UZ74fW{}581B}KVvhZVg7Z+%+PcjtOz%665jls^(If4)e^s)PJ=C# zoD=Ox*`Fo|8;Vl{`f-A0XDKy_%}+)?0@sA6B{P@fRkl4{NPyI;=puQo83F~Rb+9E% zn(UinfxZ6sjDd-u`+kY><@!_HgUN`B_a+ZXw6Ly3Q`6CTeAax?ucb+IDu_f}TD(MF zTJTT}Lu!QDApSUXlE8I3*3if=_3w&Wzy>6-=j{=o+)tsP9`ah$>zFW3yH&}7piY^pJ z68vdM;t=z@EL$s4Sv0WgICg`A&Du{a?)-TZ8*};Z_O@VgZtC+DYehscCr(h;t{MFb 
z!5MwIXR75Uh2$Sz2CK}OR^I~tNI{QYeq|>@%rk~3W_};hjdE1ZHeIG^zoV_qb2tmy zmck(<>uNSL8l?eP$Nk7Lo_vD z-E~>*MaN|vjaiXaCIOnsjO=+rkNZl2tkO7{_g{F6UgxerFBd^lUJ+MiNbQRQ*F;fn>_4G z<6tm97(+mI6{#*9LgHjp3o;H42nHd!{J8xtNA(rZ0RsY2X9=%c`pMg00OUBIbk3#Y z^35gP98+?6IXzVJsdo~&&TT-nn#&gvBL*R#T-zWfWJ+miJD7h8Go5~l#vZz_>}hu8 z;%VH2?pa{|ibtgw@dHOk*k#!1p7ccGmg}c3y;qin)*Aa-+PguM5HUeYIgRAE44}lW zPXd!IBe|_bet|B;oT9`B8XDtv8Zh?7NUQZHpD$TH)s=C2F1PP75s>f&a?i|fsI{t_GIt9XYw=Tb0^0q$P7;I8&; z00v1j9@fj^l6+J^Yk}YvcU%x}g^t0T)%=*W)|7W)8}$*fvWP2I$|JOR!hS*~P!NG% zq%nrWg|ijq5$V)JlAqu6kKIpneg&k!_b@oyE0#hBo=^l^zBYFZM-N5(gsIhHLNE^d zR;kbfXFg;J}ctU$f$Jb!VwfkbASBM3wF!^e=W zYi2!SER2nVeI#BdO%ku@U0!et?GrATjwayJP!!l$Yn5(_BZO8pjMr6`t!vC5f77%w zcWjZ*s3whA$}ENi{DABEjYa$#i4YFX=c{l~foj_*xP)Fwh)w87$7?H+3j!b&aD;tt za{Ui^xK8&dk*PvhYn#2cLt|kNC=+zPredaD2R|#xo-_>=;sG^K_E7>79w!s!6JT@Z zn@W-i(I^}IH=K0@#X*#pzkcANvX0t zRJ$`^6WLwDIDx>VMZ)cn(|)kSgo&XCYP1Av(ALSje3 zDNi!beiEN3At;2c$^aFQy_3wON(Gj#%LiPZI**KsJjbyrKe*(JW4q?aEX2d|X3DV< zIOpM1N%oGEroK7|>Q?L1CXb;JY@UGWNDgGmuFym?Yh3msrl^+|yrww^mn!-ZU<&A-_&*jw7Ivx)9tsnRIESmR-hhBWiHunhlWRC@C%Pm zf~E$cOCPM?DO}}{y*ICYsZ_DrZEf;O$@~Y5RBpQi5b7`wa?NC#+)ezA+xQnGf z#$@^x$MmN$DYgm6T(0oV65b4r4ui|5fO=|_TwjZIF5>c%x&(^%Qh1wF2pz7@QC2!i zmBde-{Ex1UdW#>0a9d1KT%(c>(~`|yY;PW0*(|Z4J@+r5SZU~|okd8hJ@nAiG{PRG zn;+=k*TCqlDXnnPZ?54dnd@zBiCa%F9X|S^hdIpwWU^*qNa?F zk=b>g`z?;=D|ETp((?x>4;f+Edr$CuN%pFJ8eC_P(HIgJdptpya#ak`;oislha#KF zs1s5l{#&u3RTp6A&L=1G$)?>?%$i;bPdZ#hvX53{Mg5e(Zaq} ^CYZsZKE{ni2B zmLzx1g==CXyulaO7@WS(U=ryP15{kcXut87|DBUqo3bL-Wh_NVhXMmj*+Z0@70($b zXpmMf=$(e_XPV|o6qC!f@hoI&{nki37$i*B4d|!2!kaMN0vf`m zqBmdoya|YYi%}Qpkzd#>T5yM|++;uVN7#kVcwQQwI*QGXvF%9p6+by#*nr<}ZS#YR zq7StW8@)L>p;FpWyg#FfhaHo*tv*H<$PB%7QHIs@{ei}r= zcJmNv--_ZBRR8@79Br&BglpJagBICweU`$SRkb0_kZj z67=;NnJrbj+#2K-m^TSv{``_hm=fK@oc|RHF172i5i zUSaJU%6`9QGg*~EVA5d6I48?R#J!Wi){8_MPo-81e%Bi4lLt$%$&?OeN@#OPE$y1e z19vU0*S0gr>EaZhy|5NVo@BB zr22j>y+32k1ffoOZW8Gk8|di11>m{nXUHVBnmJ_J=ALSZ(-b7UBxmW0zV+l1#Qn>) 
zg#m!VsK`^3mn*e>Fl(zkxxeOaquN(a1HSKR_BN7V=jrr`XREfr-jZ^JkCP72dTEBtS>%TI-n^r zD0vIbO`43Gq_lpy3^q4~BG>qJ0*bEC;pw90y32DIXgZ`N^Cl$Ss?- zIzG8HjZ`BbxxD2Lq-kgNImYwoLFm+5o~7K}h)_C%!bNv_miW|NbwJmi|u{f9+Y%iN3x7D;57k%W7huuVUIM3Ge6ox!_ZSPGX(u0e4viq zBLoR$8#$FrspUsrw)=G>Lf_*e-eb_Zm6OE-%R?;3&AYj-J^Iaa#)of0XK%E(1pJ)W zP{lT5{dkG^hs01|lEgujQeZB2on>LX$&va?FyVP zkx4xcoJ%)wmXB-@;~g#oEq;^=!bv5(+%=$59Pi7OI{2N`mv2&=8ezze{Tn<%c-ZBR zpU{KrciS;?Ut<#IijXRlkY=z)I3O^Xs6+fyYWwYUf)Y50k#1@TcSsq=2U*A>-g9N$ zhrIfgLW(nhg8ivlGIae_q<~NNva?#dM%YHc`FHPjU`ILg)?;r)*D;0&Rjkv5En0|J ziig@=1>MI|S{oDH>>MJjVtK4|-5pe$XVQ(R%`y-fis4QC(a9o`I6j! zro?=Jp$cpxDNON--)&3afwM7`ijqB%CY+~pBfw$%#=Mtc%uKJtUuwjgvBh)w>LcCv z8d2hx!Y=P{ydpFw!?U0f*-pMG@OM;pKf%s2d*H+#c~{ z7g%>lnJ+PzQDp)zW*EdKjcRS!bvqi(Daj{8%XNLDqP~-g^>4LY;?P77zpHo7_V9qo zYWjwZ2aW}}&PwcHj>eM2bKI}LGITV;VA=qk58RksOJ{Qzgex`GvFGnpx67mFAA|5T z65w0Nq7nl=ia^M@%k2YOnJ#>EV(h>f8#TejH=!)n$#L-!VIPaaRV`cwgoS>x3XhTM zHWTsE`>V8M&R+VTee7TKRZHQ|$2K+evbbb}wR2}`0 zJL8f@EcIQ!a7IM~;?K+t!3WNj-V&jS#?zUgrPkg>%5R|1QEq-CKnt3)#(%$!GQl=Y zy1H7qEOGu8|7_AElkOlC8l41Ed`}KykLk}n`Vj3Ux>%m!(iy5$90BC1Q9GLN1dt8| z;zeB0Sdp@d?8{kx6CWeAj+#ttv*RNfyq#zt&L3v<#9afE>qoK6ESn%N$rnZJQc}%g zK!o{Z$n7s9++%sN$mPITgFtpzJw*bMB;3^1R%HO4!|6m|gj7wr=Nts6*U0VOQ%X&- zw;)}e4Q%7x3j3YSPp}ey#PQhfcfmMsX}lu3W$}#sofvg!0i^(}%l;yjP+PlX#>|1# zxxbEk7LWhp?b-ncnQO4Q2GVON1qkjJ85{TZ_q8A+)Oj$PPj4T&3A*5h$Ha|<-pxzH z<_V76LhUqu*4vLvm1ANA?84YilFN}?kCJ#fBM{-#FcLJG_+1aV6XkV_0>EcC$YHc>}C&NJ4c*eV8;M7^ZTrSEMwM#!z@bsj#NtyiFP#x z8W`KO8yn-!V{QBO7N+N?dMrzmAJ+;=(+o=M=)aNDiKX>lOec$~Cmp5D#-eTG5`9f4 z$_>xY6p!aJiEO5n14r&t&qz>~wa43Oax0WgtVo%aVo)B=C*^=*`8MBznl;QIFQ5;D ze*~{X1KVLyZL?T#x-+^VLJu{Z(J{L!2HN~vbh*PR=dsoGM68&J2$c$g;OtuMW#hZW z?Z{?uYy($6m;JR&nFH}YquRzr1?si}pRtn?4>iIC_L6shpq&L}Ww4u2Dr*9mhkUvl z&Vsd<03I_?S=b{1`CT_$hyyLhDfU9;?by8rDP8%_?uPs8Z#*yHlen2*8ZvKXZAgDr zPQmFXW3>FnlXV@5({6n&Gve`}f8ytwL#EtprzUt%eopKuLK%a_OJGKr_MIcOwl$KU z%)6hf<>M!`?UCu74&NRT40L?QskMY)^o|Oy0!Q`%8^?U_t9YlgMB{J;i zI)WGA@55x=8|w(ysD=(rif_;;Sbq$W<+lS%@u&lq8PA~R=@@~0K9d~(oNRB8nHn&H 
zBpC?-HA)t%Fvf*ww(DLj8L9}d`J8|k($B%u@Suu@_H4n66i*zQbbsF|X*OBKeet#{ zmSCHJN0@4{1kg1dZyiPe&Kl)U=(juf$)_9Ln-!br=#?G@$+sq@TWsFFyrZd_Gv5>` zo09Y4uh+fJ$*iY`y3F`j7a>)yM-hLgGO~Lh>rZF9CHq7?UWq)W`_O?EN5zVTMuTzr z5+7M%UjAAXr`F_RsUbLNbu}lOKd)Yl=~k@l9R99lqNCT8l+lIYxM0Aw)|7gRn_U?U zCQRLkztxceOlS)7-vWY4H-(`>bEW-xaLNP(X= zJlG9Q$iwdaz%)A3eKx||fL_;eKkSbr;Fquf`9%=PK@(3r&Sr`f;<-i_vVzK@_r0iv zlcIiZpC}%U^Ts%CD_ZoIL6z)sVr@LnY-@at>2U?z2VFG8WkX=BtTnetpVZNS zsZLr=4ax%yA|pb{yg9lOh5XFN7_P(`{P8Z^pMtcs6;x)}|#x9E!ZliGMv#ClKZ5I)ut^QRDeJHLAYU#E9R#12v8dm`$1`NT@BB>+GZDN?KlB}VUzEPajS}f6n(>G(qGa|xhGk^HVPK+6i*mnjZ z8KgRC;p4`lrdOH|#r~+Nu^Y?aOsNJ|o|W1eF|jhN+ERfIaF{kKrp(a+N)|+ZQX}{s zjm^ZAA>W2I)rpU@f(tBQCNK%GKs&UKJ4_`;$Ht<6CYGyZ1K-|tcoR1h_uHhb#ULp_ z>LK3Tr1$Q+1f6*a!$qLF$w85wWJaPLr=yYAnDx7Ss-AX=g~5J=Jxn}p5@L^%KjC3_ z$%U^PvS+{9;{1{@U|NFIcnj01Wb(s@%Dtizdh~O75sAJ%SLxB`Pr<_KE%;0kNJZZm zjMb_iqG(t48U`-U*2TLI7>I=9sd{^fL{G?+xZ_Dg@6Y(&~Ji)fN=k2>TT0ro| z{u4;Xv<)#SJLA}C^0IOLx{VHv7aCe8<&D1F0F<;VdS# z?4so9``^KItFvc<%4?0Sa<|PzY}QUDxrtDdl9Cv1O6L!2Vp8iyVx(56A370H*x)Mk z>(|z}6f2QB?a@7+qP~%@zxEM&U0_4#CuHJX30zlmVN$P%)bk&A;`x7o@q5{x3%CGd z^@syscAAh=VqNy~pN2Q8X*=jHs zFCfS*GdUUOjcHdt1wEj2zV0a*>MX|cBgT;Le~lE-bY-vC1yFAGz6n&3-d;gIKHVe+ zT}nRplLUatS#}4K7)TbXqE)Ha^4*%QksvH22=uqkqrb4^K+g{$Lo~Ltvtveec5*TS z^k`XK_LMzucgR@XPee-%h17nkS9~&L(xGuZFo6In@^t2zu2KI~)IgG%NKnN^2_!z% zA5I7Ks=5BVYwPP0IT83SYYeaD;gRBMrbHt|QAK-Q59a|iB0oKb&;t|C<7nSb+=!P< z)5(}?ki)`~=xVx0 z!0UEI4SW|BcmrXA-?iw2o+kfdMFp7Bi`@OB>|2<9nE5dcK@qrFZcAZKs|FuF|smmzgy zQNImHh!k7)2*DCV?E^HvM}(elb5>jdDKn1U+yu^8p2K=jfztr}?fu#_o*TQV=FFS- zwC`-Rwhl4Ew@cUlVw|MLFXIFq1$i?Qu*A|8dA+Y}Ja1zCN7@J6i%oD<<8}=@^@`*l z^;QA3N+S~?qPuCutFnxKr zFk)SWmRR;|-+j_jaCQjVy-olG;)j8v;J^Uz9vNb3Bq1f6S2G|BUL9|hK+ib|RPjih z0e0wcq0;=cy!NDV=5sh-;mvECBMKsnts?yg~~RC z*~?>!qT`>EE5;NZtNg?;0Qt;m3)KC$N?XmAko}`Qe097$0)WF_RN$tg;4q>RlQ&gw zWeHGK^8KZ6=$~H3aiX)toNQV503Wzy zxis_LQOiRO-y(E~L91HWrq!ZCr@;iM&fMUTeEAQ#jz`=X_uqcA`t>Ip!hc*&@lOQt zm--5=8rd`kN~LNzID7^faS?Tns!X?PR#w_W9?*LefSSN~HYT=De1XUhkW@ELz}~d$ 
zdUW%Ayj(!p1Il|aP%86rPCM@_0oD?6bb$+H?M@%hi`c(~KnGzmWx>?xUkQr3_n?|V z@W$O`dVykQgh?FzdFH4X>vzXr^_DZxs5~LmaWHVWm3UPqpIK z>im*O{3Wn)`;2UueWlY5WTW=Wzz?$iYKEg;QSl=zrAYQmvOA< z6&JW>Zqx!H^NqHAu3nZ=0*cnnR&$aZf@L4JrB=-f2IE!AG%DWYjb#fN{%UanBK(E{ zfE(^%YwKoa53B`{8czLKQoqPC{t|7t*D4hr!I+jRQ z(4{d|;6;C*k{!v8V1E0Lh1z5ZcwZ&dnvR5A18>3(04!dkY+j$Pb*(Qk>NMI|CbK$j ziZ8Z%I``Oj{m*Q1S~fnZpX#M=BluW> z8ZcbHcYlfjP_GlfTpWzcW;O~$xsag4r3UMf&zkxTL+P#uv$25YP{{rB?S%g}a16Uf z9<}z@2Z{h`-Q*j)LG26o!}(*N!kFzyW2twF++1dvv!WMF5+?a4${;ym!}l6W1=Rf3 zB+xZk>zjbkiA*l`rc~?9#y%Q$oU|Sm_}s!aZo{auv`h%Q_WxoE0pEo2I4a%HdIq%d z^OjEvY?f*rw+5@XsD|WG2{`3}NT?hFYz>UhZic%(+Jhjyo{2kga(;Pjx%083qodU` z)HCvndPS;*vadqE^3&KsTRA47IU<2?^Vu@%MT6gvMZYEYe)$(8xl(%>htE@g=^IqO zRTuLIj!NlH=l6teA}4pF{X|l6Hf}LNFCiZ6qmbkicm}4x&c@joHpKi18WDlYg^K;^ z*VeyMh;BcSu<9ULA^X-SDtnP(E#(=Q+0EWLX2 zm&wNArK%Ju#nzU<*{v?$}CN zK~g>cJ!`MOChY$wDy12lkx3Hl5^myc5KQ{VWOxsfzv2t047Ueq_$NrBrOPBU>WePn zW_z%`g)+;3(DtAB%QXF;8KHhZBVv6qlq`gdpl|*DnuOGP!DJ+OSD?|`4*9`rJ9i3j zP)VN4SvHhu(?57@=q#8lis%229*U6~k3dlS{t5Z%cp#grUdFs+eYZlNtKqjT$G@Af zi_R`{l0fUWcVjdjjt}o4s zz^x@ilO!A@lY|18i;ok693qHUk7e|Row?#*fMJ9N$M7sj&vyp}kV?go&6kW6)xpj1 zV8gV%nB1dgF^>LsDh-5bW%?H~c%T9cHC|gC8X&g`F(U+yk~PPLp1+i;*W?l|J(;y^ z_fnaT4mq0)MIF7!b8>Pr9lS7w9ae zTbxhZIDeT3xL?SSD@$TDOep<6Zg1xw9ve$MIIQz z*{$HBXBc++w2Y>(=zm~LE3-ykkiJBX=y+4>btze@%_gQ(Z^3Z9?EYF3NKSzk^od+I z0NMqh)rArja@=q;>Mx&`(uPd-uHFA?n4Mxd5AMlf`YeUmTd3qyZgi&Gm|t0_sHCcS z&N(mRn;WbblClJ%UpoS%0`FE$?@B8uOk5@2;MRQAY0Vdo=X|K&8muoAwY z0VYp>TwgQ7C5t?~kf~i<*9b;dNGSX;m~?^#=0VaoT~x^GWI-N`8;u*m-xvCcXtigr zzYSP_{_o7c8%vrfpdeDLHY)1Pm-QM-OoS;QE96eUvjl|X9W4MlzWUvWw@bWYC@n+I zuN*n1C8H^gc|`2H02fj`;|e!{jBi72(^72(e8T-cT~|g=)Gi zq#+U?Xd7Z_H2(Ljza<{ifx@zbWS@)*QhNqVvY>TXVg$#1-F?jcb(FEOVi|a#{<G8 zittMo-~~l|a5@e#_DVf1<@tArF?wAvz+K;O^D;J81eM1xorKTr|Fm`G@lbc)|G`w0 zWr!?^nIW`T8WG9X*d^IzQrV)IVQ7pYkDakgQTC8FTVxx1_I;@y`-l-nAzRt`-hSWb z_xn9O&FlU#|IFv!dp_r$bI*IuIq#gx#ieEBg9fp?F$SuMcOJ)`o@{IJ*CQ8tOkAD8 zd6>!1UgT#whdqtst&xX#RAc13kdYg>8(~uN$@CxYpK}r`uCX 
zceJZFR&3UZ?J2Ifc2`%eEK2mVfz%{PNy(Nt&FZ|fM0{dW4II!wID$^e9F)CgYbFC9i#9}CKio+1c}7;0-C4tIOLTnc)zH?I^f^-Sou z0*`XIOV)NST~8d(P$0*-o$wnWhYH0M;z+9YO>m&8L9<%r9*frU<4@_9uJxz5YP@TZ zeYoywnaa!qM{Z5%*LkY-2^O+~*sFne*7-L#dng-y=ygnxVS`x1xs=EF;5$6np(5>a z{5O#cC4;GNt+YHXk?n1|Rm*m@237S_GQ*|LY{v&pczg>E5n!eU)@1BWhVv|i>_fAS#q8ddr-L>)Epjk1$ z-75L7@N0KV2`|nWn%w2tdB*OLs0Tfu{!^XvMg~J2V}NgfKaW2%M72rW0M?{7Lgnj+ z;82<@VIpEATEyYrKQfpjE@BxFVhBchiL;#_SoDNg+gIE=%*Yhu{`-{ETI`=6C6`Z+ zz8ihHzPyyWY{?F#`+Kzbs+`4{_sXcf9p}Ri7B-Ihd{;a^tkm~b`<6{lE3xFAi;au$ zVBuFPbhW4dVNs?)m@gw~?=VhKapK~}vv}*y_#ZfuWB)F(ugrZaFTI9te!S(_CT6FS zJU*LV5pwv-MBsg{|DE{n75H^9H5>~mb7wrye~G}#?X%&mP}4iwVP;|#xUB!`j;`{y z7@RfXQEmQ9{s~Hp4;tI9N`=B0Uk|u511RR zQeZR)t#7;jT4T=-!E>lkEi`|*R6}w2#So{}k()&y6Q}H*bzj$9>#V$IT6!my>)0`o zY63~>&L#SOs2S!8 z^zPNG7U0CWk#*+Udv^<4WdvfD=op}COPo4Yu!ST@=6n83LA{m9F`8Y7d#i<4;0lHC83QnaQqTz%IayX@S@UO=A}Akpr{MduoRZ!w0cjM9=p_bL z0_4G1?@TUgY@O#<(R|jNF`OO&`^Rv(|LZjoO&WM59^ZEKjMCazz?ij3zQE=W?^i|n-O9U_xt0}u^A+EfvJEmd+CVC$ zHy--%yeA0C^w3Bns$te%rBpcxK^kf@N)r~!WY4yg*4<)_mK&*_zJTLNk&JR=JhP#f zrld&f44Zv%HRayRRL?gjyk-kzJr-^cm*FzK7aO_z(orEXfGpL`>+9>6m)Ak6{=Fb~Zq|O7*v!JOrQ!`|wH5;$#6vAccE={1c5|9Gz5{;p* zWciQtfLWub`ev19iA{4IwcktpD^wdmpnT7wK!t&SdVOM9>s z>J6xu-RH^E!Crw~_G#Y7+SDAp!St8OAo-{7rGDDAt3Lx;JOQC=B>50CPfSiLyxY9{ zjY+oum?yw2A~A2{a&ds9jR_t~O1(sMeadFWX5Vwv&xH{?;m_hn0J`RdC!cYgA1cnq z13(~^IF+DA0py1)8fE#yU8wT#w#o;tU%utnu;PU@ql$oD1{eOh&(^p;Q5mAfIHU zlANpM4K$&kWxQ9&CcwI+PL$Vij)y+d`ubP48T9G6hVmYh0i&1K{bfLt)D9r9$5tU`GoS}R z?q{RUW6ruh*&vBD>`z4p>Asx{B%!ILS^D07!|S`Y-QD0u=4#zMd>%U@$BFxcTin7H z=uHv3%E3~$Ma{Z&Ar|1oZ^%b$=}aDU#j?`{RrCRS-cx;AxeO~@LZG%hG|XimUj-?| zLm;$)4t)s8&Ez`z&`_};x6e||XR05>1Hurp^0Fg{vML>-6@NZmBt`+X=p#k&aU}uJ zh}B;9Xr#&ix!3=GY0;)HXn;FYsmukzQeVP!|H?EIOj^_XIf`F;vkgl1QZD=QOpVXN z-OeeS-O??WzWL|+WXOH;mss7pcOc<3uvR>|7_|Uv88Ec~AWz_g26~Ec2s>8m7ZNee zCKujJRQGRu{)o{ZRm<}M0O!`AS6H#JW3n7B13;eLfZB|214a+vM!vRQ!N3#K+}a?& zvJ~7xHM6sg@629JxfoRPi@1t08Rf!J6)f)evBD}DJ%&U!oD`0CAhg(Re17|U_ljt6J%6}rbME{{o< 
zMN3w%Wn@_hu?jibwcCS$*B{;LU3mYa-i4r5Ve1*0{u_^SwA<{ZsLOm#-s)={6#S=G z25=sh6VBwx_=Qr0ir#56nMvU{oO|yca@LSHaVf`j&-gQU;@a0#6zqp}baXEJ9`&z) z=|IJfv}btjIR3n{dFL8Sw$VhXbQo^*)zBxyZ(ciTPAxrF?<1)~vt>6KSu=1Pkb}L! z*ItXQP&=l*U1%*p>Z_Us`N`WtPnqc~M)blJ&7a%YISOlab)_gUxI}#E4%a{G#FCy-m-qX{e_fmN?b0x zlGC>w9qI{buHcS<+xcS5#HfYGQh|Yc=l!DOSJapueopN`)QOB*3Ow)+c7;~ZBBuoO zSr}Z1W*tcNHsK~6YD;d_KEpPUlY2n6`mD7WT5kP3L&BnE1hVwV@mofD<5Nk&J2;RN zl+-8mj|;*7xGDqsyUu5_q|v(T#&pw50~R`W!mV+*f`Y~#Y4nv{C7Z;#fwK2Iz0%#~ zf@z?hC}ycVmO<8|_s?O2OuucrFSjD4y~_+!<@=l54Ix5TGb%{r)2u{QIk;`yKJ%ys zd&2ZsSvbB)$6NQMLI_Ews{%e!7Rw%u(u^paY=1IbOPDra-tv}xK~eoSD%@?sY5dN@ zu6nXOHlQS9&37Avmq8a|YVeyj`V|XomH{wZG-8l~N=9)98ca(?+6gYc;0V^_CyVS; zFp?@P-mT@*ya313;4v|9O)LQib4-8=<9HT)h1j2GncmU-r9a0Ut3F(6VW2%oY(O3= zv{J=no7MOgfAvUQBtn#q`zdJk@FKBrL7VA>is0(1Pu&7d9?p37y}AtS3<*k_<40<& zjKKXnOA|RdA0z2w#nZ~{ZMzU0PY-B5^c;dOyvkIbK8#~x_k-xF@|U}5-Qo^Xx+&o- z8YyLJu=kv+KwMK*oN%-{WcdDql2)+5YIdhyYe-PCH_P4nzBH4aL#0 z@QmJW9*v;%eE9S!N56fB91$}Cu?|Q1ND|oCJ)Xa!p(^40@}i_~A#y9rB$5`#_Kfav z2&^mJJ`fQFU!CXMFg0YV_toF;4-he2^t_=cZU+e{?g%^XlIX+|d7*yW4M!W+xvQV_ z%J=yRUIUW(%G`4*>!__v^tJ;VMiB$EcvjN0mz4N^quAA7C#!cqm3Z%Qi4Q z90k@FKM&iGHJrEPK_Gb1by7NLyZ;ZB{e;xI)f0;?Pu zrZrSCOv~t@s0;Q_e=^xGPc@hVq!7VMX?62!sX8Lx@GQLQmDK&|7^R{{uKVm-@}A!@ z54L72%QgP}YF+3HYTRfH#+8T-ErDXrkeH4tbL&~RRnOmic|c(vKa-m0p~+yAOy^n;=<<_%W&6S#s9cnQ8! 
znvL*PVWzszax}C38;auetCwJn{uJ&Y@b8J&AZnqMSAi3_LybeH{uC^x5wxZ!Po%)lJdJ69xti z@4p8u%+~^97#JxSB{^wbKiIQE z;>se(MFq^WXjtMD3GmsNK7PH~rY_CF_Pg)nx>D-yYo9*y5PN<+JKyr0Z|j>sbG5hk zvA6du5Rl&7)gG=4Q-RHa<3Kb8bmO}*|IZJ?*~+Bm*jQHCSN?yi(6491HEQp}jr{*s z!`Ujs>VP)Pk6WW=|JP6c9~)V?MDYKwx&O6h!;I)*?|9;}_kUKYv49HOLm zd0Tvk8VqmOhM`C|mT;GV01TLwSj#q_|FeU_4EPIETGNy>owsOadiSyT`bu(awnnG= z?DX2CN%$l@tW+juUolkl(sL-55V3!c`q4z>D*Z+B-uJF5%xXAK{-F2M3illNs7osw9({j2inTCGgRhxD_r!p-eD# ze6?^MkVB=t=T7I1!{^JPoOm4u!%x;bjY6o?nI>fk1nE`D`BU_Ty((QUTXZxuG+Mh- z4{nIR7KT$HnTHRW1x}!LC7;V}_E;>+?BA6tDmB`bbfIrA@l!Pr{3Gs!#D$W%$6Lb5 zaiSxf=^V}+#$s}&p_xZLWx>Skege5sl|R6TNIO+fj*ib;fa0N{de8G+h{q{=mr{$> z7+VtzX+okVh3A|~r5uU8ve^$?6lN`^_BUU59OLu67v}=FNET{zd`9(xG(lu0=P=f=|*20L_SeLfukKqP$nh%_RquN|-OnCxYj z)kHicSG(_}xxfFR+v81}My2jX1g@vM1hb)V(;;WNf;xp-+CiYk)ZAk%-Q97fhu@Zx z|Jlg+(Ta&{M_VZqJ&PMtwoIx=>AQRaDpv%qjE$A`daEtu&o@CE(2b)9_u=XQ@{kFCSl42q zocx@i^tc^O&jv}#(}xI(HoxW8leK8vaVF%X?vS!OL$}{lSSzG-P4Ugi=RJ7)zpKf> z#pm1ppqU4Q(F7mO>g89ie>ZUPBRT|`bUR{yG#-_RX!{$1*s7z+`L%lZr|Unn9+#1? 
zHosawYTu@eYPXt6$Fo_t<0=SI_-c2~t#Oh{iqnl<=Kxn@hNv+<@PUOCnIy>48T0xKVR;jeXjHjv4CZOF`aZtxpm=e)kcwtyX^ZORO7;5|6vQb%+0y3mU(6 z3qh1dOP=wH;3NwAZRd-`JuczzCpfQGig==)YEWya3M zHjSsRGfV{WnUq&#nfUTIWm_=F79P01e0tnOYkxGRKY zs9|N9IxtB2fef-!%9X6;IOJ~?XIilqu;+bU{5{ln`X`a z&hbXOSAxx=)D=cVo5&@y_-%A2?~i>*f$?#`9_a*DsZe&y=Tut6r2*9*7@L1sn~_Q1vbYNDSTumR!V>PI&5zTjsU;GfBXGKG#!fEyFmaAH zwv#+lHZWxhk-t{yQiUT^)Me;92=?^M&ZBaf-V3ZL6|Hps-p(5tMZiER03>YEOY<;G ztf(0cBqGmtG-$|hT&yx0L)e2IlZ#f;(_RGD1bS-fNckfQ$t+O0>?R(Dd}l56{KE}k+TF?AjA+wKXzJj@g{P#$f^K24^; zn^cU>B39mfjHc^X)Mm6%@clX6x&CZB8|bdx@q!4so6yp(RL=TAqQ&TmLeJ%ZjB3p}S*Gma z*DsAqG)e!Q2y@-s)1!inAd%8C$N&OjO+}xxHR?i+6DqAOPvtFSm8v=y(GYPeG>T1* zOolfq{x2)-qme8O36Wm5XA<~c_-+t%aJkq)wy0}%;n8~*2#ZL=w@`z=AiJ1zgDG|A z3V3s}r~vsh`vnoR`EZ|U%LZd25=?lAMw3atOT)pT-h0z;xX(JSe4R60Sx3<_ojSo8 zEI4UT8BEP?+^(prT9E5Dp5v7+phKzMGg_0zwbJgiAp;6AGY9C;KrTt2 zZSLBOD`lbq`n=ve$VixiHdugfZl&Fp*drHRt6AdorBfj_S8l+=-m-Xk>R zGhj&YWoF=IFQ@y$0s)>uE()d?9nlADrk3;I{mF6Lk@!6A_jO#7+P`1t@0Bl%=ntpo z2RpZcOPe(K_Dwu;-<;l>Q>eihV+$GdDu+|29621kwN+Dm<3y;Pvmj2=nBuf`$=URY zi3zz7xRlMSF`}c^J4rpof^OnRkm6&=IdRRq%oBm$_UG#*q4g^t%)F;p4MM!!dtv*Z zvPvkkApIMSletMwzbHsNk$I4`-TABj2lUA`Ls@nWGSxK7ji!tf$+`AcCi4wq&t05} ziqf)gF`FVKjL}i_?H>~fCzA!YnPwHTvH>~rm9?-&pX-90flHM7s$b34bfF>Fb|fYO zo>sebT(a56b5a!`M1YHghoABKe6O-xt9RrTDdfn-$Djv5^=31clmVr%Gs+f)sK2>x z66r5Qqt>*R+uHtl?)HDKf(h0s?-UFQd$qv|o~VtY1Q?L<(yaeY7%3V1KSulUzcv%#A7+a%|+h2s7-c~%~;J?QG=W1JOp zA8?p~OGMaepPwe#ujr3>6swHFTq!47he?NmicZCZCm!IN z<3nkXZG}O*xam^~BFu=H&(2T@@a~XFjtGD0nBlYkCxOica3E#=Uh>Md{x9%38CQ>1 z=AU@_ol%k)qNmE`&gW@fPBD6mkVo4xO+cm_a!MF~!+eBST8Ua5m&aTh)AC@+A!`vN zA$NXewzvm!2n4JWWVGN7cAhr{jX1vpnQ{i+s>KOp+qvtU{d8GA^)o-R%5}}sw*8q= zj-Umf#_U#vO%W%c`JpM>j@0^Bx^)DF)Od{zA|9Q_TNw9Ke-UC7mr+8wMQe$m!sCLa zXm4;UYIi>MVcw=Y=c(>mW!@HUQZ2sLDeEugVX}<{Fvw^#f5I+p(KR9GJ5=h~y(rTz zvz2yrF5JIO$oZ&JQ}k#HUa-f5l@^{V(4H9{pq^%uvS03mBVwvWsh$?#NT1u$SMeQi zdtDFGCGr3BSZ%Y~XeMBhn_)$xn=99g3LL|d2?a$6DC9;_s^;_RC3IY;E)1XU{qelp ze2-F^g8KqI?+hh`AI2K{LMk$jufLH(A<7cX38qX@Zs(v1eG%WW!mp2@ZLTwfe6Q;^ 
zddP!o$I$Yh*5IvH{*?%MXu#jhe1F`43xo(-6Iv(kj~sD^(6(9dlr{Zkn7fLJ7uVCA6I8{f2MG-BPuiY3`mFLCNnz zw+g2hoWhwX^qNtCZhtIB4li)Yxj7lPYON%3)}Dc_e!au>3uI4$(l8uuWK~azDVJRVA;j3UzcIZNt5Pk90o11^kE==gi~GVm5TH_v`i5Vf%{Q2v{S1 z+S{Wf7LJ~SW`4RZ@moap{&9d0K-7?$NYaQ{xCB&CjAAh|(R-sAvsTCL(JWq|MihX8 zhoj~>>yTtx6&Z&Z5eN3>*utKKsOhckHTfiM=WEz-6bvHw65n!ChYVO;?Rmj{1ipLt zZnZc!t~O_9#^dKy3O&?p+vhBBCaYfkIXc-}ZlckcS(qsJfR;wC@W$|JzE z;|96Y$*|nI3kn9nK~g`&;+rP5aQ{KBg$1VIO>`LmG>HfPhtFsFwr8MiFT{JqK4tOu z-X7tH&WS|qs7a;ApSe%t>|;gQ=T@VnW37Kg&P$zTcPgOo_zGCEzcdQ~NUo zGBoEuU|PMFahVOrLwSw6_Y)gQ91Sdq#R{r|cSkY8tHYoTpkd5d=&n~%8ZK-jBBU*4;97<&A+QBFZ?HfR$b6h2z=Hnco%N|@yT!!g_ z!T}I+hZPw9SW1J4c=Wq?4?K5}-Nogig_PdP|J24N*p#Fd_EsAd(JmK{>)}=Gp0uZ+ zdR7!_$ni~gD}~dN8sA?|k%=B8?MQXrjS|!6dzlTM5F2%h9D+k;rx5qj)%0tx*F=aJ z`kR&YDCAzMN^W`vYMW+jV-)A&Ex(=7;mZ*RVtv$eSFH7(aCp18J}HNB9)>C9uCj68 zTe1m_{=T8E*lnb4U$ZSt)pLHq39t{A0A+)U^I+MvdZW&s&P#S!y3VU*e>eMBPje3A z*k<%n3o)1EQt1ho=&CXcH?M@UI$Xu#)k8=L2yDlJhcEO(%$^~uQ}JjFtaYpP92>Kx z({F%2JIeIlj#DEshazyLCV;vcxZwT?cP3%~j$jcG8bc=RNqn7J;PaDQ+8~-@_;1W^ zSr`wB?Vom{Zbe0*6N8UgU2~3JWDo{zOxrqaJX2la(9Z`ILm{q)b@5KcJ`=8TYa;CG z|CU906*Xg!awwVTWoVc0mfBrz16sWkLfw8)4{X>|qy>1N8V#1>qh)2^ zWx(w}rGMm~gGPsN197zx?~qlc^AKd1(rxd)Kc`Hd=#U^s4bEPzW#VQtcCo8uQL-D5 z#BQHf8EL>ela3)c?#X<7>8+SDq1bZ&xuRoWqp6OcS>b|$3E*?tH0)H};=GFGkw8=H zb9sP-@i{NBO-{RuFCit+arza#m8iuRRBi2*rL;cA3JG^pAMhU3$zNszY+03|9d+ie z{IsaXWZ}7a{$#vaDEZCCT5HHmh@)|nKMlF*V1{Bs$xGQQ!CT2y1R6)$w>SqQ$q9vstVLRuyLJXWLb_q>s;&=OiE# z4~Io}qsI%1lMwfLxlgpd7n|`@8rg&0CfynUp~nQPGUvKZ$o#Zuutd&RN#8k1CWV#4 z3_^BeIta%RW5ts=zkkm?LJZs#qvFZ;A7HTCiRYL%WL`zo_W1Hd@Se;fG#)r%q5qZt zi=A5Zp2DGqQ9>*Z)8 z7%Jx0O3dNc1=wHFY5dn)SW^@YQ^ewx2F*n(AyoW~roZuUG%xIF(!R>q=(JR}J#g`6ym8~%AGo`^nF1LAmC0!hQ zckkw^Iq*1H>FLvLG+qCrvTYgPs=Kz^M;eg`2NI}AK;v?EujXiV-F7z;3~}vc4RX@x zpBeOTFa@3SOq=W)O<5FlyFIG(DxKXU6JWJtog)$|p3Fnrri?Efm$YpzhP3(BKPzjT zY~uW11t%uUs}G;*T$`0K{Xdfc%b}$Xg-p=~zvG&&KuE_9r$8`__2Rwz?{@b^x?wn> zQn)xePQo%t7Ts@DJ^SBnR*T6C_tqymvp%2;Sc6!AID zO&j^!3?z3t^?lw8ebC9a17xbaOqdN!1EO{7^x56PpTac5d3HI^2><#5m|_&;+c|Mc 
z!$=nT{jWbr@5!(8-^Zzb9x}yMf9a}bs+=MN1(}ANJvA+T(KIkNT^s$QP{iVt_PK}o z=;)2o5PF9Wn+(G{SDx#4yM5|8irF|!WfaJp)NDhhO5R%=^veSsSU9c@HkgGYS|^BV zPgMj7B48mF_-P)cca}*WPv^|bi@7g<(~>0=8`R4l7|_8@sh(kNp32+sjU;<#ocZIv z8zyW$y&E1B4=g>3#$}LEX4Y~`tL2JV(5%-?s(%)FrtL(f6n4e^EjHK|AcAW zxP4qT@)6dVl1thK=Hypp&I>Q!!Sopl5$RB3R%2lwsJ}2WCY#iBNl5bZ!6EEazh16Q z$v3^a?9HI-S)PNh%Z^_MKy81URnL2rK^@MV$)tiUk%;h!{&*d1NNE3dW6soI$8gS- zb;Dt>OwWu=yIcyFYPD3R@R>O!+uscYLWbt6g;(PvQCt5$I`cHFemGgHYMZDbgKW4C z8Ih%T?JGuOFiK8t1JMgXtPBpMAf=X4IES$Wf}^L?rkoz%rpykDbG6$def`bqdvjWU zTnMS^vO2z$9=5l?ikp?*(%D9(`yHw1GAqQw>H$b^VkeuLSAM4lxIx*=d`Nyq?-;zU;PS@l(Gb!%w! zSoE^~MtrpW)xF&Mw?oatav4>)`y|)xZNj{KVqeMe1Z>v&Q0c6FBS*r^{9EVqO1r>8>}QXlf^Imb?fPD_E!IEJ1G8>{PGt1J~72D zN@o$jY44Ya>{=2?M44ULbjAl6<8WN{&mQ(VcX!ckUFzeIFTjfw>+FA=t@2_@FB>4JHN;Wy? z+*(`FhZuDOr*-stbK}(+<{d(}@oaOlx(}_BA*jGiO_VqBdTKOgxbhM$2S4o(2`DHm z#GZvNg@F?ftm7)HlgpKsNBxeqCxf9W6P)+;InNh;8p=5w&i3+N`anqBe>o=%)yhMa ziNj^^O`k;I1rarIik{zvv#5$_Sl4>R`%9(yYwz#vnXkNtRB<9C2)2FHUzkeTV~G|x zZVW(iBjKWsWh(j;-rPyx|2NUD!Pe z5(e`RU(7IzQ>QBFY^%>;rQV3PMe^s=kZ7lh-?kewg_-HUaUf86memLmVI*SWWajWA zf~=~Wqfr9wn8(vL)wARn-EUsIiyJ5^g@fO1iE`r%iWbn`mU$k*!p^{@^0ZjR|9#_*{JqL>9)t}xjl^l3#`U1~5fC@U|=Bfu$q zSZYaXowqlgDVDUL2z_yX{8!gh5&Hh-v2L5Av@;l7>3zClgoFi>edh3hvBtn72BQ|v zlz3Ri#H0Hm9$>$}J{+X#pA4Jibn?%$hEhdalz?5fdi6jZU+mqh#b|&(zor>7mzMsL z*JHIUDQV$H#Ggr`6l7&wjD4dY`;aeP;v|$>N!&TN$vL#yp%vhB>PSM?uNmW%LQUuu7{cC>3>8>l#WDN4>4@DnN(#R&*K)8h8Lp3^fCsKafo=_ zwp-G!;R+fI4qR_{%p{Odyejxv^-BtQEPWq&dqh=WlqtACO0;e`jtllDtKK)dKA#W1 zLq{xA(X*WGc*d59h;rjN+B69>p7Y|2$shPz=WE>}7u*M}9iVpy1j>_4+7|~iPzK+m zo1OgRh5<e%Jbc zsWGmsEkr~{vy8$o7}fy}mE3gqBi1G)kqXY`&I@Fh%El^NlqWesG2f!lH-G=5EQ6>4 zB2{duE-B<@sZjd=e0mg18KQVo^3(eWubnoSjG`>l2#o%d+D&bP6_eN%sIe;$^Z?%D zaorX>S*|m{9i!8IkW*B>`eI!NqgdOfj*iX8GhL&WFVNP;9xC208#4H5;OyZxY#ale zd$gNp|5W+K=43JO==Vj=RFQaKJ#(%O4y-CiCW=>=MzH@B3^B1+8ZwcboAkK-aEYhu z%Trj(wW;GkRrC6SPMnFqE+NJigXFCtI}xy+_W7Q?nYV0L#k~|=-o13~2DtO+g~9Oj zdcU(le=p+wZ*2sAv1SGxCH{jWma{KNg+yX{k1Gg>(L6jHW~2r^`VH8VGX%WGo8#gY 
zPw3quK@V~Ou|0~0I+LD8g)QV*u47=S*opb7%20>qWxtK23TE4HAhrDV$a|P#v z$A~P})-~|y+$wS{?MTtnaYxYlk4Dir+gBfFZ_7R=GFT}^Ys~O@{UIt<9_JYvR1}y? z3YmN>6x{av-uRiu>+Wn_he0*ha)V}}NuF}`D!P*fn$-nc-4EmwQ4y4&9uUQ;{OS`k z1mv{X4q?=L`!1e9Pava^L`?0w?Qc&*sbl((*d_L`8+j)l@_M@}hie!6#Uk{z;{{4k zcFyH4Ng4&_)4wcp}g-z-ImKq1h`!8hZE^?WMOF8GAD<# z`GPrHeZJE#Lw_b$W;ZG{h1>G#zZrey%$VkAvnJ(W^iUl0$f~ft)>pEd%-dQBNB$#g zgG4}0Y&!gH?w-r~C%R}IfAEu6o~UnQ5`{PjfX~)-&(CWn9&pp#Ch=B*SD$=mpM{Q& z?f4GmlcuYSLs$g2yfO!3tVX=SuqE^fvPNewRD{Q;0R|=1Orh1 zU(ONga=wwnXxRMw-)ZEHP>6nlg%ZW{L&)P1QDPw!YuEhB=V*n9{mR*<&6q-L4a^B6 z8k>WjN4A*Z^b1=AUy=R@EE1(PHIyAc^GGIP&tB(WkQ`5?bVWqNC$Revcz@9l`b=M`5h)8Qo{$?m z%CX?^-SW;c!zkTybOn9=CIdgurHJh)3o=7SC7^M%f3v2Nn?R;~k#_^z=RZeduXEu& zfT2tTn`G4!lu7Jhz?q5R$w!g=6vKxt?T)~KO&15oqv?i~eahFpsv1Sgb=yaIg9*Xc z5lImm#~rhv+UCriemJQvK7^Vk)Q00L6mb4ZAj6&NxY5~A2$pig@^XU^5=dp`grPnp zzQuHfk^aLfD(FWP3xoAT>(-=*#GdkoE_Z>?9?3MtM7;i5GSU;v3fbVUM=bL?qYKP)+39w@*K~e8{y20lit7n02Wt1$`6#~7B zS~y65ySaR`AI6hNXxA8M<#4+d-VM(2Hr&V^upjWTr>L;`wB$AApdtr5>9I4RT`KJ= z)+oOV+y%sNyl|`byh%#|v~V~ik1g-j^&{nZji=*UtB8Pano@ndx!9j55V)i(4i+zV zqu;d*TP%KJv;(TpKUm7RmrhSiVKb{-U*GSBM<5({#+m`0 zi`Hc1*bRNE$HN2DFowd??nXSjlg}~)Fi6hbhoO=MDqalSn;W%QMVoz``uw$o&a&0} z`MgU-v<;`wXZfnQ^N*mjQa%eIP@}}nnkUJhvU|_Gx%|B8C0zHt>)6>WutDqv{9RvI zS{*eSn}?O24~d*890MaO&&>(#WZ28g&el6-vnT4O)aWaENNqC=iY+EeiJTb;EWY)4 zN=E`FX+w8TZUOfrDcKB7@vVsmA7n%T6)6CTfO^Cjyq8R-_dB6OwscSEHKcM& zL1?MU<;h|3maq~}A4c|#zZ-JL&(uqi;*P*ND=(3-JxMyBlhL(r86^iiomArqeqWrs zkwyY&)8y`obW>|=DTl`fFJq2~tjiEB%V0u}4B(_B58T~srWNhz|KO#xs z_QSLbu5WFvEf<5`3?muCvqs+W7u?Q!9%ikBMWGnkcV}7fT7_`UkIrNcdLJYU8naOW z?YyOwq&|f}OYk$|GV7!%{3#Pf0$R&Rac%GjG*vQc`INlwv8q$+KB9HQ(5i-Tk4(w2 z8j2OeTMO|RHzSdI4$DE@6~-yY&`xIZc+Vw>-PGQFAqYnp6;H*Gc+24Tq$So0ATG)t z85c2~F3K{!kD0nRpcp{r?2rpHGg6cL*Mo(5)!R}lW~9wUQ5p6xpiq(D@?V4uoV9Jt zdy42qb6fFKs9oi5M0j{_7#b$6OiTCeiV5EAOTqNn=67B$A`l_NNFNi&FzghIOhQ!x z+65vJ7d8=*y)iyf3R?@U;WrWss#$gil}!aJkvLI#Hy8@tFx~Ks(@m8i$Ry+WAf8*T z^VD!jB!b^=ZznG+1Cg0m`Ylhkk&r|&#MV1PiC$knl71KqMZF-5dJKB1k-T?G_90-)YES0|$i 
z+dX+ZX%&rjkN74-54W_kd2UIoh+K~giaFq$Raw8(RcU`DP;poC{9dFg4R<7JKWZmv zH8Ar2e9@vUO-C6h%mnlI2MXEe#*uo&1VGK44Gem2oPZdW)Gk%6l}s%h^JTLG_js>!Kw1sn5#>7zK3Vy+V;%#9$Ava2wTVLuf*8I9qGZ)crr zXH%Jn>+Koo3RqYHU09Cl9EL{V5w-VN$@nKi2*wmVOdlHKpIxb#!;~cTh5Vwfm()Rj z5->pK{z$724dKUoc#}E{72fSHAfhVk3?g)Q72b&<*>!humtvD$pc@(maX z81PlT%AhjFJe=b*3@5*>=w4E*Q4q2jQknI7wE*}TzPq5f030?lM-5PR<4b~KExZHiUuY5`r0e`9cKL4pB1jinxKljL+@JQ%! z9b4eEZxzYi*mfhk~=U4$T#{$yj({!o&Cv7CNtuNe6wGgKZILK8IY$ zG;mC#RlK8Y4;m!Mq~6@{R@%%c@y$EhhBbv zH80Wq^(O(5YuFM-Uhxx!DeXt<5!K0_7E$8utp-_bz``4XA53~lP~@d)xf;I^BH~~m zT5enfCD2{aMg!2?o5+i2{~Q>IH>L#vKJSFS2lsuBdDez|8PWO7`Sf7=bykBBy_cV$QiI@K%(03k&XCFVy5~~rt9`r?N{QH z+rqmirL3s-yg@1prmX;G9G;iY!2B$cVPKZ2x$fGomZJK&7NwN-doOo-M>G6gk?+tCw*6DWu|5q5*hkN_~csGJ?tp+{`T4+$R7NK zh)6m9ns=<6>iPJkWfXR9?XqG;d_B2reHF|`0Q3RGGZCYFt(ftl^~1$hc9gRv7K`Uu3I_7((?#nhY%ijpz_fZQmg;k4e zvWaN0-s$mz7D(3O=07VyPUix}f{`uS<}WPv*vPVvw%p#J0AJ2=QQ#be@VFyMbN{$F zG#lDGnB-med9NpXiQ!zf_=y&PU1W}l?QmrD7uU#6PJvtpDg2|09g3`KMs zQ|3JWo-FX}$Nf5^PQB3tY#u@JB(dXlCplsL29X9d>*D-66{{O%#;DVZ*0S*T#6$*N zuy<`{lsT6+1#P~=K`EZlG3Xwf($>;slHtAF5ptOwR6yD8;H7{Qh);2#xF-`Bnq5r{ zz7x!WPVCf~T^dVeV1b5q&FfqV;~kb5^@DpdfzZZi#CR;NmgoRBGgkx&VPl&63YLLO zeCT06F|r|m+JLekZ$Z4K@ zFM=aI6MlzVOuiAjWY)Mh{>dDcEUAv#yskPsy#E97BJp@8&w7hkC$qhi6%MnG_o2e~ z`=bJ^md#qr@GXffwRxEA@O?XdFvrqV?hL_^DIC*QIM8%`qO7q)@f3tK~RkPja zEos3>!!sh(+f>R^QyYxIFe4Y^+#3H=e!b)nknaUdhTfno2?`*IW=-D!r?2ZXZ0W#3 z(1XXDJc z0hQ-6kzFME{<6ec*Jd{AbjlMKjHq0;L`NrtN5*Wo6CE)ex~@^L_gJbk;GDJ~**hG3 zzQ`O1ytnf=lj(t5=z&&)qeuryNc!qmaT&jcomiFKeOgav|BBk@9A8EKV9?n-x0Cn# zS5ti$j!ZF;RNpXd1$0$a_%|TBvTh@_rt*-5prNOKtkc9oeZ!QGj)N{=#hGAIb~zhY zE6#qIyRgq|0Cu;|^6ZORSPw@A^VUt!ULTK(wQIELPq9ZVc#^wXH%~bbUzA`Cn1xl; z&feCY4ngnFc;-eqtNE^jj?Ur7bA|cTycA+WE&gds7#xT^GO+qS+#4|Y+v=$Eif z#E`a1A{utu`JnjrlN8b?q@>I(Ro(-WphDC0&1hA>cYFD*|(c zt}I&IdO`*Dh!W3dZDGQ~!$7pYtAp{>=l74BWNqY#Zmr?XeYVrtP6qdu-?+?0p@=g5 zMyK=i%|FYC$-&ea%rp1?i{8`2c*Y{d+efoJv+Bb~d(z6N#qL;4@~@6Q>pA8DHR!}# 
zRKmeLFZmeSP9+-Wur4GV3$w$$D}B&#h(OI%9xl!dMRok;+ms+;swo(?c^$9hzf;yd9DMKDUeJJvBwUq%NuTOR8B>E{+Kvc8j4(`rl#hcZ**;_wOyD;cwuuPP<}cftBP

    Qz;$<`L_lo`faJk_BI=v7* zMdIBpqq*H52+!gafoCmF{_5~hJH|20v6Ay9fVpgsF9eEEJCPgfz22u&f;&NY6J=Yi z5xH>PD)!zI*mjB1sz}9p*0y+;x#3&~p87VAkRzFA1QAmx;^Z;QziN0b5qeQPGZBc; zp*_kBY#O-x>is1rJx6u64F#0|$~w(9oD3qv5|#ST#6o@rn~1}E5e-vKEx6q{7k;quA3Ry?DKW)d4GG3{HZtaWiYGf zJXrmLOA>+EZfEOSWsZ8@-#$m!QV4E z)4JIYuL8^U1K_V8VDg>MW+Vd9=i`c57Mjy@Ad53JpG5dhNOgzZ>JA3ZR)#SG&NjL< zz6!d!42*4iubLIPpiXqu>&5r??_X^7@t%v}a()D`5+8rZZS1BhJJX7WZx;|>R0*ST3!;v-O@rv&HJdIO^;+rnpRrMI!4aMfa_dq9VjB3&}%c0T+ zKm?ba=*3&5=LXY)f!pNQWdLdr5|(8yM&TvwqLXShIKf9E_S-imqjraMelu$V?mC%; z2H-B~SBz<>mI~|JIPF}S6c_kD4}b83&q9gx%7L_9LF#^;>HSc*YwFZl@yecjshihAq$5|EZf9{y8?*Q$(KGuDbF%{0sJl?2}j7EK zL-8j}kO(RZ=xnnlvnF@BW7dZh|tAWTk5Cql=6_-Kq3YgYnm(RFZgp+<%Fx zWVk@slZ_hG?uf^SFcN$-kxtuSuc_JxM-XABfjV8&t>7K05)Nc?+y8-jvQQccWYVJ9 zH4akt_I#fw16YCtd@oF)(_V^y@UiK7^qb7LdeO&$OBn0LaP@xBktrbubA+Z;4Y8{z z@u8?!KNl1+M4`AGUYiJJXjsBp!L@`Axs2#G;GafRq6W%nCu|<0*O#?gVB^hOqA<~p zRHRAasam*Ks)vR69n%Dqb-d*i%-!wrg&>S6|3{%(tLZPyG+)mol#u4NeeQW*Hn8Z( z$FYDiP4#}SPI?T5aNq-XR$X|%{-bP@GpW4`Vcfoeg$gCSFFrW6&KHBLq4*0DjRs;E zKHZIUB4%}$ppI$Gs$41kI(;=t-$m?F7$I;}^kGx=+QKPV2Z51M|3*?!TsFK?B?oZT zI?aLhuSJi$L`ai5uWRAUoPeINv_B#H8u$o{XT)R6j9Hc`xR%sUgzIWf@4mP5Njae*xC)&fK4U5x;T{N+Z_5i)G4AUEL^ zQaw)<#YK7^vrrPY9}blIhl=;qS?kBd0T%LsVpxFK)6Ore>aJ*p%9OWsK@3e{0I$iW zzM*+&Hvy2HMI@ST-?oi6Exfp^O35r`FrEmpsKbqr3{ zYTEdm*0@kLRwQz>-8b}<=Q{Bzh^sV;FDN{+j1TR81^d|_l+-~7ENrzfBFYVIiI;vZ zhE=OdwRNcH4Q8@4LGS>iNcfCjmA*R%-(;4$b~lzJ}Da( z7W3F9$SEiU_R>!`h9ld{33==z1Yxh|kJJ(qFZ@+{(1m5j=0woY-%Ei!%qgouN`j0L zHA6(LAMOU>x+MfxzrxfKdg+JN6 zHz=r2?+-9mYRZ{Adb<05!V}IkXJSmiQ`2FQ9C&9%`qmXvy{%d_lP{YHRLFlvqI;@ zt*6r~qSAf+Mc8JwWf7{sVa-tm<*BgfH_@kJ1n6`0gJn*W1pbs!{R1Rzm_lJXxEP-PjMcNUz4ct5~r{}Sc zxT#4~`ARrRU3q-zq@^PYtLRCl&b$~vB0KuMAju#@R8l9wZhvj9Hu(g z8&C>%OAdF!`@7jcbx|DrRD(__ zA~*F%oZa9DStc6IM{F<_F*@y`##X*U)tL}`$G@da1!QsB*{}R6KR9JVj1htu*|nb< zZ|N%=7YcbtT>g1x-LQnnn1AzE1{_~CVqZ17HVD(tH_fr%OA2US{PRsckMx_;J(4@Z zw(wTU(Jqt0j#?S^9cWzT^Y&qL_RFQJ{u=UM5s5BwSR5{u4zESUV)U3{s_!gH#S*Ji9jkXO 
zmqh5dQyktf@MeAHRx{6^*CQ7UmFGFaI~ln&ugRs8^X9*BoH7ezU8JvmILyAN48GpG z%Evn#iDo?VvcVw{bmXB>YI+PL&g zk%@XL)tKVi{^8xTy{cB9ev*~f+VW7AFwLP-@I1|;f4eMflsCkFZ!*!=pk{V4Oz2&! zPsDX?ccgf5A^Wg2Sbnz{VcxyQ{K0P0skkFyplq{g=Re@nzr=htu$}biS}#E_YjB() z`YcI@CGiqB7PfB?9Tb{;U~KVE$%2*xIvv4rv82V&IpzFp{MoT>gGoQxz9`#Q;z{>w z$9dXBVojU8g@2_IbGRk~L59+;Z96Z7bOEeA19FW0t(zC_!=o0g$D{fFX>?2#iGj|@ z5h?wHh^%Xrt~QXVvZ-oB)!y}7*dJo^g(v~bAIfHh-ElrG{{2;cp25M3^*u(MQ6-xV zsM5QdO)>CKQ8hk7z-8lu{h9cI&3+U;k1a&oT4%$EBIJR)2HNHH=IHRYv4dknE`#r< zG$hqC5=DHnT4050#CdlZ7yPmOpkkrJgZql0v0BLa57!2TUa3`T@grdZTp$hu8L^1)ieXihu;vFYV3cFxfPAkLbv3ljI0DE1TrHC zJ4$;vIro2AqEpY)wURwl79B1}$y@`bCYUc9)Y23Pf2tG@c1_Zzido&4f0UPJV5voU zl?z2YTC7;gpMY)Z+Hz~qMHQuyts7T4_{PghIXV4f#$hRj41%F`3j4qR3!n-|<}z@X z+J6|OM5HeXTS1&;m7gJ0LN}C^P?s7O4IH;BpOTgeCF@siPz3Gd23yh!1kmn^G8iPu zyC|+2d;hU8YnDjuC!l)W#aiwEfD1~w-+gKMlT13bb)+%n*`XszO&qx2-kw`1TSOtD zEpjJx5C-E!wWDPLPXATXDtaq7S29*|4%HQR{8-!yUOKyX}x5>o(^gTm2z# zo@#K%#o*zaZWmKTt3E}Uj{l|?1kPcJb5KZC#M{N*u2wlO`Q!3T%``@GHaY$5S3$n% z26)@O(dhX5Yc_v}5SFOw+p*W+cWC@{8qm-jss{}IO?vKyN)4tyk{jn&?hx1L)%Ef@ z>8}3n(W|TaX$N6833bk~tZWkm4c>vijK*)d2+6`SdLD?jg%nu3=jAE6EO&<0o8H&= z->oeFZt+yB(dkMuqGtXd02M*%z9{t@lrxG-U3u@L>t)TRAyTINA?ebkqcwR07eryUzDOJ3%|z%^B91zOrn| zTCvTwova~W@WU8G>V}jlP_SS@x#*&cvg}M1z(yowD|Hh^rrnk={!v;Ss?8Srcz=w!hW=7sq+X+m7 z1=^hUa|M8U)mUp#Wet8eM{q|b~vn}no-qjH%O#6)Wt?%d?Fc0z9I#GwgKr8jYmCDLHrWO`IB`K+feTJw?{np4a^%EGnQm~A%P5~|3AK%R%`N9^J<%O3nm8Gkmky2$3nt|maJG%pROfr3uYm_=f15OEk_;H|g#7;ZK(m?Cv)f#6bZgkCynH{Uzd6OZ zwQyjXVnX{JkE|Q%U^89PaYO|}1Ok-hg@4Sqra5U9Je`ftipkpz zbIH6})9q$r6*g3`bKdKI_e=rIE3)EXKg9(+4`q@`4ECGuHeT0`TxndndRb-g{1*RR z#nfv;t32|yS$)8!AK_M4Vkk2sypKLhgpaCRgw@E6*IX&zw$5W$NB*lx>b~X8Wj5_! 
zkyj><`B*;x>@&-&!7pVfzxKr|Qddga?z;Hmi|uT8+MY`P-P$f-0im9L%Ni$7|%0y}NgriRf7t6WV?%D|yS`*LB*3y3dutQu@5wxC#mawW z+2UE!=jy7)HQsKE3C)9Nc!%>PC>EUk4a`3HP=EPz(V$fIHBYp{#Ou>Bea$M`?bb$H z8HBU%d3r82*XX-=>3IG1*X^NH%t(?y@8Ar@i{+OauZ=d#&`-#i|NBbDkN;ZQHjk8; zG7(a!U~Xe>v`ZeizrVcy?h}$P&mn2hppld-SI(J5NPge3V~4!;)?31<%9u-_;3cPB zo;+Sj)+oVxXu=wWd{s_jd?1E$P`awz zLVH1nUuRE~d#jxBK9&6a*?aNI3Nz_Fwn1dCX+l+$1<=eDhXk&n24@ctE35Kw<{t>Z>o83tLvPN`|+}c)Q;FvIg?P zi@lAKj$l2k1Rn%L4X}KPZW)MnaQYW2J{Mb9d{(uJ_?wFR>wD(?neKWcLp|*B4 z3!x{FboN$R=>O=W1LXhy@5>~&y!qz4W=}y8Y1FWkS)z@#%z+ya(5}1gI_m~F$paz$ z1029)lu^)bAAb10bn9M6YS%F?XOe|~SMHEYd%P)iYdmd))-Pn?-tPwvJ;9Jmhrg($$`Q@Yi2|%b|mZEcSRO?z^a6j6CsJMH%t( zK$$jmtXzNH1!fjBK_Uwo;UnJ(sa$EJJo$J%IbkNcTC{ALxRK9E7idCX36+52#Uu}I zaB)m9XwV?DN;FIMn!~ARCuo+rC0Fj#(!6;KiHwY}BnlSVDXtm(XhVbzgwJ)$7h7*ip4v05!UlOGOBT-iIni7PWfdF}DL<`2`xJZ#9!kA2P5*A~ke=>PcBd$|i<#P$3yC5K$U-?c6QB%Xk)99MWCWqj*!`F+nRd2Xq3 zE@E!#nu3X9&!WfWw_oR2dZ+@gWqYc(=N3L&8y?V(KxTC0H{X0yN*XtfGv_?wsm`go zcH5n8e_>kbe1cx zsVC1oGbjm&GfHP~g~K>#17Gw%di1cdcfVW4d@;th`TF%6%qgrB<@Vd^OToxoR!~9l zoiynO!H)HtZ@w`mf9<>#8B*850c}I1KR`=pF~%HP^z8FJ&UshvzpUq5^3UQe(zACx z`El|;azmdFd=9eD4eM!4d)~E-p8WmM@2D8=6*=eNjW=F5Oy1c3j($;wJbR1u?R&N1 z8Sj`qTgyyY#>~MiuQzX1TwZ;tv0T@utWlB*N$!MGQm1-R8U20-vuCD>v1{Kc2aK!C z)~#BadLFj6#5j4FYfkI^^2;wx8E@D_Z)%k34Ve*K~nCa0yOS%G?ecq@5lRC_bgb3it4Ib!zt zwY1a#Kbtmflt0W$*2IYu?QAh7eDK^r2gSB33GfT*w{|DC+t}vD8dM# zl&P2`O-tZ@_%C=uHiDOO`0_<&#n0PW4wue7E)%yT*v`OUh zFadu6bMPf_LHWeA1h9}C4_JjHoH**$_3w-wr=+4e=y7I;aQUuH9*Hz?|8T8u{?scb zL4Nyr#yJ8$xS;&B1g$gALTma1&QXeod6u7@c|TxQD90Q3+vQCCOXhR_bME)z;UfEX zY)@Q4((=63*JH2O%F-}oKxF|3v-h8V`jJeTe&4wzpS^O_G;+I^FUgfxH<3pMT&%s2+w{UzA2GK4rZ)*eXIxN&3a?hO}jPPb{8LEL^zW%>4- zkrJLaOm^&umD*L!;Wd>aj34pC)(qkWv#B)UV7u9XTf^j6IaACaywREW`0}eU zGVhN89!c=!m_Ou=k<$$bMY{K_CwJY`&ImJSW<`@f@X(_jrFOMvtcy2J2=xb@lb4~t zaU;h*fco|83-m`&P42c1`akkOb$RQJfs)JEFE9OPrL?=ynDCU1kiv!Xn9dPzPH25x z_Q(Bgjz!yJ@@ga%Dpb%SLOKNxPKHds!z`+7xoG2U?OfBn?c4VoZ09r%Cc4Qjx3{)! 
zJABxW!sPOwC|;t$k?N|YuDcHdl)i!fHZ7)SS~c1 z5@*f+T!z1R(9ColmayFBASkoy+pb+vY1hW=jY&9ZH?USSCrDMR8EFQe8l`eJ;zON07_q(QxcW;xrmlo_bYl+J6std%mnI){8SalKLea?7OeC&}c= z6U}DfZITdwz$k^AWpC_ZJJE8@HP>VcKT8d682lJh7*7xY*p$yKVYIQC_7qguSQd9m zYEMbQCgW@dF?q!bdni3@-li-B4#on1xcu}~Pxo5-DWC$Vn!V6&+t#m^J1cn4^Z#0& z5k700RfnS~%TPvn$szTlbKCK$V#SJf=3URTNe?FA)!>+@O3B=5Q{>hdl>KwXs{Ljr zf5E=f@@S3R@?!lk`QDhk^qg}-#+lju8!DcAKX2aIn9yAN`&uh^!CR>(vJCuMWry_0 zN#3cN_5Y|s048)I96rIMRMEVic|>|^4WB^K!Jz`4=in1M{*tC8XnXJ+^(J!WQt`ZK z&7J}JP7~w2;p{7|IdobM8GXc?LaTKtVQ8>#;~p!MQ-<~(r;Mxj`bxFF;8$slya7Ls z2~YXmzineY+ZhGwe@o8nF?q4RXJwH(#F$n5*sc344`y|e6P%-nY9WOlgt2$7L_rT+sL8T;tZ&Dje>5`|Y4 zjHqbx%KFI8{#CYYS!)LI#1l^#?fQr@h3F_3U))6+HfSIb#*~3ht!5CH^(rfm4ICl= zt~w+izuW8_Gl+ZdsxNomv+W!+2qh-8NiJ}wR6kGud!qCB@#D?Dn>tdd^0^10{k34D z+;Zc`(yC2W;|8|040+~z)7bIG%TjMo*&BDkk#h5GEzJz`|Cx=lPbAhkBXssykh$2f zVMA+jb??3R8igdkbno8H(!ZWjI`f3*u|BGyf0Kqqq(}FPa?>r3$*$ceWa@YA4B$>m zfx=-%Q4}c}wbYoQG&2|)Cp|Cgkf>BbD}U49+q-1Nb?3Y3M8Ldx^Q3Dx&wZ&>slu{k z#j}P%os{ptT_j^Z`%NlTEGpG&l=NDC`c&sGwXGMA^5x6hJ^_x2Fp2c^J7-}q$NKf_ z$xnn-e?^9-gI23nW>eeZ0 z`fj+CEwkFZPeF-{C~5YfoRHOPBg}!T_uF#^(#nLLbqwb3Wy+M65hLC(P7MZ{vu6I5 zflt?#{{8aE(&c+4x=22=GGds&9pen~Onc1P=ui6W{{jlL#*2jbn#me#S)!BOGDM%&a&Zf6P)9a zQ=r?nWwTVS<~a{!(-qy!!HH)D>&3BWaEQxSZvjQBctn`p2g_Mx2+b%bl$qMPddoXl zuRHmKj)C-FuAlHM+o$_g%y47Qv=OU7PzLlx(APSoHefkAORZ6t7JPd(pv|oOiC0)CF z7LfMG9W}Pje^~+9Q$e1tb-LKh437TX=B>qyH_xS_-FBVMj$R_ z&TnXI&Qa-P8Z)UG#PsQ#&oP6zs7o37@%xSEm_fuD&s^J$*PAWI^VsUutBs4^HrAFt zJu@v@XHRsVKYxzgaC3FNB}w|v;>}W}YH|7SlfLHk*C_e%howd!UF=mU;o&Y_Ysy3S zPd0RZ&x|GjNu?Y=x^(Gc+`N8ePUpT}Mt}CIEc$!A5k5kxgalPn)HB%_t+N#!J}}k@D&L9p&;qmE^MzR?4=W$K+zuW(*$j@od7{ zXXw8}HXCm}&>trNC__;Id<@*41Z;jy#3BH>3d5* zdE$x3v+Ph<<#XVS@49-yY0`^Jvn|YX*R$weMhoHRh^6kmr&HiSFcFz3AW%;sg(x~oWkM|_Mh`Zn&J9gRS zNje%)1?G&*1Y{R82yb*=zI=tWY1b_|x$N)0x3&`EmMyXJ`iSYWZtZS+W`)O_&t7cV zx~ehXT4!xD{YhtUW!9}*&#tQV>vyMFx!Na}m`&_6XH1Z1pZZMxS+d)h?--%eV5?cP zBGRm>@!XbRb~PJ*Q@%nzDPOg$nO#qi&&I4ar-0^@ugo5T_uqfZnxULNc}N!gy}_9D 
z9F^;@zuubZd8&zzV)GViZ%g57o-h%-jk6}OjFxj;8UWqCeX#@dtw^c|yJ;k*%1GfE) zH%rRLbIO86yXAj_ewTli9hIlezNl-j?PF&-nJrX4;Vrl9dL7{g^UWD%Gvk5S^@eu& zjJ|FrX`mhQi^p;KEbT~)Fej&S$@-0lTg#iwbgJ16+iemGv!0QfIE0 z+M29Cd`b$$6tGGO<>(B2T5$o7;~8+J*rLvP!g3oE95a@k?3D&#((UIHa_?W@T%>7K4!NhB$rtwS+Ho2lq+R0#H@!gjS~HYWvUk@psZy!1T-c(7G51+6 z6^#i`$x>xx?AM!R-tY19;DZlZPeuNk+4uGifxPa-1Neuj(M|;m7M5*W<2+Vn(ZWsg z_@m=xz!P1ieTVAO@80$@_vcme`}}o2m6FYy<4nF4ZT^%4gYd;9JQnWdBFvo)pDzu6 z(F)ZA2R4*W7uS%y;SA(v-->6cI%#eO6Z;PylbO>u$x(Ak`32Pq$X^R)$*O;sNlZ*> zqr6s-#*G>ov(F2XK9|jZfq_Mf7Rj&|hslzKzZp}t8gj)|4W&pC!}M8}w}J=*oDYG- z{2gx;li4#j$@ZNGq-Kq1v$D8Eh7B8RcuFZLU8bxxXJsE>Rw*Vl6WCVF6^hI!KWsZG z*L$(8PiwPI-j*N*3q;ubRoI|BuXtdy9I@%7Hi8Q_Al zh+2MHD5Lxu41+6T;erK>5PHP8ANHKnN5%&Kjz8n2qdDW@OS31&+deAi(|gQ@88tCe zUae)5hH}t$Q9cn-sIW}le$a19$+z22OQhk6!Uf2iA0JQ?I;_QOMK2I#tdSfY&60cW)KxC z=0CH+h)zwk;p|{<_@gz0c)pPgG4b}h>qz4!rR4v<+8}Sfy&LgtdOUl ze$LWAloS%BAAQc*?`W&YNV5dIb-&({BrRVdT7I7OuRQkfSi7g>y-`2g|GW39m*l?W zKele!Z}L5}1g!lHENV=Ce_DRS)N0kLCH?Nc$LtBZ!t|T{^1yw+7|%}|%;r)f6w=L7 z3I-F=1;dQORY#^z?j|3<-&9&(xKHNK!+X>S`Qr1>tv~gYvdNDB;~x0dTW*zWufJ7( zou6AC>_6Xl3jEhhC>Re?sd*toJ7bX`eQ&HHlg4+E31797K3ANU_3LIDv$)sgU2{k& zt5~#qRs}FlAO&*~(Iw>Tt^TXcS6hsULrj@--Z3Mq@BXWuo0F3QPAtIh2w<~fm1;6( zW2zAE%+BlgH3v`0p5v#?**#Ux!G+g{y!@ejQt?ZDiRDb4XYWoN(2_jOnG}yMino2U zevzE=sxjaC-{N@rX}kA+xW(AwW3m(-9i2EXP^Olfq=56_N0sWe4BgBr=~i~|{KB=2 z@PE2bK6%N^==%!na~QwToDpL78z&DL;S*qkpctuMtG1y0cnWiNox%4+<4JDl@{=~c zv&K6fz|drqSMA!hEe14CUjn|~lTu0^8awyAR0jNG3>$pVnf)*+DI}En*pkym=4)+N zjkFFa)z_<10#N}!j6!2nR3RfE#cJMuNw;m=XKnA89id}urz6csK#Ahi`EldM$-%e{ z#tb6AQLfIKL3ll&28rqH27BYClnuEX*Bmwn*UgtVja_!1>u-}=Z@tA zu9MDmi16?*BXk||nDrYYXUHX;N19pAT;?2yD$=b-UD>dHj}cl9c)VXPLT9z1^(UP* z|KuqvYY0D|nNw*`#qW}0B?}r4N`}6;+VR@74*Fw;aw3PUU3tiC)SoTy8^P?+0RyCO z-`wORA!Hk zmk@7Eqh?NDW*tI+JIc{W*%=KeC6y{wlAgVK$(?uJZ9Iq7kyWeXWdA-RaF`Xl|CAEi z+EX}#0Zf(5!kX;bbIfei-X`yUu-eR^M#=vM4m94qK9?3Pj0s?f1($Bt%| zD)HhLSE1;(v%ym2SI$MCKdX47|q+;a2n z_Kcd|y?a~x^#2~nnr}=j@XpPeHIo_JeV+`RT4CeY-3juCiOKPi(mZ{s(I&4*hyQZ 
zkEiE-PRG*`h$^%cEgEHJ-1Zpr3FFGs4`Kg)1p5T*I+ijJGD$Pq`FK7%nL#9{bFt#l zmd<$2N**F3!>#by@6OMqyHQ9GM4K6pXueK+-?lBz%#7!^0Q4uFlh;juL;pYi_`~c2 zcv{x3S|ZOp)6|@!(aZ=!Mp!c1w_!Er))WA5ZizQ|_+j!oiH$pAOiup4z4L&RqR1Nf zBdE)gbIy{JiewOwB#He8FB>Gc|7j55Q`r{l)T#7Q5?Idu=od&4f25@qtbiZxx*a=2-MfBTZ;^dP72 zzULmcuU|WzaQDo&qQmaP!H)}ex@pB{$jt9Hy9F69a;;PQxFlo{x z_c3LdlrFYOUVZ6&sam73E9BhlGp>niEezpO02n(1~KDO3VMv-7Moit#8(z-*7l(@+!NJI57 zJvzZPTJdI^XT?XR8Z~Rl1B;f)OXsF9U*o=5MD~%zB3IvZSxOqDh0}M&|B3O3YgKN| zm*L&J|26XBGfzsnxI=PXDhrrKN{W`r=93P(kNAJ~iz>qw#>gDXC^Vi_-$p7dREpH| zzjp0&f=sbb|=Txo({XZe`f;U&s6OZR}e| z-XAtY`t*F=SKc0Y_|k|tKK*2#>jG8J8<-i*IkklOTK?C8f0aI0UMruB+bcI+|BZ}v zgV%St8G(*gKvTQ&xBaeyMK_fHbN%YS`n-#b9@$#DT(no_&7B}mKmCmS`(OX^1A^0t zCCeVxjE0P&Lx;|C*X>8g4~4rSy85v$Nm>R8@ABQZkCFE6FFGa~(mEbTytEo(jLF5*GAe#U;+uQtD%j(Vv){J;!(_`&aG#m~F_(4h?*_qi67oz4K^tG?@|kmS}v za@GiIiLG6;*R{y*mjZ73o$}@LyOqlGNUfUHg)cbS(eX~Qh~VVOljVjRZj{?@8|Vhj zuJa2^e)+#O^2Fa~yDoY&+?qA4>2XB1Y&#%JmhO`Mv^qM&WS5(2Y9HIb86yd+_qu_? zIi*7R0#c;VS#CzN%5FBa8Yva@Q^`;ZKnfy9Zq`YnAf%Ah$|s?@%tZj5z!CDin0$lh)TC*1VWz5&)}-F0C7L_wY#kHDR#XW<#dTws`}?Q^_6AfR3EAnvEmEr z1G=RZ@g7(7l`)&*T#HHi^0vLMP%JWLzcg;uRv3s*ysF1q!Pm57ky)>SH}R;<#LaAH z{ja-GuAhBK9&l@s#Nt>}s?nrhXC&swJ4QIt}5PIJY8k{R1j-2CK zjeh@`AKZc;$DkL?z|tglh8T6xUj|Y=`XFlPpg?zfgMj`_e6xxK`Hx zazO5P#@;J#%++GYk=CU?{r)G9KlOvF=ofbj#nzYUGbYHL?wy%GfBq@$@`x@|C=pWd zQCML(P$?j!qA+^(uTu7t{V|mRd{yo;)lHS^@|DId+xYl`3*(WNzE7D|2ToVH^tq51 zp$~X;gplsWHS3=zeWxFkFE_?s0fp%RH!;dQwP5pbM&dX;qF=dnTG!24yEM z*P1vdgdZvW8ZcHT~nsuT#6XQ@vlA5|S@)@0w50 zdd7Rr^vHDOYWu%~^7bk>*!#EoX(ZVhhw`NBHh1;RgHo$jE!VT8l#h$}m`Ff%g7io` zR>!m8IzsdSud*ZeRe!!g2Dvds*EoHQ#j)t-xs#HmP18oMp4Tzu9Z`KF9bBxs5GpUS zDg*LjxKM|_<(6CI%?OdENB_~ULt%ciL%o#prQJa|T$>5y44-+BLj*GS*q|CGbhPZ}=@!Chnv9Mkd6nxeq-! 
z$hB@I%JMO*WbCI2Zh^lt^4LEXO1%d8; z`SRuG>)hl4%iR9L^5QGY-G_;7j@>#}pyZW>i*`zrW+lAgm*6V*vt}NUbI;4~TBFOj zE@7YfmePkFddM54T9}yF2NWS?cr~KI2P3jVZo9tSKE_AH`bbF!eE^-nteDIoA#}H+fI#u-nj;vRMKJdKA zBh=GHxN_ynz7BKyyr1Q%(!gizVe#uWX^ZrNz<=pN|`*# zaz*(gQq@&G%eyssHoFP(Ho0kvKU$wdX1g&Y)Lr}ZYv~!o70>ev%8k7GoWseT>tW|Y zZ-(v!I`)8Z9%KqfXW!Z{`Q2Qn#juF_Rl~6++_+6%RBG>PS_=ttJIWBF1|!c zISVotrA&XvMN(l$QArLkqp9_f1V%WjduZhr7_MsW_87W>eX zgoFecJ9eyJJ$}T9VQ!#a9yc(npv;=ZID-T?t?x>yes&=#Qz2fKFWV*ae%$7tMQ&GN zShd#8Jm!p*I5(r%GIwnG@?CC~_p@B<$N~2}cDPoxb9}{(rp4^$w1p6zK#%csiPCqE zccMgzl5)$ym)umzRisMQ;!z_Q_2C@3{+f54!Elo==rD5dymMqg6aZTAkwL}s&LITn z^X^iq)J#73E|cMLFW-FqjeDj`+}asy+?b$ha?hZ9W#_J(GJgDzuGOoZ^zPkPzL}OF zGiR-l1Fkjp3fJu2zWrsgc=1kQan_wXll<3yR%B*|FAA0H;NXK!^Gq}Y=b7-#IYwSM z{o5Jx^QuMi(U_&qelIBZ-glpyufD8|9y7;vZEN7WeJx(PQ>IQ^Er}bpN|!EOrB|=3 zTz=@L*xt-a*@s+Sx4=)oPQk%+l^KzTMav%@p-j;beV|NH9Y^2!&p8DOyJ?KwII<5` zCdt26Cd+0w;Rx$BFnJp@63uaI6}+|Tuw3t^R~_!UV;sp}Tx!>A;Pru-itxpY7xy7L zf=3_GbuCw-mD|ocu>{Zuo;^SXEQx@^tel}qZlR!80~@+Q>_{-YDTZ|DT^^PV9~w2u55T@BWgkQdC7f}>Ok%kU6_**?bI9avXUHfwi2du``DM=G;*!5=eQDLEt<Y84&Kd>tE8l&-qb(t=AhP+_PuDG;R2V z`|?>!I(53p3(X4`eCZT&tS=qiAV!TE<-7Lm-J9epU=>_P=>zrlf3lm0x4A@pA;$PrjdJQzFUlNj}$2xFDsVqk$WGQDeE`vktU5w zNKw}+G3kGETo($r0(s4&${h}9@Lto}j>MTt34?tH-}|;)*$<_RevlmL?r??rxpg0L z>p3)YEex$QlyhqLwRP)O89sctt7MLMihbA_LRH;bZbfA6x;$>MdSN$Ex0Fnu`Lnca zRzS*EDkS$mG)G>2eUa3uS=3cT;$+ahvt_GWxTjRP0&bDT?ef>#rpn&^hotQVWh7sG zoP0d)TS-XR;8t;WhMLPnvZEtY(9D<&O$$1q_Ok``+o8!A^U|AYCR>N)zY1vq-I=lFuRU=i8j z>Uh&O9+dHJCZcx}lI4BZ*Z$LuBC~cnW36Cumyc9+EpBCf0Z2tdE1y(2*w#W^bsRl6 zm$?Y={-Ot}6NDHf!F^O@^13yfmT%r8Z!Oy^PycjC7VXI)bG9bCbs&z&NN0RM?<$}- zf9JY-ZO$QuN>z|*HEa9lAJhwsRjq`QpYmY_LzOy5v<`XERUBb3zeaZS89lCAqn6BG zyImfcu~9a-1wGGl!;5`a5TA|5{8sx#?J`Uub1c^;bV2g;waY zW8SCv01V-&dZC<^IsD1Ls5|B_R8$HUE+RSO;$@GVCBL3^bn=W+Wy?tkH}M6}9hP{W zzkakJ4@5uUjXvPf1uD$3I^+#{EJoD3dY`%CE=KG2-SufnWIw6{V7! 
z{<>Q2x>D7>V`o>dk&*`^o64#o&j)$&=&!zSsg*rp+d@?c6@*C0%(Vj3xN&2*GH{aA zuJ(Y;`eCrtZ%|%&M+`S?*eMsadqD~nEG?H_dRYoo;Hn{lGT_tmvJppC>IRV(=awy7 z`6kS>E8Hh@7d|94Yn6`3R}ZdUy;Ux{@CB({sfMetw99bLnoelQbL{CVnsnpQ=b1An z&-3^0NY`F_gY4e5Mn3&GL57c9>YDi~$dJbuy8ew-{i4XJD0=4?i5?l071iJ@%My!VQ%ntNuq9@Vsm2Km2&-U`j@*&8d9l z?$>XCo84@t)H?e?*T1%m^z40}>mysh&%n0cEws3P-F81FX?4O@H^IRP6+Mn=xuj(haZmti<&%)Yk^SgA@t}KL zXD)LR&}XD|3@SgG=c(a}c{L194PX3Gg z!k<6xOxIntk~C;rTdKJQ=8zc~@tRK)uYdntI=mNnEiYn~J1d+YWJ}q{^H(F4ZLK(K z9zj095&@h8H}VkpaGYlYJK`ZfKp)TrHA=`gxQ64$qYrRZeL$a7C(r}cU-SXTaXe{K zo~se7GILC2MhB1^kDTOZ$cg8^NmKQ~M;^nE>JTCFs$rrbQK?LNKgwD4o8!Uz<(`lc zS=Dn7?B|~J-ZkxT;{M@n)RXmQgAW!EM2=uhnly7z;-q8S7o>g1S~BGCm!;aJgf%o? z9{QEM{_5ARS+l9DC|~H~L3kAwz}&b3Ryo26o!UO+qpx8kSHQuVw{W-XPPE#6RV?bh zI^OH%@LwgPMt|g{mu)OPdUTgBzL+S(-diGj-D;}cx^?pM&X_UFt+TOMiWb=|6DLme z$}o8FU_baD(6KtbzqB~h%8@5eytHoqv~=m(Kpq^@#eMNE8a6qm{(b)GNiyv1>C&uO zD{0m0=%O_l%{gr1Zl}IHsU2s3A(shE|8AQzG!lJF77I~Mb6rzje|>~EpnCW2=?szK z^1~aeB$va=*Yg%FTDnTqL^-g3nUpM1%Fplww}&5oI9tugq&g9N$!THW`H|sjMnIc|IJaXi%V%xNjY@V(@cOJwQNrJf7#eRg+z`R~=Mms|Vg zMqdFB-fxf{`zh$~p^gH%y;}pNZQHg&YtzYgJUPY+0n`z z=b#rH_z#+?ctLIw|@>ImtO3Cl0u&zVPf}tN8;V`{0Z|5np=sf%Np5@SKn#=wZ$AVBUou zsE(^4Mf!Nv2cADNt3C&HLeEXFKB&yZgF{~4B@Nj}+#H7?^6Cd9eed+1V8=N;D`Zt& zhO6o@TvUfOZPEyJVQbTRfff!V1|$+B5iKN`jf;%oyYFU8i^fkncyZFYZFSdQypTKp z47aG?RyS+VS~qrJqn|C|$}9W(FLQuZxWq}^+!O)>Aia=7Z!X;bd~Rj)w}!2f!@uT| z!GrIRE3drLD@uj(<-Ba2F6!hAk;~lF$DhatAAaa7K~#+U_Px>#zFjI)rZ8jBH*R~0 zTP^+tKX{4$?Z^(dSX{K68#Zj{o5%P%-uT?d-Ak2MTDCe{%9Std(sg4o9OobAtnq7` zRIgsm$$GVy`&4pHjYENgdFAC-mr8+rMdhid9!aTSmU3Th>({R<7rV;lg>F`@4@Q3I z^kAg>qEW&7@;bc4X46o0h!VldhM1OJUpz1w?-?h96t!xknI)JBQ zfyXG(FXCE|tFCL>C;Of)y|f7gb%HzzUC^(~n&)v2dckoFRVqh#&G+>AYQ9E($5(Ll zg0JGL7gU(kbB)$F(w6erGq{%gR^_6Oge>ay>|kC@#hLp;CUjoYS6xO|kxlbyw$Xod zoO6f^d9~i4X=KNc`y_ARzTpBdxZ$+y<5?>+{dhq>!83p@UhA%KL?_S#4T+C!&LJJ* z#iz&VGtukO)zAvQ=53^Z`p$6RTpR zm-D)H>wGsBI1>kKvHDq?C8)rslC!c34TlUFB0Pf!9(X`*z4g{(D#D@s7cE-k^{HLE zcE?0R)BTJYGyK>UTE=d==_ap7$DEV}|L`n%Z}g=PefN0(sHhS0-tk`ILo>F}j0F84 
zbHfJ&H+oJ1r+UonZ0NRzTDE9qgUTFb68cHmpp^~H z@3gW(MMEna=sm~LZFC&{HkWw`kdLXcK^})EuSXXcuSgiw3pHHOC9OcA7gUO@LJntNcI{lSVe z*QHnH%=`5JD4DV)RDQ5U2gobn8!QWh6=uyRc*gL8rir9w)I|u`oJ~F%yUH+HNaJF^i}U8s0-*1*Bno|t@REm;hXk>pL)C_z9HM@WPMCJKQPpb&VJga7PD_#kAi2bfxP3Ks=X zLqsb`C<}!L;ME}E+7scJnTtw2T|h93j>qv(h>Lq7u1KK6r30hv$~oNqvTw|oF|v8{ zX1VUV>wIM)wezCc5B~%=XqUo9zoJBQU1mOqds3a?y~C4<;mL%xz|&=&by(C()W;R6 zrGy1x>286gL&~K=KoC$;It6K@rQuSG(jqM#igY(fr-+0|cS%UU!+qcTJ{K?l@bIuZ zJ9}p4%sFS~_x*S$TkV!OQi^ofNp;EwH+zQo?E9z*ZPa!8a3qe?2vcI!_MLC4l1vUc z9Nf!Ak}7qQlrTS`FQKiYy-awfS+C=_;Qi@d~N3+EP>vBB?@_ z$Gby{7W=0$u4r2fNP$TDJ+EzSZQ;A#p%t^F+f;2|B1_iK z@?2l9m#)JdXfX_2G$Pp${Icay8OJac^cj*P)>Y*<_^tEz^d7?or0@;*Ms8F66b>Nn_z;Sk{cv zGGn?V{Bc~`!8!GOAK`j1%lSk_m1AItoxY=m1q{hv}Yi`RGVa8;%~QhL22o%%r~ zjeHQf()U@08Lm@-qUasqbvM^NdHt5h!^RNtyr^_t9qp0d{0#?z?GI18^G-^aTNaDE zrB7QFSxz3L+6c?-L^~adYpNikvVNsRPk2aqEcRPZ*f&i})Ej18#oaqparPJW$KrEG zU74@O)AHc-ct}B3KxI!QPEWB$OV49@+TJTJI;hrbR?O~H(i0(WpV}iEx*{@f^3G{D zJ5H@O-La7K*Yi32Q;UM~uZ0?9&pdKx|~se41oATe|-aQoBwPt>#r zjOtm^LCd1|M09yw2XSDIP<<8J?pmvc7B9z%*k_m95T!0{o`Bi zPH3Z(Y5ObhmG1jO(In4#;R8!E(SN(9)B;9 zydN(LN6X}0NX3@PoKsB5s70a#6I?LUNkpX(6ob)T(H7wev^{VfeEs`f3 z8tqJC^TaLlCMZ?&PIw%U;#d54mz&n`$=p&mH}Cg$ACci^2vN-wxpi8A~`U@EiErEAX*7CpM=<5tU1&9=u06w4&H3b8*`{eM7KoD9msv)TVn7uFgLw#T=?C zHJ%Ychwxa`o%m%qwFtnY)kf@+?ibECVC3J)^d@vNLVC%x1bfXYCWN zM6t;(m6d+L!66wUPOMtBDl3kEGx5z_2Sd~Z$zLQmjLjxV>jGzcl3#6Jhaq2gIPnq* zyt*jmqFZ4*H$2Vlt>oby$n7OmJrmbt?k3XUdewwzpqha3o6OCO9IO7|z1(o%!JZWc zw$UIQY9-b0#BfQu62hI7O&iun%;0ScHU!IlbsOWd4_rkRNuOE_D`J&v#2z(kkNnTUMw=b5_z;A1^&2m695fD;auhc8Fnot}x0%D}E0L zdFoG4K%1G-7|K@dzqEv)Jx~e?wnjrS_}RtcTPZRlWKr0aL@Ssj^050X7ma&|`f9S2 zlxHgWAu4vIp*e-u#KANU!5AGk3aO+dwhh9iR`n{YZCxXcf9!(ToC_uShm}Ey%Bp}{ zfe69ao&IA?)f_2-{;F}98s*muw!8b*Xez%p=fuTwR&-&hx4fP>6_DpsN@$-QP7=-| zq>ikTC~$b+;^p;qldWqk4_$n^jP7BfRYxn}N+6>##Yr}@kGSv$BtG|k=i$4Rkc#Ff@70F+3F#LvurBY~ZJZF3`SmGE9*U=UY1sv4 zy}3BfAhcSOF(1P1Pter5T;MZ(cMi+l*SXwuNFkPZD2CZ!G#);5k=u5a7YPk&!6x>a 
zEH{>hiFTg6UNRd++8(xelT`&Z5n`5^P6=HNEIZGRxT*4%58(?WXP`+{9$u~W4Q|h1 z!fw;svte2qP;%P#zH3wa0bOeBmoY1 zavevcxBvge&6uX0Dc{&T_e_kj@ zGb0;DMk{iMBtr$e-fhPq2`C@l*RRzOuzTcOSe3@gs1y%wehJF~Dc9wvP7{ZO&hMc% zV!{`(gWKQ!zrWziH)BfD=--(M2*#YcpBnnG42Y(JhfL&_ehpr+?#oHJixm?Fak@K}%!1R?idZBA;_Zh&`T*O;Rc0 zMeM-V^H{eZl&rYq0{i;4V=Tk)g3wKde~shc4VLlw<>jK{@yi3bQJImxr8J{-c?7zDO`skO3DQy;u zicw(#&wI#Y+>QyJ0Lt{@U9bxae?VV=8o(UKq5F*ZzPt2xlP?m$W4gMO+~6Zad;u$R zbq#>zX7tf}2$x!1kL_v6<^>o!|B!2~X4Won?BLSJeP;51Gxi^AG>0+bSEyxjz+kxL z)|=lelu|vnS>2tU`F=^@;+#=W)i2JTc6u%c?C$$Jo`f@ZFoiYo#tDP@K&C|q}gyMWBx z(s!FuF}sudD1VNH3WZ-@1k{$XMZVecTh+@nuZzP3NB1-*UgQ8;uHQ@jk^0Z3dC>g$BB9ysLe28&A5c{j z@{)L)-YRj~AdLivD?INciMTi_l5mc+(1d-3pBzoSd}4ZXjW-;YqdvUi&VJU-J!bp8 zTmjs1StEA2&nghb5F2$Ncym@VM@uGyC5aaJ-FqY9xlau5U?0cHuUy~z3acDiMH*wp zmzPS4$M6UmTMmer;Pe&H)&4ybgiUf3EDH#^PgHaPaIL4(u70nR+@ChUfycBPf5tZB z+CTj>+Sk5q!F12zCCN$hd7n-0?i9nP#D=YF(_XOe1Bc0=<=8NwK90@3*cuQfXt_` zi?k)beRB-#p%7_4MWj?DN(3|lRfl_iVL`u8wo~POo!KutR`u$1K}C5fT&OLFLO2*? zWp$MrV2GtJ%xMQ#PRo6)fC8r|oI`Hi{!vz;nw!}pHRy&}z#l2kuID8~DSBA2MT$c( zd6n8D^Ff_BqY>p3Yw%#KI=|UtF)JAxBu#s9)hru0`Z?UM2Y83F7n3|qfED;MUu!FR zlo+T|OjBEQmRvI&Urt5;$4F_{Ml!Yv4+{p8&N1L}R;&@!SHLG`tVI6@IUO0RtU@{G z%h~$}VP#O4{ryzas#d74T2CB1 zv&>&a1m?p%30!GAY_4CmE!^qpze^`lM%Zv`&#0t zX<9~V2c%H5kaJL+f3Lcu?yB+ANbO;e&+`3wW)4032l3L+JQ+9wq!$DO7>LVUOK!H* z{R%~mlo5teb@pqB4lB-XEoY`RkgVm45fVA;&=g{Z4 zQ2x-7_pkLW|L|*)IZVhLwM^ODvM9l!WDJk6ACVN!Hk|cTT>OYl72a97V&5d$J;4MZF|V^!6F_S|>Z-XcQo^8DmN(nZT6 zcM0`nr$YQ!-amJpY(*O7tU%I&XMKPG6Hw-Rfz(@f|+3FCFk4 z94Kc~ZcY_A%47oYmJoDE@NEtz=d9MD=2t_+MEc8tPH+Y|+v;)O_@hPU9iut50b;UcT89nbJ1pgQV8 za-}Oe5l|fbt;`STkgFOMT}8NjAB$hL@J$beY=C52#oC|2FobY$aPA^>9@xVutVIo- z?Nj2RI8Yr$xWNI1Eo;l~(CFynV#dIXVg|qg#+pdtz3HN5X*>x-dsuG!krBTz(VK&- zz(sapP#EKPGKA8X68*Yq!()!wX2W<6^a>{qFe{m0jHk~_bI;<*M8s*&xc_Wy3M!uf zw%XS}C6zE|is34-2mTKG09)JJzGq@~da%}5k?nl>dC_p+XfnlF{x2dn8f9Gr8sGJC zUGN;!lCCaX9WEOh{gnWLLu14ErBcK~(ER3Vi=ss?$2NDRY+V|9%~g03KS3R3E3^NJ za6ivzDPuLN!6|Z-_t5HNT-v94;PkcLHpFbYmY&CXjlk+_S)Azp@3-n@rV2meuuV|! 
zV6?T@+6AdsUEOrfG-pr{uML;zh!hfU#9xPOOaNZvcJV`!RGnGdE^k>!%~6&LyKG~Q z;G}Dwg2&6GJ|gXRoNirq>`rTtQ6>jdd1vB(tb9fo`kh5BK)TLqSOmfT1%xu$PjXdw zK8$LQ)M}I@hBh>AK6Q6hrac+-yGRdga4kl^_UO$CT03JXchmhaD53WF% z_Wqx2=KrDv5ZLl@U_}6`z^NN`g#0y$Mk9bc^%gHa(Tm8*D}GSc|S?gW4k)V2I4 zh~E}j#=;4;tL$b>!Tvrx?GrDN3uG?UYaGpLmXmGD&Kthr{wo3p+pX92b{m>~y@-n$ zBbbxK4{d6yUuY!AxjfZZq=+qLR@rsf_y)E85Y`D_o2n{(J{9Pq`s`y2X^V5iTh=Ez z(u`8F@0|hi=or8{22*GG6I#c6S?>Ia?}N6uBP{8AL<1LIoGSNnVIpL{Mn zKzNFbBo{=_D!0A4cJc4|@fBm?fbb#X5T(uZZ|}Qem;Y3#zc&l&|5hiqo3%HdQ+bU_ zwl|_OUYauO0YShr2wX##-r$J2E%znyxNbhH0k>_hFDei$%zn0rzGj`5G#u%iEWTBx zO9irv*Nh1ut~mlK_VT$hqVWU{(vO<)$Zo_rej1q4YYl{4`i|x(^A~)iO}RX( zYElS;GA@Dy;jX93>Hs*i(~Pg>w5k3zDDfi%fV#8l0ei-WR^i!QQl55lwd-50f~c!x z@q#Z6jE;D>53l)h08?^e!@Vr<;v3D&`n}*sY2q_G*U%*|Y!Y9XAVP_#lQ>F9yU6kE zXv;U09?G@ov0by`MFA6th~~Dq>A6RgXx5iO?z-9<^ah?7*3)@ zsu4|#Nd8nAs{wczit+c8Mm*B@L2@}w&TyU^(saogUk0{28*UOvmqkSWCA zjdR(V)h}s0wayqv?P`z!D~1Okw6B`L2E+r7!3+$(~I9Cr)`nhn(As&chPk8B+BS)ei8MEr+KNQ8ir zKwx;q?1}PXq~_Z5FJdr@kqr9%H2x5Jt#pw2f6u`tEXK%CX<8n|jZnM*XV!G(l5q5_ zO5Troi3_V`GG?hgVk;!wSIm8PpPZ_PS)5}$fyynwtATpYiYg_yaiCzP|H9){ zs*c~$zL&q!_P6VHPC3}9&WOf=aU zkguL#+<6?W{YN04NsAw+mbu~mIOD1BtozEB9Qr*T*Z$JsY{I=1W@1LAViA{%Z+P<) z+*rwW?cq{*>XUMnA};F`PT5KcoCdBs-hfub>HNOchCQG%-d-;fTImo|YsXDK3AEYi z00r#L<;5BGTXP*>p|rCPAA;s_2BFi`HaKJqPi{lH=gycyQ&M(Z*FJ|_7bh~_x>ORfR ziaS2Q-1~9(Wxr>iVq|T^t})J!Z}q!$sA{y4KSf3{BaG}Ce?_g-+H7C@sA0Din^jg4 z2r3zK^o`?Yp#Vy1zf1_7Y;(f6krn84@5q>nBjhhQr5E{z* z%J{(0`0b|YC$caG%-w@gRnG;#Pt>GVNPIWzIB#>@@Xo!Ws0kX{_wx1XZ@I}%HYskp ziFDKvkPXnZNTz3o?=_RWGWVFb5u4f)F<>~sB6?%~{L!1<9_pAnnQ31mst3p2Rbwwg`05!BlR2AA`zg^9utJyo2mj%H6R>jLiG$~B$9JwG)>0>k0v zLZa(J=6K*&vj4Pbs)}MI@Nmc*E9*5eVFY%q=CFT8z#R88`;>sL#5tjvPh*L+cn$Fj z6m9U9TP?5*7gVXh3yKvz9XZUJuQxW}>q->J;51|vhVq(WMzTx@Q9KjrNIV@yOSOzT zl9@)3bLEap)tTiJ_^bJIxB60UM*VE#Z?}AQ zNK{-re#J3+wL1Y$)dskPI!fi>3*9@iapTZY?4#Ms+v>U*GMn05mmct&CDZv)<&@A> zba^lT9b#jKs;gK{l@;hT4j=wX0Fp!5F*tWtyGi9AbwXq@q}?>uG8`gZ 
z-Vx{xBY=^;s(q0^5f2^cqG_3HE;P!Vz+dN{Z-sFOpj!XwbdH#j%bU6xm@rlU+7>^<=}0M~Gv<{RUj@%0&4~JV30YQ(988Wd> z3EciOvjcfRDr#n`H#@PY`KxwB@4yW=0Y}Ld^vf!wOP$5p+`gG`y(SO}LhWM_4%+3g rx;YyRQKvIE(f=s3fYigD4RS8=!I4L^iJr0Y7Wh$+QIRe|82bMYYG$}V literal 0 HcmV?d00001 diff --git a/img/one-for-all.png b/img/one-for-all.png new file mode 100644 index 0000000000000000000000000000000000000000..1d2456dafbf674cffdbb4ef3805ce269aeb3a4f0 GIT binary patch literal 52744 zcmZ_0Ra9JEv@VE43U`M9!QI`1ySuw0iz1N&;enzyavJ5JK2ml2Ig(@feRUHZnwgd8WKN14uJJ7sD6Y}uMRaVaf3JMwf zzb|O0>|X>>P-0MWU&S?jpigrVeYB+JM-gQYdm=Vtk>A5Etc?1ePr9>G`9c4Vs|F*;xLrVf+JcJE?JAPE@nJ>NHsS+5w$~zPAbXGSNhy4GR zn5k-i^|Jf9a7)CeG{aPb3!sPcww8S~AN50Ui>uB5_a)4}LTj;bEmqNL(zVwCB1Lx1 zWkTcB#oS~9o^S1{*GGnjS1h0Em%5)q22WRJ>iDLTsG@`)jCb|QPX(x#VzWG^aXk+V zxq`7Q{DgM%R3J9x|arbWq=MOsc+KnmiF-__!hbz7^}3^@!>isyL}DO^1n*bUhGOb=Rpg zq@OQW$#ClnU^U=zNeb+GP*|^=S1*q88Bek@Erw1&rPKIH`_ncEJvZ5bh&%+dISZcH^4LFrP_{lglG5zi~6s%XXoq2pS_lS@$GV-<&({~$0Pflkk4_x}cF zygrFDDK+IUR@EG*#~hxKjqhIB8$wG;>5}@*yiSGxx1Je&6v@n1Pgs-FX9I7s^r z4B@%2N^#fc`Vs)TVAx=A5;Wd)R5$>#o!& zA4^CXomiY&+w0E@PiFS#88@Np`vT|-t#o<(@c8tT%wE3ty~3X{tn2#%?^3m5>8ngwml*!hB`oT~u16CU9%>24XS=={--{5#)q=?Gk9)T9 z4Aw3FV$imLdj{6V`r95ol*;Y2SIEqG3a!G-i@$96J9VjJj>I@^BG`F7dcJ9|)Cj9h zsD3g_M8|y}gey95n(L-;nf*eHfTJ38m^KJL>v`Bp1G=CKI5aJ7WS&ZuIX2-7?-sn? 
z+(Yx%dpbRN4$0E%w^V2eXyOS55g>%fm&83t~Oc4{;k$gAwsX9OB2Lb+N(`v zB{d{w>@)0856WaS&gSa;Vw@#bI1Bves!pR`mg!GP(Py!OKHBg>OylyCfa#1zGwnxr zC9$Jid^R%KI*nHIQc@CmAdT?R7Tn z#C|g7{ud6DRRWTKH^D-9%m)Wusv>?58jQB=8g8Urm&1C}_VG>7%120H8f=}e+Y%1U zvtFL4^qZeeI#W#U7t!e3%fbTQff?#0vPqB1g&LmoN^=E4YEOj>?WXZ)Tf*qKsT~S7 z^35s}3Ux(xoGamID)k`55%)k9Jvqy=nu5JWIHoR{t_|M&BU^ChLXJVtL84eAXze%H>tqu`gJYujC9O86G>PG_Cg^F5fSA(t zUUp)+k+eUz(+sN2u9b~p!WOJcwx^!XxacK1QPfPPOzz5Kf^U&TlGfVRZM9D`^!SDsT@}S}2;U-#q=*+-s(fc~BPevE4*{nWFtRXoJ4winRXwtnx5DmCK61jfc zb1B_GU*O;>X0JZ*8*giCSA0CTq1Ud_?QTMz=0ier045ag*@K5O8QNtM$kL|U3|VP& zP7`m=>GRf4DtoWKp6%r`_B2ECqJSD914}Z3PQY-+xMKHTETDd1Zb3z9r%mKPiWjNTGQ&c_a&< z8tw7hqQYInn@J2)Ky}}nWZ`A1q~*j{N%7wFAB~qEG}Nl3F~J{5fB~)J_fbPklpp#JlY(Kw*gxfyZ$G<@6K3y6)LfS)G<#luj+jL7uu{ z_y+@)+DGR4Ms?Q)!grxVL;Ji!6)PRdVZztQdzs2iUYESrX6I7PF3vz2P_$aB>Yb2! zSs*ka&ud@~qR`LRej+Q^0A}{8$OCS+LU|TGVAyTfMPEvGPar3Fr__FdICkP$=_E4L zz5$&^YmtGcXC~^{cbUF}jl{|?#1V2Da=2_8+TCjF<+r$fJwKg0+~Byr?{d*^CV{7@wdJq6u#ajH1|X8>LaT(89SNDU6*> z$YxctyG^v+@nqMMf0oc`KddA-*-yBi$YKelpzCTFpEji9?1y~_I8_s9WKD?4e^AuR z*V4wkDE?z8wHMWv7zyB&{_VU$B-&Bfw6v{QiL=Shr@9Ht3X{MD57H6H_*Qm+ew^A{ z6S7iiVc(rx-Nf^YjgUva4&RiG9O^sx(+bfu(OqgRr5-H5BNr&Agt=s?qk@I)MC) z?$PrW)o7=s>EN2oTk_W!Ze+N5hA2`|MP%ym3%y}Ng5oLQ%K1i@)&5+Xo!767?9~G% z913by7CKJr$sY%HdsE_iB%A7z!?QusxQ6@Um&Gd{*2MK!9dFuK$+b72$t`PL-*`tI zeg(Y&>WH*t6V87DpV=FJJ&3Urehm8XyYI;g&1)oL3zlUN zEJ)lCouPWDt&{S(iUqy8h5ho*Cs)+$#7W_HSQwTM0s3 zeP9yCy@vGLW-C2)k@@+k^(ZXK0^ooBx&IFoaVW(gEqc8pshzn`q?*v`bs$fV={#kN zUVGRfpu>ZGR}rZY$VmjfWO8cd0xdd{3AmU0g0^~lu3(qOyxA)E74+=a4ai9m->&7!k_Ma~Sxt4tT2$_Xq_ zw=XroNwk|U8)ocaaHNk#j)(a~U(k*Dj^r{lX9^61SFiDd4o!HXN_8+;zLs8m^Ybkp ztPrgr<+Jm9qBuhnwd|XlPjXXlXbQIGl5d%0)+krvk0PgxB3*<*#P4N^X27}Raz!vP z=<=%F6PyVKz-uEBPZG(pDFs!js^Ud)o3y#QmLcM4Zem`ncg>eC27IbaZqrm=X*&?> zM%x*-9I%nR3tpz{0NvVJq7h)uKT@yl-5IP&L&j{6gDGJ|Q--~_^hyQ30y^+8Da;Z> z$IXF!^kT0YZoaNnaa=U02EH#ZT##ADe5T7`TM60+3-K|6momWnbeW=~=yi!=(uh(b zo=oVOl~WxA7KkW}l}QU`h*Cqij8*GQ3r5H(w5-&AvN|8&PbaUs-hMQ*wNP{`6Z+|u 
zu-T*J{*qFjrPuuSf;KX<7v{Ea6JH+5vZ=1YIZTxaCNZ4FpUk1n|HTLHi>}9xQkFaK z?$|*z>duTn{0_@9C^5b;@&&<=O(;I_wSNcBds4vT+McIw_EqMAb(v?E~) znTCm-rV88F8fo>}sIGKApY@nI7wxdFe%J3b)r@2zkV2vis7*Fl4c-a8OMZ&kPdhw3 zd`xokp#olws7kJ^Jb&4X5*YmLFvAi}cP`49vOY{@_3{38Ku5m10@;tkHNL(?LQ*!< z@nnLONh0p{=>!XuHwTH>>^3jyV}94v!Dlzr_wikLA~;YPa6agW-Q~8SMOe+F!Sc?N zbt&rYvV|VkU$$I#{NGQ8;Rw96`e9gb>t4JrGP+AWo@+~d<)uK=!h%laeO`d1fFmuVpKyy9M`a^-dkzwEjp{=;lR^L{V#xh$(Dm1rpJPRbO%-7$X?#PDXg zcNFnN!p+}1gItdOSnkK|d!w~ihNzqL_|$NJw0zEGMZ*4H$c1vy`wlJah1FowXhqA?oJMrhL6Oft?`OAhW4Y7S)Bq1BQqVb>a6MP4-J>*Qrzq&{xyfl7TAb{e>Z^(n z1M0Cc0jtIn8JA1&9{vxz?~%#K+jhDzx%6kZ^GUHAyM1b@1cSFR1RiixDI689t&wh` z6e^(+?cw7%%({x^UN0`L)Qg!FI1D74Gz=8zOyoMM@c9fmYHDikwvsgo9#1CjQM@Da zf083-2yZo@U;E0}_2|^a_6tB}mX@b1A5&E$M8*aUmwNk>SE!48hDc5TI+NnslZ9&9 zwKCFMgUDi9^7Kybll;MXuHZhaV=Od#KtXsagMs{Sr3Yb_1V<5;DBfX^??J8&b92Kq zr7g+d2nES)NbyAe!DzvjifhvAFvMMHh%Q8oaaADNbg?8NJ? z*gE~9ej~d$s;4+K>hlBQ_CgKVC4Mlgu_J!t0w&TfyDpudQ<8Xiq-VIyCG72bZf7w~%IY+1-PhZfzBwcpN+i0LF5JqiK59Y8-2uFHA4@>zZ3tKt*wlO3T% z&Rm@ry_%RWdL|V4P*PITeaOd#@0wB5c?xWk*?Mnkviz0Ea-Q#!K*w9UU_>uX40B8! 
zLwn4O>vpJF=^n%2A3pE%^||e9#{G*RL3KD~eKJ!#Z@CXrccrM4Q-=_F@##ocXdQi9 zJkX5&8a89f;8&}w>XEO*qs<-vp)`S(9*(?1yG=Cj!nlUG1=KjY1ekokR3->s`ddde zv30pIIa+Dq*0k=4B-nC9@WFh@E6&X4P0>PMASCxh;dY?R3u@x^9vd(2HOlF->_@W; zvO^xv7Y!B~8BZv1v(*q3a*$cGw=sSql$XjA@-4Yp;03Mw^FqkVIP^_3T(-LrZuL^f zZ(jT~6XfYkD|K3{=S==8?j?VcBYZA?DqDNIBS%o{_i zcQ0mH?z{F!tF6_Vwffv5fm14Om0QxCoPZ+yLFsXLO8^(^yu)Tq2Z`P{`J&*v4Z7>9 z2X6(x-KMW7KJ&BfPOXf#q-JQ0At zr=(cG^t@ror;#DnuNLxK3e7$Wxu%h&O@IGbgj99qZ4`5b|DeKfv3tL7kIhFT&W^-2 z!W~{7A0EfVRMD5H`#Wr8k=fw)xe{@WtbmPU6ursOd>5c0U^)ub~JK`_)IvVVYcP%YQfj7Hr&sHe24vtf(QkQ`GF;$}z)Y594inKsd5i z)a>Vid@_G{W-E7~nixG4St@Oq3Yu#%)ud1Gsn9UPZY3H6R z9tclkE3|b7G6U^2;Q0|0OB?r=5oqLnm-y4Wf$J{b+IejeyB~(X+JcZ8!~fveBx2-u ze@G7xz-fp9$a6haoWdWZ4sSEn;-K{$q#f98p~QvYryAm@v(5De`X8;lk&WN$m_1ca zG3Z^rE1P}NSKnTa$8K`ICCi#MFFhq>*1l-J=zS@Hu;Ez5#KiS`$q72s>gleZ_>H+J z3>m5o@W7s7J=e>{2?S-DC8}zKFYBK4s?=>bbxE{FU(CM1YlMFYy^_;wRi#=5cw(7_ zgb)-!JEB((vmzx{U2J;8ugY z!-JCEAoI+QmeicokBwl`q)Oh}bYhoowc~|aeAXx+vHSAXpl1@dt+7Neh4&ql4rr^N z7?IQHk3sSNYj&&Ub3xx1>lZotAT^QkBu=Z|-}lwR92f6O>I_Z43^)G)p57j|YAZQ5 zbASY8SDWu-LQ|*}kN6@vJWk02k;pH+DJW%iK7Vy{x9EAaM8)2<>U5whT6ToMWzt+%(h>x2kBP@?zT$ddUra51T8JW9(>=<1~xL6p(qk-mgX5 zSr7&Lo2uOtUJtKAVo;AT&g{kk=O%2^yvWv)(>OqugyY>(Gc z{5oxQ)CFfe>Z+cGW+o}(=kV=6Nw7n7e?unNdr;itKKqcGRkw#XhVpAs@*(=Ok^Cx} zBy3hia~7bHI{BCMANOZv@wUofao&BnZ)>;ElxFDpN9qbBDFOL255QLpC|c~;3Q~zC zxM~o2G;aM`n@gLnQ-B+`9;bK8>(#yfcphiY9;?eQy)WTf*7JCGu{<;tf~9I_3VY8d z0&DR|oDL{3e(V4de$@(6m&vbCmgd^VK^1%&aYqG!t3t^r%a^L2CYNDMdhEhGXyvMe2YSh4)9i<%IkVHBMc6DsRh8UyywCtqS8Q+~Z@Eb0>X zNKli-RZteMZRu%V@Z_a1=PbU3PvKRD+w%}1fA@R)sX>sUHtHI4uuzwla_uB_ZO-o$ z1tQ)w4l9ssv-Q+hT*G0)H{r)D2Aufvr}`@hTrx}Rgp`r8xw*Mx*W3YG3)BZyh|Evi z;^%zbd`bFR^~xAS)#~_E27PIO96NvKb}Av15KZgEVg}K^Jy9_1)Xe6`MG4WP zOrcS`Yaw?y!htchz^_MRUAhhc5do>Vpeuu|n(GsvF0Edz{Wm>!yYSb$O@#40!JIuN z5{iz=B;943`z?c)VPn0^WovrRf)>NzZ#EOlrmk$l7j!Wk!f=e>VnzMBp*gQ5iEx+} zc0LX9>SC{|Q>;PPbY^3gsHMDAD{|+Q_T6&oCR49*4N49*N-%1`t}az7IRs;RdTq|2r;XfZWM{i$q-RHJkR{u(qY+VjLmh1a~4KpCcM-xuQ>m^iG^s?^j1WmJB4 
zD05B`-7;b|2o|x7aE?1Q@co;qH4OSKhiEmw9DY4|V_>=VNTp7pg6fcG*yD$qgD}me z3B1!MvW=Rh?VC^bRx#Pt;Mg#nd^sP-}tEDn29JbenJ-v>sL+uC0Ln>y2-!0nAH6F!-<*%yy4ZBgFDl|veDbL( z{W(wa)K{w8HGC(7VP0z`o?_ICSACOrU7e%z0#$iF0>rd~e`~xyS+<>KnR$VAKdT>m zQroRJM00gKCi^G|YcYTmehtp|XX+#O&!m=Hk-|#|9vF3bjVFk{(@{ysCDLn@jX~N1 ztH;4~>l+yY$Ds+ly6U=G+12jo#3*^NBceQ54sMSLvAyur5YdFMd`I-rX|5#tEnxvF zlS=uA6haEnYbH;)Ha{Jd#<<3Xo3LFJY{>~H`ci6e{rx-vpf-7-YMVeRXiaz*3X5n7 z!LAYkmtb*XD%&>46-s3MHx`37C+-+$v~g@)df$n!@`k5&(cA_tBrAS~lua`79qRl0 zJIKU(oY4IUvlvw*?H4qQ(08~t&EL$i=qENk@RCX1#tph@SftmaJ8@*0`3l&GB&$1L}`hD|B; zIz#^0jv?1kZfP!DZx9!3B+s@sn)I(Ud^q(DOz0R5(o5KKh+y~|a%57ncPF8-ykd)V zeyhH+YOqNl5pb8~aam>Jj^j{3L^mulg$xY^I!GS9{twQv4>7omtN!*?abhz4U5Q?R z8l$d7zrQGZ)5v_1c$^ljJ{CCcpjm#d$wcoXqIlL+Y-seo-o4(~| zCFRW2AELEFs5QW)`?EEhw9h`+F%{RGu4V372ZvOY&AiU*bg}rX>B_kR!;Hk2^p6G- zdEc?Fr+BxJnqG?&M4AFc^R%)~e6l}Q{okJN@DhgLR+IHMR7BtYqP=eg+)TM&^gOIG zpSKcSqlLB zZbys`mD6c#8H?Y1K~=Dc&m2K84MF(qisqxag$NZAwW15ho~t3&7VyX2|4pK~&$%!= zvPKdau8cz%qK;+3OfjlLQ-e|Vlx1hRY$@U2_A^ZOK#cV*F!(_Kc_m)GQpzM5&ND1s z7!R-lA(l`06W#eTO1I-mllI}YVpNfi{(7O%=O0H%)43u#*}N_+QjB;%b;$Zj6_wn| zi^oCB{rpF6eCt;FDYRLCnA_GV``(|cL#7~P=h{6AoN>YOXYKcPr!}6Vt-e0bqq*Vi zTOcuz1F>DAAx!j9pyijkf)b3^UKxx zPfL2Ss2@Fzgk0^r@q(wJRx$){53w^-Aeuw=xXY!VZeX?M0tr- zN`8JZ=?L%&A*=x+^Y@pS|2%27nfXojjeUhmr9qA#W_t~}ZB=MD|E2GOEjP~+wK#eN8mV3wOS9rvO7@e~V;JoI& zL`oEypX~eVjmiY_x5b|ibduUphljRVB7vP7`rn&WRgT5;VLIV%xHlL^wFh2jSvESq zr699*tA0l%rjd+7YYm9J5koS?WWwZHbPvTxYxr>}8vZ1OAF+2yD@LIX!wjcm;^fI74faD#!s8p7 z5>i3I{vcuFCm>@d<;NSC$rm*t3x2)wvC2G1JK~2v@%OQ~7mxbac~Y06GDSzGn5(ud z`18^U@($C#*_$e^Cc&z(F_SjVuW-1I{&1IA4;6M5bYAB>nk@_;&cOF(u+>_IC_9S` zKsq%x{)X|JI*SAU?ow_k0UTB$(cnq6fa`HuJXlWvkQOATjevtZOrI-cJ=>0OkCP+N zIT`GFe1;L`B4IwDgNyD{jP~w^#)7%SHvuNx|EVVzWjaN?pD_IrN65)^@p3)ETFXh& zFlT1AmWp~E*bUr2-mA-0Ju9;a;5ZiM=6tO5D)QZ=m=RcPeQ!-hX@H)L$0 z=g>k{egXqEis+Yg1*v~3izb3MX%B-Sl-G5Nu-`d#c2r82m6hoF? 
zUvFmcOi=EP0`C^wXM`VyELeYzzr4C>xksmwhtb!g{4=~N(=9eO4$8!Eh+$0|^!$Pw z3`R8!0v#)`_E_TT_z-4B)K`i~Y^m-uECHzy z`O4}W{qF32&pQrI1>c>r`elVrTbRY_CD1cL;5yFva}ce)VscN)+vS`ZxhCkrS&f6dmCb)(;ivcXr-&@jE7bG2Cn56Eq5R z3V6vRc%!a$@g<8;(!z>wh^-02GmSCUcbmJxp7$r3qoB|X>6(A^w8Q#$KfJ$Oti3IA z^_d>e3P0_o9B?6P1jDeJ z$~;kZqvRn3!?_s$3X{743K+hjrvr=4Qy;dQFAdKy2JrPpd3-KS1w9UCp63e#A2z&a z#$h%Stm1j$cDWbj%s&-{-giOlB1{*9o(Wm4kTX}t#l{x}GTLisYvJ`H1}HvJJgVi% z{vtrX-8}H!W7E~(SM;B|(%>gid}W&;W{8GiOW7<}A+H|lc|NZ8(k4A=t?zr+*GKpf zrtn9tKk0?8rbSeDKKj#e_BD3{Y;S7Rl}LB=ho6#fHogUDUbFEtdETJkY0twZ6(Rns zEOa_nAhblnGf<8dRtVOfzX&&!Mj}*M}%a{12YDx-$1z zUT{YZ^t-nsY{LC;V>q@UtMDJH{emf7jw>7vSb#x`+SS|-?zM12OD!lD0e8eQ&mrZ; zbPR7(6`Msm&h}kZL+U8Oylp)=mc~IN?bhdy>xC>sAu8oeHgOSdJcODk2}0<6=H_+} zKHJ&Pj9(#2u2-yvPw@^8lY5AkxpZc92fE2upx>WN^WvH+4sjGwxLBfU_nbu5pt}W~ zMN8c5f?w^4JOCdXcWnKJig7a7x=^hC$?04{CtB2%3O-n!=qt(GKVads!Aq`9cm)!< zE6Ij(pTQG`6ImdAAQb+bG<2_rz{aF1hY!q_xiv2atd-TjEbf6ZvD`oKqS048!=E2~ z5U@`_Uiv<;6&qSV%5hD9#ikVs5{4%m==Q$YydFSim4!@WgQyl++9s>z5bwfVfjI1^y*q-c!!TyFvwmN< zsQ!f)p#%-&-_Ewn4TZyL03GvT(gRbpQ{b!a!+6;P9V>VtvZfTOy+)@#@7wCY=9njj zOZ)cskQ1m--g$%X(x3t7>%egskX0 z63j5Zxcc1y3gJmMX+OQGTN6R`o41)|jN< zWUJ2Q_b6*EGW|Cg*`gMb3#IB<7|0em^ho5R{$6(QdLSk)9mM0jzPy2-$Ibl7EN^BmY=X6K z{Teuw_bJT*!sXI9a*+~3Yl6U0!qc2jVxS!(`GsZS@+lRo6TI7WlLW#;r1)_3y{-`QWF$ zM9b8GuJ+M1u4ufL3S>z@ z#mkorqi`wEu%9+4&@d1fDD7*m5BisfDh#08{Gl}x#w@&aVyYf)E^}4FzI3O$S$<&~ z_lX&eA_{{dkliD33SD>XijhiRe*+C|5Mk*kaI`Z4uOKiKHcT31GvL+$c;LeT%2r;hh4UYPmJ)*evfZ7_qJ9WauqOm4 zoAzg!tGJcr3iV8Lk`VQ$na{-XPv7O4D`(~PoKR5g;AkdsebVIX|~0PBpXNdsS2`>N?{rNax*1iebfVhu)w?# zhS-$B9LP)07C-6*=7UGI&1F@JGLUnyUL zRAVY#!Q;({ZlS4!sP!blODBGgq3ow9`hCKf2T9{`Dcv16)KL;2HYFw~N;KSpq1OW= zv>p{jRZ~odC^TTzhp%)N`2hvUkS8V&X%Xj#xbA zrgt{|R(q>`ivfM=p>&Y3>%aX3;~M(Sej>hkUG{Rff$m3@Y#pO-X&b#K;pif=raK_*O;BJU=mE_gMVqz7A_)@Bnp?^?Q z(}v!PaV^h6MGhh3< za-+JFb2dj*#ZP|f;YT0H{IbT-Y_}(i|HMFT_$v`6Kkz;aeB29RJU+SP-fYG4Iv)l{ zFdI`6ewJfB*B$*nkFQv2Q}r|N&cz2LO3286NNBU`Uf!a`R*-br#Ty3Q2ve^Vf`x$96r2j-k?BGYyXYb(Utb* 
z+$ey>$|BgTTZ2<~SK?3&DsPB|0ayZ_1E63uZs2zFq)Nz6@9m7o=~YLO2QA0T!`X)e zQ?m%IiD4r;AV!n_)_Vz59+_TjEdHh`rZplu8P0EJU>wNlX-CELk;lpUPVZ*ww=~cc zW|W8Au`*O61CLq%F`a;g`+wpZp(yGhKw~B-g5@inOfI|R8iO`vzx&gyB3|3zG+@Tj z_jHgmUHIIOBY^)l2NwQVC?+}7Fx?j`FUXGLq`O>m=^Ky4H>UgZ=TBBr?gUi`omkg@ zc3E2-L)p!;{Bkk|t>`<7 zPuu99JY=8>^kVbCg>L6=eWIYZv*6ls{jFREv@4VT<@S!l2~u#nU}cM>I}L9osPiNq zCQB~#Co%78Dx9bi|!lcd^JR1jJHcU_&KfwItSxlFJy3NCZ2m~PDIIm0F@bmZIJ@xok#nPk4@4>l%)-!&?8?$HpJ(;>zjp=BWI^u#MJB^A|~I%aTb z{Za4b05V75vu7J>I@Xtd^ToJtXR8bK?kLT7_6t={t^cO7xtrtP-*56ikc?%FkVpWZ z4I0Ic|5m1$AZxX|n%Ql1wwm00z6yA{UI}nzItnWm`>YlMeGR$B+Px6eC&r+i4Z_0u z2g(E#d_m~?v()9uCqs#7W>u-~*JQ z5j0hik_LPAwYP61I^js+VNzjg%tpD0hKYukBKymsIN$v5PaCHTIEq_NS0=?S_Qq2< zsRVNnW6m$P2WwdqOxG2JzF51n<05&%<8whwwSR(`lz~$%7fT0(?8{;NMh%%AT9xWj z%jyXAUDI%X4_sY8Jx9av9sB>wve~Py@4ePQV_9iYhZ@EkrBenfG48M5R2D+zo{peM>@g5|KBT&!?#s)d*BJ5Z9mVcg1A*#G?)7#Y5hL4nCjnX8J`83?Y=dQa zzzVQ>Mxv@aHc!mU1zn;R&f#2XEQIXi4Z4DxP>dFp*axN6hW`}uy?&(f&9KH(4Jk#> zytVo#z-p59ul&0qzsJAS-oTgtN#j6}tKt;nIiXCbr&Ck*K$9gsRogh3_ZYFI0#>CPkkAk-b% z^=a^hMc4PRvYynnS^Vu}&~f&AjidPI{)YEfD+(Pa0lBff*+0ku2TJYiP~=dCCg{)R zcG#O!V7=>r4TJRSoE0s+asQAw^Vkxc4rGjnYxGE-^w<1{642N2YL;gAr(rIrNV=d;D#crO-?go+~a4(Mfebep%!vx+9VGgws$z3Eew+sWpX!!l!4IuB zHROCmLI5;eaN-F=Izf0t*%Z7Cbg(LvL&n)!n}LUVTlXM2jtH-kz&wPAOn0d37^!;@ zd2qJrI-Z2acj+VMXhHWMnTJjqG*J)NN|q(D7u>11t~mz}3bWPA`L=XGjZ zc4K&i9P`YAx@d2v9Zwk5rf!_$8q|xWQN^#mQQXq?i#+_#cy}RkYky$B zN0-PBm^~09x7fNy7ky>3h<>^{D^PcTla11(o-$T*L?1N%okhx~GL|pe=i!&M5`kXu zN&pUNv>XvbEK`TbN~sXKADL7ip9EgL2p~C! 
z-aCeH=BdH&Pov(^ov9@7SgIjeme$pYp?akKudPeQjF2-V{=d*tbu6~m)2=sEv)eD0 z4h%yaR|BNd!XAjOkxq?fL`MmTx;f7LJ^}#BU^+Y6BoT(Lv_ovEaw#-Q>v@4$;+Gg> zu+`sdv|TBvUVd_7L{s88u8~GSp}ZQy3w_Ur@FqbswA_SDx()r#grfxo$i0P7;hH7^ zSCI|x{EU*l0Z%Xzz(@qGYVFptHAmLH``M`gO|bYTeS4OQx5?h2wR@nrREG4j8Wf^Z zhDlII90B`4-6*OcOgjJ(4xUCOkI5wPE-l~^h?v!Bq$;K<5-Cyf^4_37rrn;mNU{(OTG z!mhV)n>cTF`$DB221_A5W*PqaFEKanxBOz#IQW2qT}vVPefi9IAOoOha^uqbB%SB8 z6WyTdYG!)+L41gY&_srtWdNhNw36}!fl%%MXUY;*%?j1B%zN78Z?eW=Tc~70d|yt| zljJE9AQwZb9Bdu&<0CL*W!}K0M+-(AXqD)=++ZeBk>-+ONr8fkDDTgmZMlo5^(B*q&p?`h;3}cFa_Qz6?{I*^!E*%Q- zI*yZiubVBASbP1+8a8UOjNTw`f=DS@xjhrCNOb+bHGF;zdvYeiI4m!J4_8Kh5$(cQ z$APkNWNe1GcS5R=v~3_FX8SOKa+sm4F#K|QbngNx?>Yv?$um8_2!iY53Pg?ntMdgr zf4$izVdL#{vzOu6P*q0_s{&U31hQgZdL{Q{kwUBS^r*0dN780W7O3#l+A6f-g@7)=W6}-5@7>Eoh zhQbbObyz|}qP2tOPnq~}2vOgd0-tY_CRn=a323+Lnm>r)o%r5gQ6HwvF5&Tel(*!K&A`rEKjP(EV-x4$=Gl0 zH3?twy1;Z`It*PLF*2}Eq6>~pCQAel{N=dEe{uk1T)@1K^A)#fd_b{prfX(6-(on`A z%JC@z!XP11-FZ9=y=urfo^=UKpb`z|Hs6W)L1^4~Oo$G0TH=Tf_LV%0rb=~nW&|;yr25kU<^Jl+jWT@*1D@^@1XJ(7h zc-(FxME6HX&hHEgl~kg;1Aq*3r67Nw$?N1)ZzTbc7>$cO>p{?bRp9?wSd5?iQXfV(JzZDr4~q<#k6nj zR+FcYve52JnW`&mp#<=Bw{0uPVQD;rK}+5Qy@DVg@lo_}xzR!_Z9*rK|J!k@5vN=Z ze04n`s7|*;&e;SY7;r=X+9nY zo8xwFItWE+(Au7+fAOJDY4k|<`z7Q%$FRSt3|HwVh?N}+Fn%Miy3$Y)wrX5#bJ+^1 zp$vk)91wI`Z3$sAslWqG^zl8A>D$!#3a)`>xc>c@7-y}vczfqwypG@?U+(g*@4j0! 
z@anICxEb{K3x4!0OI8ZI?R@PH^M@eAE)xj9?XYXJ?UtzraVh?1ZiM8Lmz1^lrgPWK zF|BlHS-GYeC_{LUrDp})mC)<_*(*V{Aevt0o{?rfTso>y-4o#Na$QGP*QuAdw%2YM z;lmSKqaUA$)5+|`<^E&yh&AvTOXxUFxIqk3yeTU~* zT~Xh7Rcz64byYcI;)_1Hd51`aj=r~TOOheF%9-}h7tMaZaRUP8oJO2WqEl1})Bd}M z(@qG|S3^CDuANd_ig;OMG2Ajld8v>2d6i>yX?S`l@aIz;A)p`leW4}z__d)d`P+|d z?})GEu2B0@@gtX2e)3g@RGV(S1q+zvjQNV3l$0bR^E(t;1G0z({r4P3edU9#DzSt zb=xi+GGDg~gVTG52Y4ePPiMW@|KmEKL!Dos)hW7sz6eKy?HDMFkJ~P|wEkd-YtD#O z7t%eQ!WZ^3Y1fH#wKnH^ZMaX6?F#xLF&ppCtZo7R!68vd#!Pn&Hz`KqTaw1&9n2*X zh>Sba0jF>+2rrQj#9`!4gX({y(YllnLFnekN7o7A&X?y_!I`qie`LZ`VMLNptpa!1 zK|vdv=T9&@n-Ri0AHR&ty9co34&zsswwn+`-~ECw>Fc9h%H368sQBmf4txpX$w0Zo5#LrPxdmb0Ld`*Dgo|NZzGud-xa7Ohw~m=5t@5bFBW#zf+aG0+*el-c!CdL1E)P+uMqGj(6sT`RrzvD;O9t zpilfCkf1vRX^qc^%6=#l&Hd{;{}1Id^1f29wm;UlTE(i-dF6FYK3D_WW$8QCUW#Y$ zXFf%eL=D)#_0X*2w3N4nMxPpRK4a3*WU2S6x-^v7Pv-u(($>il4FMaw-e`!u+I}yH zzIxfY2|CPR44XP7b96n4@!?K+y_Otk!4MvWqg%dfTyGn4B;=tfzdP!j;s>NpU`%)9 zchd*0?KkdpHJ3pT7!QiC^?MHLO>O{Qzw&iA=&rI z)zPuM51(EB{=o$coFaj?orb`1(`a^7XMS10D%%fQ!aDpi{hqvb)T5{IpPXZ?q1$dd zwmxg0;>W)?Oo?wwRYf5qn-20`=KO2EXYG&KY7Ewok74D8?b<{5BUX6791}!1(jEDH z^&7~4CobP1hLQ6PBS1>o;61~gsAvpE$GDw)u^%&Js^(k2D;uIhEm^j zz8yN?8Z3qd^Nl577hm;m?`%`&Q70=-HgbE_r5V2EYGXOyGL0D7OhgQIQMe)9M}ySg z8CO^w55$nVxBkJtf0hy1LM!9}XTo_HS;=DcoB4{X@IwzcoY2Q73HlTB8@32chRhc( za=j$Zm;d-nC1{v-+x=e2E>rjkZn6Ked4Uj3nnrkxI+obykvH}Y*1o{O#YCP~w@@{h zeEg|WC*MKRq6@QtB%+(==VtV9QMdJ^H`6M zy+IDeF})2Y7%sY&Xxi6DE3^gj(y6h)_s#HxFR;6JVWz(TG4wdQ-5=L~_!fVRjfw*K zoixh?W&<=TneZu$hHVmu6;kzLe+GT1*hW)c!r6Id+{{;-3WP26A1BI z6^(-e{*n^BN#Dn5iLy@U|IOfc*TGAxo!H)J9Ha$2U18nDkBYw-vGwT_w4t&Af?@`0 z22u0;nU+1}e|;HZHNwaqkcTV z#$&#ebE51CP}5vD-El_`8rxZ@&2BI_X!S*>Bd}s99JJ5l^A99M%r|;|aM(?;_WPO^ zY)93RUeOzWlh-$f!T9Y%;_QVu$I9;~P}k9orAML(ACImLTA?Be&&j*2KjR}y(>q=@ z(AfD@+(~=R(__g1xzOT1Unr+bxutVdkL4?aR5KO&M(}IGAHMC9< zW_h05X_!#kwoWuRbKMGYl9Pq{KDY0bg5Ra|@aM zN}X+goWs@D>u-El^F>O>#`AOfzMMdT*Sd>uj)W@)*=>tI|B+GZ@=^2*y<-ZH=ymPP z@vapFmg$mMWbEJ9Ah4E#!0NLmN>IGY8#-*eGW(c2PKiC(7PVz%{Tl#;anHVgzUh8= 
z8Ke3AjR8782tdj_fi5MpGU^p@dur29nv$vijuvW_kh9`oCarQ-(7wD~pq?iRI3o&X zbqIUncz)9FAPAa(suHKcS;JqS@j11k>*FCi= z*)lPrnRId2cC~n zH@jK}^>3xY`y-z@{RGJ4S6Z#eD2>K{=gH?>5&px3W?^~{_)O~ zAI$@m$7zwOarJwJCKM>U3ZT)%N9t!?{J?|gwssn2Vo56yfY|(Hw zVn~b^wq447&kHj<{#R}k;I&3EfUIVjD^bLvH|uEq*Fyn7h*%BS`8ZC}^Q8$D5vmee z+HiD1%$aJ@H0v_ko<1pki)(KUs94FRrS5;_BC=FH*Da74N6eE+rB3zmR<3+8VwhvsGXs#as)i( z8MR^!tB9~bN;I@(QMSbH7i>&wmgc{D0m_6IgH+!_gx@GH^4B#h=5AsQj=(_bT_fKM zsWSuvq${)BS(J@K64a=~W%zA|;c6yU!qX?jH+F`9mKq z?y-G3W0GinZxUp(eb|jBXHOL#PUYIOvkps%eLI;C0DJY%h}pA0g~ELKrbKr?8c9fP zvf}q%f3~{B?$lM#ASKZV2)9X0Y%WoEYfqZ^r5c4p89@w)oU^rZx|MP&QVhDP=WVs?c859oXcJOu4B{JJ^H7_8M@;2tCr+&px48>z#}*Aj5PgqZ_WoSj=6WkmB)R*^BpLreGHs#$2so;3&j4KX zZ!~d?PV~di0!NaG9QsG-WtVd-Xpo7M64LpdWsyy>FnH4V6-9sI@~&4ChFK=E7_|;6 zR9(jDWO1ZNQo*BUB14i|PdhY-IGG&0H$CO_F>PBzmT<^NxKYM%3+#|(QI7Lxw=Qeh zSCgl2jr(sx#X}O7T6{E061IMh*e7ay(_r^2yE^NKMwNLuUZ{(ST$Y#XidL_ z%tM*&_fm;!8#MWx7o)SFKeOmd_Ov^F(tTyp{4^7Sw~4KwCF-sbzxB z&qL-XBIq~2E+Lk&PvdJ6J4HOHwh23JUM-I-?O4-{}+kQ-W zlN9_amhd8m-G|_7?aqx{VyJ@Ehc6?PRTxdu_kupDYF`Y}^)j8fjY#cua;?B@b^+Os z5EdhQ0r6F@Kd%lC`h$4%oevBLqkDFeSNyJh$*V=JEZq5;(mO`2 zvq_DS!2f;w-BMi{buIoS2x9E3SE!P0c_`D8Uv(!25ydQ*>g|4Ob&dL00O=>#&9xM3 zR_V)Llh4~6;O++e10vH3tB$D?d_2q9?sWgD{P*D(pY7O7bj5nYbc_*8 z0)@+pDR?^>CWB&kffH100>;jN7A}ZxieHDbMI$mCx7G2kf&nhu}6VczG{D0-OjLk z2gVSs9sVfFfI6hnk!58#TfQ+=fv}1b_Q@hWJ@ub81d3GJ?K%m!QQ(KH!lEW%u779$ zA>=p-^mo~4=FqTDz};b4fZ_K8sn>!uNF)HD5ZOOOx?-A#Tw7e_ONwaTPZP58+p z|FUJ*d3r6~Ig{NAxj5~`kYQ0Q?>C?OeVfs}3_||D8%OtVMtEZT%6WcY zu@A(81FT0AU!(o7?FxR?A2KLFAN%*NWf|R&6^iTzvKSe!1ds7>*U=2^kJc)t_SEH{$^-K9C^z!(@nze5qwU(~MM&wAZb_beV@jHg6W>gCQvMkrk(tL}43^IY`hfdcKhLj^6Jx!fEKp~wEoAPFh6T2hmxu~dnfTl+ zA%RuMzd-KHKw4$*Kswir3r@AmvQwVNX5I6WSuty22tMtoP3Ig5GQ?Vr=f|Cg|AO_< zwP+Y@Ct$Az4Ex6C_eQO2ZO9TD-txTdqK<^nPSLjw(+QKu9I-%5)E?GY3(ki!y?7D> zpQ<>LSyr5iWPh;uOtHXEhu5FRG`p>`b(2RGZ9y9RNlKlA#u66tF9xlKh|OsgU6-=; z=mmiT$&O#1Ca}o5OTTN_aVz`UP|lo5;+jDw2(^Us{3iKBZBq%Pe=vFwcbnd#X22^| z?|;M5gmk;#O)`C_usKs{so(FkQupM*fV4j+S*mU22*w1&rY3&Vy+1Y}37o~z8 
zS!zdviH(f{aQcmjDA>_%mY7ZAo9U*X2{hG&{|hX^=cLRYAU|PVQ6K~H92UK#tMMB; zfb-O>zZS|gE*WRA&1M|P=a@^8lO5LVIuG8$W8frfgyEz9)HD$TgiL;NH64N8d`Wsj~K z-d`9?nznY|qH`UocQ~sVLd4-f67V}6fx%MPiWwM{G^dlHsP#D_opbkze^&V;2*0g& z7)2vV79G39@HO$w2f;wZWTczxpF}scYRlPbJayq4pc=sMUUe3LfOS2eKgPAtvEKaf z|Awg)cONf&`vT=faWK3E8y@xEPvDd59)jDbEATrFgVXETQsO5E=9RYPP2?mchMaC$8jWw!oAR!A@$-MX%V z^u+e_MnqF@aa9CqgnYRY)J$yrPisF>qeJ0@*u!5fl4I$AccyyF*M-rmo`PSilt~RZhi(2C2pY=KuwyT6g zU1+R_JTeQ&i>J-w!9@BH_a33&~Xo{c=dZN4B# z2=6yuOuRuWxA4sd#$)S6WnqGlO!-yBdy{XhPWaoSgtEfVS{2y3l;mgqEzPv#KB~eU zR7D;m!J%cT!L@SzA;S|qXiJ1H-y;$v0-$qqyp-+Xf|f`)2&Oj3QA<-{j;eV5J)-Vu zF{9?VQEuwyTxNVemR$j+3icv{Fx0Oc-Z+eBc|zX&xw_ zJl%)fs|GHnp+rwUpQXQI?ffd_ASvJMQHi5iF76YvCDgVNc)Mzl+!+R7RZ%#~yfn;< zDS=#@l;A&WT92mC+$6;*sU$>o^hTdZ-C1m3fU@-$&)d5`;Nf=sBMM0Hy*gO9+D+*K z1osGEQHnh1T92mLk+eNq+?Ppj2>FPQDEIaEh{4hGCWIelwNh_~Wf0ThtT8637tz?6 zwSe{l`Q4;at#1W(TNa3aTUYS)XWd$?#$3IAt=)_kaf!VFQwnr0-g7Ti#RS#>47NeC zvb&hT^o+RQPqZU7Kayw*w(GVKHz9zU8}9>o{Z3gRy(8+NF2kP`DwhL!JzkDaT_Frs zrjm!3`;omB_}lQQEK?X}V``uwk`{rem8ucz1n3y$p7J%M=tgUlSZKPfDc&*j84fPgz}gXw%1Iu?cg)#Ejm z(l;TikW{^~#;37=8VsC@KqP+K54X-GP|iplotWSqlq%uuhf z|7g-n%V*H#nv7AO7eW#zWG&4bIp~0rl}x%61jzGVI`WloEKuEK27Pq*FFSjSihI&r zmFjUi$#jlC?R|9zsZ2n8b{|BFN>{8AJ0G&*`!_Q#nmDwq4&F(|hE#P>Vb6`EG8Eyw z+#QD3Wu)=%3;cD)S}^5SlZk@iSbx@++jz-%9XXjDemfKUZ>fJ_ocMG(o{75S8R6Aj zrxs-A>l##R{Xz?H_|CboO9&-+)VA?KrW&=I5+MvN;9ecxf|%~KK!=e)WCX?U(`P#v zNJ~*WoYvquoQIVeB(X@qT7Y6gZ2Fm`!=VbX!B|f1NO>TYndO_`7~LwFZyypp_g>?O zJS`*HP9zRL8Rss~!uEY@*E2etW1vTNNKkcB|fT_bl9Mr1TggsmwO$<0DE-i0D5= z+}W5c{P-8GyD!Ga!2uBGM`rSCwr$GZd`Up6O`1ep_U1#t5cY-JwIhcH$A!^rbiVD% z6D=h)zr?oN&Dw^4nUzeG+YIm|2^sSORgf$n(4+Hc<*IDi)z9wE5w5&NT8)H;H(T{W zC7~W;)J}QfijPWwPcJjII=U_~yKU=t7vrU*{oz*8`m@MV%KAUSr z-qylZh>PnyO9xyhV)I2B%HxaE<%EtNMW4Q3pGUKRFE*pS#N3XyhU+HgLu;b)5QhEK zfKe7uf7?NFdRdCthuZ4vpA(pi7nyMnSZ%eG^nHy4E1Xwa8ic-wi=_uoMh~&{5na8q!EnpL zQ+z)6Bb>WDa(*-T`QvupK6uN`y^jQ8SpkKU)!#&LLn*id?-l-s29f#&vkHNCi&BuI z6;;1P2iu``nIu_aGPQ+OL3q_X@|+_0kthxHN0Ab>hK>zL*rQ$79B9&0vU!U6V?w1Bn~uz&5s 
zQM|(|7uFaZ5$^Pw-1Mu}HPQmoD#~XsME^lZ4q+S# z#zyb^q*0}&l(>7Sg|Oi<=z#xC{C3n_x`yGFz&%7qE-Q;YRc_c^v#^nuy-->k&7^x4 zhU^x~fe&}8B(pwfe;FZWM#Hj#Jl-ivDW@}oYO+Deqrc^*rKO?=jPevyqPRQaw3%ai zN1;bGHf!%w&n~QcYbBL4dF@BiWT2IqlWu!mEL%a=aiH^btKCW=;Gv3j?7qZznNE=p zi51y&tN6^}oEX{_{FM4(I#A(Ye?63%iJ32O(c7GRZOLzENx}JV1pkUIA{--~W70WE zF8h_CxW-xHWX>AFzN;!NaH+YJY0YJd5@X3?q`|qcW!&T=y-?XkiiTy^FO8am3BnRB zIy-*V)iMQ}MCS`@gCF{tDjUJ*Ej`${ALL+Q$gQYWRIZq=$yCee-Jo;x*+5SWV$*%A z;Nh5jIO59BQHG*A8rQgYdONrC%hK34Qkl_o0&{DsE>&b1f%6>U|41~Jj-Z>B#?fyV zHU;Ar*wEo8)Rew!fK|3$a5kXG?yJP^6t#OQ;o+~T4ySPVVDs?Ergq{{e^}lIntCJf zz0ak{)u`;Tk9whpGvK~{4)`4j9gvtP4*A3Pa9Uw0F^#EW(%!cWbX_@ducJYe3$?o` zPN76JshFomL077JE#76W9qKAH&vly4o!b&05~xf#cybhq91ir|asG;;sW1KHm_jgEFgk;VJx#d4*4R%6!82woQTyjR7_OAt6bJ{Oz=!<3VC%ZKoeDDqNJS z<#*>8D31kZl36<+r_kS!*5_g9DFr?4d<>@})BUn86+fozX4ca%9g&*Nt*dxa613Jj ziP2GI&;i|fOMX7)|M;+f){*P~zAYokK!dqTA(lY_-vwkpn>UiSA`qid!UCK77q^8^ zdn5nYt}G2SvoG^k`_aM5h7Oa9tGk2QJ|Jah+2s=6W+n!^@`5?bsDL^l*Rvx&}V3uiQzvV=3f;73jH)*pmAdm+ydEgn3kYKe% zQH%9|0^b6;M|A|%*gy2|l}|x*o7Q=n2ak?34WZET1#Ti;^v)``^BsjN40z5`Z?z0=KjpdRWH!lCr!p0Mpm)!`pwlgKP)4z+Z$&9 z*_oLSCz%3(U1UgIz{Xze1WdGKHFIVrX7pA9S;IM`rdf#`Muw+}bJ^ zb@Qg;LLKnuOcjrkuZIES)!`lyG+Jq@*&RQ#-8<;}k+~Xib0MYTO$aqdv&52l{RJ-( zGY)~f-~!fcz5n!N{R|qSzi)Uz)<~?S48jb$w?bqu`u&Opjvbgt%edAaY*LZFV$Lh= z^v830mrPi0RLjs>5g`~#U-$1K0DW3qeJ5gF$3PTelvDb7 zBxaxw7Fa^prD_6=BAg>lG^G76GMV&hbhTMb{^UhCP$_wJTMWG;$U^eS-FUc+D&NGz z8O#6KJ#so58l@O#Kn$wQ^UU=PotxV`4Ra}*F83ZULRiDGS)`=`ohR%f2c38Vm?1_$ zy@^Dbs`zs{93hFjZPLL0Y7`j~*Kp@+Ej^hRauDIP%t0L-UA_F@n>$OW3SzZ2$#P(9 zY6zlK<7eZ_Wv@^SGLXS?NX!=k4dyC@dbGqsCuOv!Jxb+cTDG@4U6{drzqw_xm2P&k zi}t{$r&f1U+d~7hHQIh0kOQpsXqf8`NuFI~ZXOvj5c_qGQ&~Fh+4!X8)(JMa6rU-W{pLb8M2yEvW4G3OA5CzsvJ> zl5yRQMjgtVEHg<^k>@>uNS+ccN7nxPsea0}{LBWo<{4Jgb^$>1S-$I%rO+$uEwbiv zx%c#=-)&#TUI3>(+_m;Lidpv7AHq?#yq7Qw1=ewdo$l24wK{^upYFZuv1+iV?B<(l zi6y-xElFny9e8##q?HM~@QhN@0dnK#7;MK<#;61YHMW!DnXBoY6#vrNT!Px<=5|Oy zl%uwRbq*#5b8knm5wF>B-{HpFPSMd}{i;J|+1+k3Z}$J34-{{7==uVy=w5Dkj<5KA 
zBI}$%G^E&N)PBHyJxf1%*_hJBu&EBWzriMTHwt#>-iL9%eW$FalT2t@HzVzdK?{^;9`30FZsOf zF@cjAi{i1_s|JUB5cgwcTWN#g_t@BmMq_%TR!zthM*E>hCiqtYxJoNuTu&?h()^%U zP~ZcsR85+r@njOz)Tb^lB7X=Q1FH!8N(N?~)c`&G>5~|hLGQOZlF2>zoso_Zd;~C# z!LRV^AgrE;oypPoZ2o`>5wQt}R>0vk{P#wHad;0Y6_4Jnv2lBJ$nGkZBgHsh3~Ei{IsCYuc z)MnD^R>D@nB4X1&V9>&@WoG1ethUx*XEXt=#o=Z|Lwt!FR*FQL0btZS;^Xt^znN-& zkVm#}4y0zep0duD(L`MGkXQ9nSfOpzaWb?VD2Hg7H8%YJrNx81tztOWy|~9cXPA(P ziqsHh%tJJ(p3G+ut&?INHHWiVU>FLSo)o?)KBCpSkE2A3KZrDjeNuc*##|&ttXuQN z76V;KPaX5u)hmD$sH^@$^Z}P zsaF0c-^az!PG(|{@1<>sfdhPxkfNkWwyCH7OD^kqg3F?qV-F8XQ0SnTh*hbQ#YN(YSgG+P!6srrW`MjABrxqL);eC=6esi zEk1b0obda_B7>(*F)h(DG=_!$__&LlSN3jeeXp$f4(oPb%cic9#7y>s1P)l!}5eq5`G zRr#G~8}=(5VN~3-`bfon-kXs^x3N?uaGX`Xe?k_Io>(Kyq(+LTAdShv10@Yv7bEYd zz&iU#M63CQellcJ@npSu|1Hp`B6ED?m2GWo66si-g#6Lt7zT|0SeN0!l^iMz8ZLv1?kwgApi-+(nnBw;D9yGF*DQxQ2f*^e7*LgJu#hrF&ML(r?-1Vq?x1r0W^tmVDd&K}){QUY z?+n<|K*{wKACC!$%@o4129wBy7+alJ;QnU6WN2X}266(kFf^wc$q9=Zd40oP2kC10 zJt&mFT}RV7lg3EBLe7KEH~R5?g<$l^KaMH9o%ezfWRLxxc0C>gp(Ln%cOpme*pL$z zu$Y>V|hUCx(+%DkX)cC8--rQiFOQt7-y0QAu}RVul<2*khbsEPXD zGHB1(wMxNLYBI8(59{7&eN1j-rvxq3{B|pYF>ia}8!;3N)g>CZyomM3m{gG$ArkaE zqq&u97y?dwYC6QQrz$n&V@3Wnu&h>5X9{S6`eW2RX6z~iFVDtnTotH=BKF9dk+Ei6 z`*n4)-d?<7de^@en#@ExIwD{a6i>_39mI1G?K|QsJoxQ865rx?aFPuFRHXPYganOs zuz5OV)9UsZZ-jW|ywr^gKeyH~Ji3*FD|*yApd1WwTFsEjw^;3Ql@tSH8VW6>##%Bz z%LaRX>?0;bX!m>%F;)U!Q16C62Z3mm(*DibuISuSeOvoG@<+2ceeVsw_Q5Qx;lNOh z6>*Tw)3~hH2f80$0^g$0q?%B$WD_k ze-QR{-cRtnXD-G&qt4(V)Ndc;xez2Ha#0#{99m`B7TE3nk~FQw+hQdKyE_AETyKH^ zy{}i}WB22^`1>lv@HDQ?NzKZJ@fu^&-J|_!#3SKS;|yGG#GE63gQ_VmumwYcOxZm7ui2s5;}D#My-(g&n-2SQ6cvy>alxz${j@4e>c}N;FN!iVG)fYGkdsRa6~uXsGTtM(Gu(ry>Ljady%f$9db)r&SrE&u z=*#iOL zKMxF+=P&R#>X6F5yfEk6n3oCT0wHgQz9bp6^MOZRk+mD|Cf6XI#(+r&pbhNU)n*o-zJ@-{D;DnSKIQ3&LjN# zh0{;lefT@jQJx@Lk(i}?2eB6>{EVOwBDX8^1FdJLEVLccE!M|E=Um)(`)`+9s2z-? 
z#K3)1)#7Oe(~W5Jy$?!}bBQEn;4ZOHwYTiWfSVCn)(B@WJc&0$np#tBvs}{>^eG+OeYTZ8e zA1cD)0fM9q)SegZwnT^3J4%aIGxIj#qaDnqa(W{J2@M2EUbQL&f)!}T9=&rl4948wP}0$vxt>HG%cE#K0#Xn}eap|!w;$vi+@>V$W|@#f4keWC zLq;rf0ApFnTgq*+s{tYsD#N$RS>A4?BdN7oXm0f}xK|yY{X$DBNCh+}IhO|8y}Td> zl`gEbWK~6RM|!O8R~WaPnK)|AZl9fs91MKOd4YekQwlYf?TYZ%7x!ER{KlARu{}rD ze%4t|agDN$$hXOFfd4Qogs^75ammD!@rXS9Ryi7dt-*=lhiN$=VN+^#&W+aKpba=G+1x{(|?Y#M%`yI zZqprq2vCl|dXk*8w}x&9Ia3eZRM01t9sb%kUwr5z@oC5%p5{eYtC|GbKO(p5`-H%B zk9KRjr<4Pt@o^Dw6mI{X?}~i4taB#6XDCXW!))niU%qPvi^}tN!hgK2M}|7t=h*5? zqwlR7l!ey51$8>)w4E&%=X^v_<}2^&Dc9`~lVM7zC3c4^@qHT2d;J075PsG2`~%>= z=CeyGx7MZ}Sd>1T!Tq;jX3{88A!#ZZ^P%#^`jR_tCaRP?{PS=l^r-nTjDGLRbF%B! zHO~XjHN@ONuSj?<-hQ_1t-qp@k`P?lvqCvdIcG8@A)x>lAO7 zKa*{z;`}y~TRtlyWSd-J0sGpk3sq)5SBjYManiPVFM$$4m5wz51L$qBg4`0 zw!Rp+d;hBp!SHcuh&{0JgzqY-VqmWsHz^P`I3$mv*oKh9GwEy|sg<~+{r7P}G6=f^ zNg_P$Sbzs)dy${=s|b(BkF4|MQm2M!gVk`U?UV(Lf?s3bW~>ZS1-xvRfXU7~TqXyz zrC+r?Bq1)8ce^Ht@B4T`POqzUlPZaQDk0hlw1HV~nl^KK{VnTcOJ?C+*O-!8Roo|e#i&eZ_F%;$$c)>y^;P1O{W zp*SDo=gR0bX;(OzZ1-M|I`0$Tgzq4dKB4p?n|+gy>mYs!mwO9TjdhzXGR6`jsZ2=c z*Eomjmh{?{n#`9_l)%UeKo{M^S4(>cd@QT$ZZ}-4E6cTsnFVVrCDww)!7XwEQJ%e} zRL%Q4*NeTS2GuSr%$<8J5jVmF6~qS2TGX0KUOK|GY_Z}}a?k>^H*tI0zSj#yb&Vn# za|&={`jmsqJ=|R==3ED)o#rO(Fm77uLn)pvS?rAMwDl>&`6GH?iz!e~`c5g6Lk(NX zgu&ffDUoJ05gf;rud!6BN30_C0Gjmnypk;zF_{!+QFdWSUF6m8V33IL7nGWFX4PzF zDzIs|o^$uPS@nV5O?I+orMBrwfa>`!b}EMMq8g#We_s3_cydU zjm}2Lmvk&!bH||GvYl9orZ&L6Ok;W1s!yDqrl-cS&BxHN)=(T#R%GE!S!p_;zB;+p z*mC9OA*^ab2(96u_|#fRzC5jRW#s09z%nDN5+K*l68~Y$AwBPFRI*B<4@OAgXxi`+ zuFjB=(+3t4vR2%C1d0zMMMW<8#EgzmnM3J zCEaq{Tc3Wx8=G%3NG>={HXcpKFYVS>ZMhS2S*$B5pB2uPS`>Cj87NjOM}2#TrKcy3 znmNTMgY&5ZttX1ISCJT5)e(X5U|y`4F078_V5CUaRQ20gH0hfFz-xFBbE&|srgCG3 zS);^c=I=UyRFmsTdH=2%n7{9Q&8(JGT zHfX8GD*7g^ffRtozQdl=bTUz2Bv?T_Xp|?7C*VQbQ>`~;QOi-FFis{$HvgWWg+wok za!rzNs$YWGjstIy{GrP{Fg=+l4&6=;FI8$n=vVFi!_jq74IHU zZib{vd~4){hwXJyRVSL8Hr$MhE=g7^WG3F5c@QvviP&25?B43_?9Z=3Cj6C!A3R#x z(_OrmNKriJ{_c7l8`*c9A5rWcntQlqRW#?Sehls|R{dXuwpP39@eotJ&`?3>onMJ! 
zT9Y(~)_iJ0`pqs(-o{M(5iDO&;B$dzr`njyWY{M4{P3)pDcvf`58T6ZY!5^pLh@F~ z`C3~o{D{P;GtkHk%p`JC)OiW2Q;Hk@R}uvI>_85BAmc^eSPSFAgdAE$lz2`SECRK3 zEUKa;R6&zc_=A`{D-1Ddms7?A^@y!Kr+kAR~lFVp}tP{(hueAgOIhlJe75}P3z ziJc)Glkw!mmRG%=zVGg}bd2bc)e(YlY1iJPzrKSq$;NOn9PiHTvh6#VWDZu{!>;8B z?5~@!70^ZvVpHSy*!#$-+>5EfC=nZdJ&EmsE?lC|-0bd~EEW&VRh;moON1Sx^H-~$ zcxU{G8aXso_|P%&u$F73AVjox_A2SK4)!DwXKDWV3RCjac1P?#9Z{2Htj4ktPW;ZCs`}ivjKYr)`}k%IhEP+G2yt z45_A!gw2XHE3|mn450%Enz-Bvhl%i0SYFYx5C8ef+l%$ob#yt!mOwn z3ne5SaTL9sRuLIQ;&>E6$Q>7=JF-d+z;&O-S@O+dpgevzJ3Ut;r3f0dW|WgD&^H^q zLRkj0gkGi+OUCtK($G848G+~Qh$9IdJs_2G!||!L$jQWMkzTVsBVUstk22k=AZExN z*&+z8{6{GBcEb!I`jaV5aB9aw+cE)*x7Aqd%+3(PV^6l!nXbUHJ>{i8{v(k7o#fbMLR&oxJpRpW%&6(2ZYi*X&P#k0U z&aSfExfPf^;VwH6IV}sla^k_9RBIM~W!nF989n3Yh^$v+*7)@(PaeG4h72NooclYNStlzVU*t_wj#ruTyp>ru_W{0ugz-)Au<#AAny@ zt0d0MNQ+s?N#40DproD2kS*vqH@kj!R&!gn0EfD}GHU%gQ7zjr8Jhr5@w}C$exzW{ z_`&ySO+lo__Dd?SEAr^J+pU^)hztDXdC~AWaA0Gq*#MJXOSw-V1^Da`f)39E+S_NG zb{J64t*}Wy1CQy1(rn8rD>`(^f;Q!v;Ztj9lndpZ9*`gZk*_2(FG0~xl}7LrbOp4Z zAI2n|ChCVl4P12Y%ge(e#VB;T5C7%C<9HEtgw6Jl=-=jyk3D!d2iacoNok(1C5FKK zcj^(6*KGKOiVyCF-`SrDjGt8a^~eJE7#IFhxVDB>=+)|F`TB>)dSJY>d=+4M7lLgi zTSuuxWRg^-nUr2g)giIHvCtZ-ha0^q_Iuy!1FjPG{vQ=$SOs%7>$sowYf8uRs^6ES zW$-xm!z1Z(x_eKcnc=z`63(?eixi1;dD-FrlG&s&F8Jhq24BBz`yP6JAelJ(S9mMU z;-34Mfg)~vG@WO|P1618k27~KR#~LoE;Ya-9)J+9tf*Gx*Dkc38ezubP$1H#eXm0B zy{4^E(q_>~nMBAmXcSLq)~qWh7zB%dT6dWFHv3dA3uE`2f|M9ll_xHXLMNOsnInY!;X)zQW zH%Eb;AI2n3xt}=KjBmmKQJUCqLdNI!Ny29CYmYhBX8`#~{BXeuuMABZ&8mCgpvUe1 z*m}#bD5I`zSU~}4q&r4Ry1PVXXap4LPU!)pLAtwJ973f#C8fK&2k9ERzsu)+emviO z`)iIlj=8Scx%Ro%Uh6!EYj&_Lg-@F?%zEb$?Kf+h6Lpv7dy1LtowWl$EOLnVcWti& z9bY{*TlSq|7(F@W4%2eGJ3)pY)p=38XJvWYyym(xJjKH^v+W$JKR3uIaq3Pep6oSO zzX7E2CIXU08u}hPQcFtiX_@@rgR94xOZTSp_ChD+4`_8*np~Mr*II8BU&qmQl@1IR z-<;23XI++t=vGmR)VN+Bkf}5_E65byR=nPze*6wr-TUfDrl5u4fvVfq8<-MX zHORlm^rt6)k?_nTH`_#z+H>%Qr%V_`ol~53qbDW>U?@@=acWZ1;*?RQilx&N*};1! ztmk=GKgIoN@{`;Q9?r%Gh77_sh#LfrdM9hLl~N?Mkh_q)G_PdrV7^tCRlTq*X&G{! 
zz*1WOrqT6?X#Z=vtk&crjx6ysR__YHBF0$ucni_ulz5~h##pG}K1*Ppuc#DCrHZx* zIbBL=)5?0f$cS zsiaaTJb<$pzddXR4v;z~^Pq|mM}Fy20CjhBLBxtH=cEM+$E~tj`MZ0G^);pay_i%) zhsQxUS1R**jhBg+L}vYq&-kYWtfVhncGAX+Z9`~qI`P~)gRH{;UNY)Skw5%aA4x2l zdT01bDDH6Dsxh)qy2mhBQ*}3XWz=b1vf;Eg=KPiaohyu`SQ&_u;i**xXNSyxSEbol z^D^lgax>&s4CQ*d!o`F=v6EY;GDmyF8gA@bj)Z)_L6=Ji>ql>oa8%|n+o6?49*=hPRLpLcM>$hTN}p2b z<53a&0qRsiDWv$VAYFabDdF%II}Xn@-oE&nT`VTPVo&C^Z!F%#wnyouC!JleJ2_EdQ2QkHFGhQ&m{nb>BI?LZu; zFQuFiNToG7c>cyKHQ5b5w#}s*>p6hz@!f)0*F&ou2WH8EDneZhu`nKIHycD`_1M<^$Q7o zg8-3oFR{0tEv2@ad0TtvcAb#1lSR1D9a~t03?r4Zi~Lp z>Z913j}=G|TJQtdJY5h6-{qix=JZ}7;aThgq~xd2$+(^!-ePkhaZx69f*;KLn&)qd zpXV-XLZ08;wX&B#8w51a zGZ?X223(xs#TY)sHV>BPUGsz*_LdY~-=q<+knVks)llFyj;*FTGp zg*!aH+#pd-67$&6g==l+93w4_h$FL38Sfyehh^|tlU4JP^T|)$e;|9d5^>N!U}S)q zY(B)&cmGJjQRIUEP24*=*>+*T%F{Ld^SXUJe~CzM^dNzlg==zN$;jx~ysh^1D?M{J zMF)yM#SPXjJ(f#D-d5>=Fbex91pXpRW(O%vU5J-5RB@h5y_ZQCFI%HTUz%jj$1Qz$ zRMo~~+lM4GIW7kVI>@Q2u96Xf2_$`|DVcZ0jO2#LZN|HCGQ6c4+cVOOGnjua7J$7= zM!P((7T7DH?rnd4W-(S`1TU8lBDd-i6gN+_Bbhms6?<7_|ITha@6PQkd&dDDXRprc zfkWnwBiwP0=|iHVtM04y>0&Byhg%;ol^QY#Js5d6Hq|dd`;<%x^EwL8^(R8j)-#fS zzM{_Yh&?s?@A$L+-}E6dY!_`0!)3A%ai@_xqHNR zEe8<3;FSJ&vyA*-cAqCJWj^Q;|7_}s&Y(C$@C=EJv9szPGZg>EGsk|~x>@iM83uzU zp>tXI_Si}TVCmuOo_HzxYadmX6MuAhkUgLz)1FQ+tx}F`dU^QES^EPK^+NT*k4q1Q zB@ec<*t4BaOnj4JTlDZ~k)DBD>fxj=j5_(APV0WnB@+12Wn|o(6Tkeu$EPcXB%m~z zoS39{!lHC)N4Rf1--&X`$RQ2!x%fR(S^vAF+uTpv!J9*8Z|Y6*KOYD;!mjFs)0@?S3jvS7P35-dG30_2vVL>UqJ zzBOTCBsQZBYg2 zwepja1aXWB=Sh0(PNxv(_!`8=E9@yo23JTX-(!P1YKrth5bKSa+p>(lSK(A-#oOE) z7?nYpjyNsH zk;ut*wd}Iyy+BsheEknEvCoydmlNQIQnSAABOOELq56x0?kS=zU-NX&eP^Ccyy^64 z6K*PgI$ZZ!BY-Z`qIzb#4Jf!D07!pf6LB*Gd3}4Pt9{6u1~~T$Db?rkq&!4F_2)o1 zqE}xs0okfb6ANb2`+HHXU*^)11HKRUKiNA4^|2PU=cmoe6=F)p>3#4S6wy#3N!t8E zWDZs{cAQE(sq_)7S~0=>lS)-B`psHYsV&mZqSC3FnZsYc`@F2(eiqo^aW|}D)wDTG z+O%0&A!2%V^DvHxQ8nB$j4ot5V3#n6>b>w`~T4jh2%BHQTSj=+myho58xHgFP zsu0|EZ_-QK^YP+YL6D&ozD8*asJu{`xNUAUkuuh6oD9`hyE5k=1Bd--j(ZAw$p_6Hn)F}># z10n>0eA`(PQl$)mGlP=s4-=jq>nVEI=NEfU9PBA45Khr7W^ 
zEtsnB9rtcF6lPncta>O$r(Up~`0FCX;y{#6StPb|?&3)4E6k;5h3$X0TiuwBg zEQj^T-jL60ka2zr@hSt$WK9;dS1X>S4av8EZOf4I6PKQjtbO*I z(vYbnBk}62|eSvfk296&p2)_UN`hy~wyrueG74f2|S!(YgO>h0!kQXofH)tnnIt z7S&`LpUpAUctK1|lpql~l!!Bvn3QhY)L+4vnP*$>F}FgW0GUVEDEk_4Rx*T(bJRv{ z;Z;7mO$(>GvmWLA;Cg4rzQUoBLR?^%7aMM$)AL22jFso7aE+d!sn6fnywbGBIDMG?)Y z#!A(5jc?_TpQoCj8#UCust`(8hSJXV5AJpnfB!bvHm8hNac119+V~b?~+UB8OmC@^wMiA={Z`C<6nQt7DnRKWx3S2?m@=s)w0n0s&1Il zM{e&^)^A#+Tx0jsqPFD+$=NusGu~rlz8{@ttyj=r&ex|(`*fr%SWj15A3#7JGL^X2 zb1tATH+%pUh$gPY{zyoXcNM!~mV>ZjK<$5Vft4w*BKgN7_6oEtP_4dbr9ya#E`nY{ zVwJcyyC9~F>QVW$)V@puAMN8m8f4JX%hM02tBA5$WO+v`n@tZ9cMr$ym9|PfcucLF zDb`aWjvl=VsRO27yAr|ys>^A--|FX}k;%KHPt_%(RKvWaVjj+4c%V23A>Vj}K)*98 z6kuo#HicLr?bK0|lvKWP`VhV;uo)#tY5HIgp^Ih( z{WozL^;!pt-K;4g&T?T3&Et%%Gfw*ns^c;u+vJ{elcIYTHGl%-)ec%{lz;h27xdtl zm-QHXgVj8HgXgD$V&*v3As1q&XpQ!^(7Pv2BLrfa`z6=~oG7=ae6tyLyzZOX?%89s z>J@Loy^bF&-S^jwxQ|k%=5d)y94};rumC~fd{5Tmq&^9bF|4AN#G;40T6SqI5aeth zMros0MgDT_NG|Uwp}jQHT)|!9Or&$94txN-P+yuS5@om4EJu}AsWrXBv=BFS=9vN+ zX+0S#*vA?_k0TFHTG>*+_=-%~;hLJJg5#CzDbF;rP0?>n&}sQ1WM#YC-Wzfl|5-04 zw&lJu`UUGJN@H$tsDRiLu}KwSaE|*fGM|Qvuiv4KBt#0J#J zi*SY(nO90#XyvB%WbY@tr7R?xIgyD2IwH{zPnQa?$22;7_7qWZ$-Is99b4`0QPf)0 zKDhhdd~!Fw;n(`vLzd6v`TA!a{gdJ)y>ENkk0s!Vb9I^Q)Q`HmuEK9_vfR{0`M>Qm zm{iMIs%v|`eCR1cHSY4Ge|xmu{T@gTz~uk&X@TE*n%*M_N|zGs%K1?FIvT$(z3&t{&W?{}Khg6+by1g^#(Bm=g3zu&``K=XJ)@ zV+Yf2+D!k9nx|niE5RNeoYRwIuqmS1s4eXEhn}11Y@&?chl29eXAY&d;`(TugUZ(U zngglaWah)Z_E=Ux@yV3pW`1|Ibj;e06=Y3*ihb?*dMX}k)4G2b%9n3?(5+S_S7dlP zoO~lXE2XpZb$=8|C5pifiOt-1^uaHCv0qvUI#v6&`;_j*6U-OsFDv!M1Iv6L6V5XS z-4VwDE4yHfobC5r)6m@=N+1E&G>cnXtJY?CRR2_oTPG^Wb%nI7=Ztl_=4XlX&za{^ zBelue@&5_l^#fFIKoMjyK5IBHIJ+Q#k*&k{!@KS~K0{7o z*RE2m5#}Q++cpgj!bFK3Pz*y+Dw(gwGTkTCOFrGt#N7E%m6vJIn#xD-vF&SU%B8^QafwuT-oRX)aGDMLNIHPPQh)2y$HeTdyie_QxtetSZIpIwT-ss^ zP`PE5mUwut?|!1uu(zDB`Cbn{d&Y;fcWVScr8le2mlfZ+Rx^fbK6dITZtP8}rAI7} zDY2IS)^WCQK&0cy87Up+(r*aL*fk)gJA+q=Nh!5}#|!=8X;#DYE0-Oa`#QIIlGr)b zrR7w-vYY4hq(AioSHGple9MPku34#+ou_1*e^YlRCnvvBiWonLx?6X6X_X1ns9(Qp 
zdGEO7t|`Jgken<$|4=kh8kSk6?$RF8h^FLrYwPu5?`gkLrST@qZtUIH8kL-7=$u)~ z^9)>>1jbF2m_-UJ<7e5<<8shN$DTg_=NOePR(2=&jw67hwMvRGp(LEd_r#8|O37u6 z)7s#2bWEg7qo;CQ#TNyM!Eqapb)9gQU-s-iH);mz8X%}@*p_GmbOH%ah+B*d1?$wqKln@nR}WBJ>EwFF?(bY8X8ojRs+f@@|Zg z|MtTfHE|g{d&b+T01FXCK)b4OwU+2|NftZQpOD$0VNq*LOVh~$72#-ifTV{Da!ypc zcWMQ6vtROfCXWp#aV42;N3HR_93c^UW**qqTkeBdN62KVU%d49!Hmi;vS@yWPx@m* z-;HfFzFjR)Orz_e2;h3JJVN8C8+G*{B_vBC+UCct?cNrQ+`tu>iavBKjQyU6yY~4W zu_n$Sikyv?%h?zNwdwrLM^Wqt8rj4bxw*Q>PpC+J(bjN8O*Ys{i9g{iR+>fd+D)S{ zkdlhz8LMf?eAmuTCi9N+2jg^@$}hS}qqWw@2t3f(plv_EXCuFsDSyZiaV=f+ee^~& zt=zN6iu9x9v?G&NqsL?hP{L{`G9))dY8vuR!0d|VxpE$(v-__u%Q<6!(>VfP8v87;eVRPfdKD0T{R4Ke0&_KvEdI@ z{TDnJ{K2+pj{!cbcOma4M*{>lv435hUwT3S|)}pf(9LUGeAspwr-${uRoETIog;5y@sXcbi zbkJv*DLBmy5$rXppxkNHi&dVQo_ppMBfp2~FV+~~Wgb>J2O{dt81uKGwb&;ba>Z=e zpc*N0#49T%1cw|ieWMjS={YiMDCOAKtVyg^ny#reb49J9C9=cUYA&hwsu5KmIW49c zns>nzW?k!99;Xe;MNNaTaXPMR>hV{-Pp|vVMkzcsGmK9*PxxM?6w7)2V`l-XOOsqe zvY(^zieAUJ$*_WnSb=_dProB3?qE?oI*V;Sc%-nIFxD^g^WesSeRbDjKc%H|1W291 z8fa-qB;LxdvgmoRE681%yzlHuwblZfPym(a*}IuyJsDZbT(&1-aaaxseDEj_e+c9_ zddGZuiTD8IhD2>;Pu)8VkH^)Q!WSNHDeGmrTw}Zo8oBV^q|EFi@Sq@-o?a!rlO~Fj za^NNn3Snrvu%qM5TZ-20AO|d=t!qEFZEtRnXq7$w$#w0TVnHm|Eq@w&;r=K})TBt! 
zM;ZE^387>z#7xl*$bwL-7B!cy{3uj~6o$tQl0VaWKvSRp!N25aju@GB=8PO;ETOvqdVz z5GRH;o7u8oMeF{OJ{?vIxO;>J|RY^4fS{c&R3mbu zl{gyP#?xZ+qPGYmr@GRn_;>9O14v#E8Bev0PhvE6jpn6|XWISGYsSVxsR7>E_mN z?O0MOg)gq#8cMEo-(MH>_Nm;nF3Ik|f>$)ZLjyR`gX(ZNH->fS!sY1~* zxPrWJOF5s0naiiXShGgt5@%HP%%-Ayd~-<@;Xn-r8{~0i&WMaH7B+W-HvCRd_Z{K3 z{?eqUFFDuaP#hJ@nW~tPHa=?hx}mC&UI>H$l^p;@55s$=8%r}L3Gp|DeH`d`^Dre> z-%iG4gk+JsC8~*fv(eHMWw-2%L0Sdg<4_Ci2HrW$0Se)Evbo-@9ij-8<=c5F>PjEZ%nR zQk>yRlm)A5PTNdB0hdM*kwr0V1)c~`PzMIv@(Mp|fN%sxG55boJ07(;$D}%7qkM@Y zHhFpS;q0P@?HqA?i(l8Juz+aWLdFOKi#F@Z~1~M8BsU5ZM<;# z?RmESE6~?k46)|Gt?mHM$4HAj!eo^*G=!xGeH?;>K>IOh^JSWV1wuAEv2z|{)Gu3- z;b6hkO7j=jJt`4H$ZD7OI!#30fald);Va1ox(f6e@_tG8uU+$!1((biX5S)@(d^Aa zh+t8;BTnn_KD^cN>#52VrC6p=W-gXQF_1cXfP>@{^bzrnJp5?LTA1}4^KgGpFs>nC zFj{{C8bs>{6QCP>kJb4yPTVRmNO+kSOQ*b};pHRsxA!PlX#5NVkDg?YALy=0e623@ z(G6b|Fjxm}KfhIybF&L46W8o~DihU{iys;Vk|LVA|eIha?7{KD7WdT?wi!z-EOAMPwkt% zBUa^7IWDq5Sf+`WCqyfzQ$CSU^BbM1$RQh)i}(7$Q3U ztr@_3GNrLKz097AbsmsqBhk|i(fM1{3~Zs)Z7FbUTDdq6DNj~66FW3Cb8hl2e9Sq0 zirf@aMR&ql7dEm_mD^`eXEta$=GJN7Z$Esva2Wzkrz*d&u6OEf`4H#vMc6o-*9}`t zCKt7MKg8Iw@@coAGm+paISx*&jIbJ7oD2`TRYsfr85M2i5FafxtZwCa#Ld#3fy}=D zi!|5)9#}_~b4kGH>6Wxyn6z)+AQ?YZcl49`belMI z`S2T?=$nASv_;jPIFUqhdA7P>TR86_|*FwY?mzoh+DYVJf{kFms4V)I$@30R+5~%%&V*Sw~qb_KoJ=CmcG<(~h zoQbjLc(Q=2YGnfW;Jnwi>y>N<4#jcrE2~sHi1=7g11TZgPo=ub!=}07EXtt3lNq*S zZv92)c4|GH9c{+9)LlRghk#H_<3XB>_xg!F93S=Yc>0VAibic^5oi|g8fI|^j&5M! 
z72+dhWS^1Q>3~|zz~x9!5?=^^CDi^pV_A@$=B1?-HgX&22Acwsb_NQ+Z+~N*B<(l4 zj+1CDu^E-T_h%5TUJX;x1whA-z&}w$FfnZp#OAB8cTZm8h#V&1T|P|=k1nTqKds4$ z3N8LTiQ47YhFr!s`gX6JUbda1ZP^E*QFU6rQj|}^hzR6F|ifq_^C~| zxNv-oAG0gx6M=2Mo1s}86RGq(TXz)Fn^sXgv;pQgMR6r?dxAYteTEH~|5Yf)@^PkjZ7@2!1IZA7!B|@_O)OQ!j z1C5kN2r|5i5kMB%`)y%#r(j~#`ZptXs-_o4oT^(mqtYqE3Il2wCNpZZM>vnZ)~}iy z8{3Z)&l~vK!mO$8{kH1HjBYv*nA~T+FQlIcacN2MuMy7T(>Wc-TAv<|W}cwTZICTH zDtUCbI^107s|GQE;9;1gSm2l?nDR*^E~-+n-!BZY-*qd2iy)kY%WvO6ne5kOf0E0R zUMzoQ`P%sg7RDQDv5A7*Z%;d8^hPYMWG00n zptkj2kg1JD1>apBH=J{lqm0Ic_+ZGTY$YE)fFB=pv&#p+0C&XMZGx2qz1Zs+_CLxM(a z`Jkh1YZQ?}hJ(E@0)|PX#@=ryxn`WsFiT6HKw-cj1g;Fd*&3| zc5S!!D-;ch14m?vKC0l#+I%J-ow**d$yBBI>lvZ=}FPJ z(drb@RCknih!(;S0>&9}V6X@r#l>#mZ>_Tm9USSmb|$gC5TRJqa%I=Lr%7<81O~MUS=f zb=%H9nIq=55apsuEu|5~b?I#f$Mn_huqKH3gp{#S(Lw`ZrKyT1B3?sBzzamVDRD_( zqFT6QS#NXM-_YD;3LakIJX6%~%=dFe_2;?HD_+~{tz5Wdq?mUxUG%L?sIRbLKg&dZO& zDm#YKZKeaxPqE$~YV~@TD*gE?VI!!yvCvVzn1(Hc%Y}?AI!qSWFosv^Dq6SF&~@$^ zCiO%s@~j~rlf!`_#K=2M=kAyL2s{G!nwzw0KQW1RksQ;MLZXMo{Xds9^(;Zp<4Z{G^eYp}Q-R348Uiuq9q^K|v z|7k*1FK!youQr~{9puu^ZuZO0>@Cy`9@Y-$26jwX#Zjq}cqcvDz^AK+htHIy7UuhZ zG{B}qq9dI%4P7f4mvk;6_t+EWqUfh)zEiXs5532d`4;y59W8?crmVWb<;_7yw>6+u zwEb4J-?s2S;B=PI#dmY9he9zI96l5_`4wPFAZQQTsScJFtlsObe^|hCc!nO*=C_#p za+-W?PxhIjL7;R~bYv5UZ-^C|0VKJvp9=dxn7j1x^D~O4kT$$taz8pQ=;`oqjv!KW zTW)$5~4QN)uhb@vzr_TTH1rGne52lyT zup>*l!vmdFCV{5>&tLrCk5ym=L@fqH!IWcw*8_j~<^O(!a3a~ksX7K-$0`4|?f*X5 zh9@|S>j1hm-1t7R@m~){x6wv^A#VubJk4_cuLm8TVe>zeKVfjW+Q zJ&9JN|M63x#WWnx!Y6yXzVN}4rFRZoufE)r$60r?8J(v0r)w0+-Mn@Hh(Rsr3~_e})2P-Xk#>5)xlF!j+7K}|*%*MHa2e-3)IB#ED~@O&eq z7btu0IP_pROXPVgpj+w=2tEThp*cxDjMd2#%Jv*RXV&y){fRe98tR3tNgS=Iv@v3L zHD`xQ5W8RJ*j50G2%r!6dSSL})kXSJVDuX6pTjFdV+1ZGt&dzZUNkp1m)F)#*ZmAA zQT#k(Xt?zA8!z?#^<8qMl3qI7$2d>){7Tk+GwD$lD7!+ICN@bK)63|Htk@g=kEQXy z3x)I9OJjJtpEazGq&`EyJ{b_;>!qP=@J6IL`Hy`Z;GK7XhiXBMTYF490$-@L`gk+z zH9FT;>;>CZGkJ*qqqW$S&(_5mXE1fi+s!<<_v}XmonkQs8)`()_ZzrK>&w5YtRy4) zwtSO(?xT~beXgXqxw-xEj3ccOW(6)t7CO|zx~}s7m{>tsyin^`#K`z2_{b?^z)Aq( 
z^*~XVE@R2UL9%tJgfMNnw+OUW*KOGsemc@JiB+^KH&dO4w7^2y*Es(e779P_2+2WksS+V}%(~r5iyoCb zJO_Ht*CT(vEB7LQ{~-hgxbAOeXVECeMYQ)~spzoVm;-{+@ql(Ks>NABUS4mSMK)>X zoPuyM;~xqGQTISflv=TcN;8G4I0KRYFbe-;BKA$5t#dUFeKQ83?gN6G_;(B?aUh7fL{$Vp8lnK?H~pJQO zBK()Fn0>?z?dz@CX*Y+@J8veje3UKG4%3Su4=(nIGL=)%$q|dU2A3u=QV6u}8Tee& zyXiJSH`^R+0Tt@cXY>lmrUnaL7#z(sB;6!U0B3*Q(#7b%HkqM>Gz~k@6BW^kq!V)f z;v_P@@ZnxSzu4anyM|4$AT&-FQO!+Y{wl@QfkGyb1=wJ?h#0GxIJ^DXiuMv=m)%~V zppNo-*7x7j%!u~e6t4D}T#OHdAHYMO{}YhD&h3@`%is4$mCG}o*PSQb#w=`l)D}>= ze+T~j>5r}f`P`=Yr%#`bS?}x-@V@qu=@K1<$A|mQn7jFZci|1t1%YOE;s73IX1SQF zx%Z~<{E|7=K4!UAtqAfGN9*T60ab=xKVWOwf!Q@YZgM+Whr;9jP6h@~xDZI7j<1Pm zZFQ9T?THLxga)v@cj=R#r z)iF)T#Qo}XN2vsr=!f}QF;<925M|fqCOYBk-qBp?d?3gqr47t80p=objVuJ2zt~uh z;J}VtH|`4W6WYo&sT2F>fj&Ee%Txe1KC?Kw({=-^&RvC_7wgp8!~x7t{Ws@(W&p7| zV&1w;J`Gv~Kd<9ZV)`Kw`5-eabZ^%1_z;2=Q1xl*ry8K# z@A=9=H5VXlslTEpn+7SJ>ZJK1H(^P zK$yV36N06OC}b56C&&N&5dV)}qY@%R$Tm&}0X0-6n zlxka_5YfxQ-qr0K%h3!@pSaA)BS-gx%@Zkr^?MR+-1LCe{ViDoN#nnpP2rdN`6CMd zXY%C+CyVj{C>R`Uv()$jkSk*Sdjb1dTOTO2cNjX-sxt4pI+2j?zF>pS?@(?gP6*2u zP1oc@H;b`yKJU*mvuM~Pj)FES7Al7*0MvTC%A1CrjBievnVJ3YY(Y&V zbNt`-SY7@(A`Hs1`0sT)2cF(H8nC!;u~21iOoSlDs^#7AYM4ZmrXmfLTc?4H0Lo&I~MR%0Om zAY+#`o=tZgu#fz>klG*lkq1iJ&Icj}@!7xu2|ss2a0zLh`D!r4(LF?TQRrXivmg=; zZc3a25N=N09l(Dc2{&F%k0Xz^o=D0;_Y?TGwi7aHo@$$*0nL6NL?NW zwh;#^pM!tB(ZLcFiwhz~8``eM(@!R)x~6_qtT>yIQ22hLX|1vX-rfwBx;14Csz!Sm||g zm+nF+{1dZEle54%$%CQ5HpMhS8u8#n= zNZ6-Lv8^))y8scdf40t1Dk;KClthz)) z`)88UK2xdVdZcg~;vgkwGqo}@PC*F!eb3pUQ~=7~d?4)iFVR07ZbXYH1INXy*2nuJ zjPq>B{{|ZVuQN|W_RN?c%Ej6VX5TZl7)qzlQvWQIV6W&XygR>meY*7=h3}fn-v$8W z4P%X`BN7IJCcxG6czkj)1Atx4jx#(#kmyMMw7}m@2ek|ScLRycrx|C(nTad_paUo? 
zXv$h{cJ0>*|8xT`Gg7*M&Fs6>8R+jQ#wfFakcDzsPiIO~ak zAplGF7iqAjc$j@=_@>)XpxUI%CAUy6eiPt`XDmITzdfO7P&xc2=J@M_htHe9+!t%2Gw3U^mI{P zJjWsbSYW?H|ITVjSeNE+`=x5BpWE|u-=KZ@Z@IZ$&B+_!R%T3tvUc@eWvvg54g_N@ z4uA_+l?OOhQ3A`Fh8KX((>kjvSJsS>_d5~u|Hrxsq5F#ko|wT3%}tFR7(SNJM%02O zZ0ms-`)n=bbrMI0?QC%2YaMs=j6bs#CPB>AadzF2q~mq=D{W{8vlTEeKr>q^5fme; zmM&n=e!1ura|ejXxAV1b4QH{`EqhoW--d#sLlqB!Zex)XDMqy%HtyxgC%tZUr*cOQ zNcD2q=ZsQNQAv?_ydF_&`fO`8Tb>IfmQgXPgf#cUJ zz(4xq$XRpu_;B>-zh_hP)&aSR{8`Y*-z^W`&`em8bJ0Bb>D~* z@UQ7w{aZbV3Kjyh1k%YU=dpW?CyoVgV*9u=)P|mzB zml%samJ#)bnf$)^jjw`P3P>tmx5@H#W~t8QN{96SD9nCMtmhoZ`7N;rkO6vIp8jC0>$$WLnmU9cIhIggFFFvNn28Ck%h;5RNYA`TN*K5yx{siMtKV)D(_xzLl4>J@d`PZ@I zu_1lVyK7hvLj)UcIJZu6$AQ3rFnYFLU8uB1X6m9~>TB{Xp`uOkYgsPpXOkM-nXa-$^_^TBuPcgNZvB?DaIWG~r9% z<}6>cv9Yc8ky>QZel+lQDV;lMDcLg1yDaa-9^ljW5jgFPI8zb7hOfF{+HO1i13gQs z(!NuK@L<@6A<#kL3w8E5(G_#e?o~GP)pFCNdgLS;Bp<1NJ!TJPacVQ*FtH_V2#Q=D z_N764UY&c00s&W-JU}c%4G0LeeK(P2;-^RPPOt6quq=f>aJ88 z1)oP_2NmzP0TGG0wr$aeyodM}v#LoC`}jNPC(<7VxyJ$BDfiXt;`$#n?3z!~+rD?D z(t9J?zju8B`S!=l!p7R@hTf9Y=UtBYKB#k^33~F6nGFrN<>Aw4_h>J>0n@+vq1)~B z?D8M6Jv7iew*mE)?$mZ3JrZerWMUXTDACF6B$Dca+ByA9>_X7$v`@LgYF)W+mT>9o zNl%2Ud_)Y7nJE}c=rLXb1@tAE;jcR#M%9jmocP^3nr0m>`|h*8@_YU4=zr{$kk?3D zN*_^J`L&JvxkU)77!o(!QfmjRXU_L#P+`fs8^mnduQq(Il!@4MY3_FuW1YxCt4rVO zeoA6tAar4M*Sew7N~(aMaLj?Elf!Ig)xpA-qVn^#w&cYTW{Gipg?~tMP4St?=el}p z@s{q>;%s)EX-NLRJ^5|i7H5W~VEyE*@b2ZQhZAF#2^lw?UZKin&}Q)3B)If@;o9&knuJC#Aa>8!a)2!#UrncVR#=Rr@+8))j`uh6!>-PQ4Ie$#F zY4&1<9fqM9=O;uz-L~NN&dpWP2j0c?GB7f+a0nH z>{#s{v8LC$xp|$Ro$``rr5Br|DQ*vlX5HQpEn2R)-SF&9+fqk!S5#y4xOWDM<%s85 z>+Q+2{Cw!OQ=a#%)jzJC^U8b7b-McEl|yft@~*G?q4(q)52{8El@B2&a+^XEZ|m2r zdt38dEQWgr_fFP3t#^uZ@-y{Lgq_{{cJBY4`FywjCwN0#40ZbdX$Mplx7Y7XJ)FK< zJSILq{{D2W`TEOV&Fi&Yy^rpl`p6q+DNp>za7pc9)%n)z5x6|5b#B64cIors*wrRK%^gcfGwXUc$Yy z_3GZ7Ebpo3C)~bK<@hb`KKC-S_4)m%0epw6rk;D&{`lsa>ZhHL_a^SltzB65+q&Ma z>gvryZ*Gd$mDeWz>g<0Wh8!ybKm1wkRGy@5)ylo?^mgxCr*~TW@0LqvwcZXfK~Y!X 
z^5C%3yPqFe@14{AFz@>I{Tp^?z3nT%I}Ozle>@)qJH6Y>zd`F=W$wdoHtF+Gr@>T+LD!V>+=Ckz_!DaUh(VV`uzd-U`&PjZyo+9bQssyKO7e QS_UBSboFyt=akR{0QF4ocK`qY literal 0 HcmV?d00001 diff --git a/img/one-for-one.png b/img/one-for-one.png new file mode 100644 index 0000000000000000000000000000000000000000..991c8501464ad80547ab037082012f9339ace50c GIT binary patch literal 27208 zcmXt=Wmp?s7p@EC#oZkO!HT=P2A38o?ob?xTXDDG?obNF-Q69EyHngfob)^A{D9%Q zCQK%?*Iw&+?!9&*RF!4Xkcp8$eE5I{l9N*Z@BwB7`u8OwJoLAXaBd#-faEBr;`fA)Ym)kpBb8@Pv12BRYdPkSdDNUQyllV#AX(5~K^>zZN- zq(bINqWz?uv#-Md3#(of^u73D0|}Ed2@ZQpbn|l3)de)Qu3+yRy2;_q+s5hA>2l!1 zSy#uo%#!{AE%FCd9wI#~^x*g(>~d9e0Rb+Iv>28u9Q4pll?N;f{eP$P5P^X|z-Cn4 zN9O%iOgLaRZlKxF+B@>9a zTtZEj<&<9wHEY+L4z8~r^i4Pbusa&SlJH3vh}$z}sbRti_Kg8rnWPwq!_~v{9G^Nl z4ag97lOeRlrI#~jMI<=oSw&n&SyDq4-+&b#%4_^KIwe1`3k3~V zV?SD~1y2oD1b@dg(PQ z_yihS{v#9oE|vW03nnhE83@ciS9ldEURtZ|)dqb=X4r7Sqytr#yi-%TqH!@Kye6ab z3~4;xpJK^{BTcaq!i5QWkMg1kf^yd@p!otZ+<$+OyIOs-WyKn1JlN&!!yYuN0l@&z($Jwpcc~_ zmwZJreNJ^boZMbC^dBH`c#s?)u)^(PpomPTR`EdJL&uPbZC}Rn<-?zmHNk3mW{RDR z9hgB=Yc7?=Xn*Z3FL5#$-_<>`Jknhvv{bEC+^|XlCJqg60N_A{c+SzWv4aX#o#8dR zgL$(Z#y{^p(%6^s%rwlT`S?VUPY}qY%c`9T->wdSYW^k;3 z8W?zNN|;2S82TB+VeNBjm<{9@wLA?k1(B(NKBL?k3OzT-gUIA^ZxOo{(gerq$zf9S>=-=`pSXnd_4E8Kslm&%s?T(S3crR zM`rmoFhYYNONt`$$UDEuGQ;arzX8*m3xj+Ly?L0Sb^q*+Mu8%^WQQacq7+Po zM0{cABkG0kEqVr|G|SgDk$jnDP``IH6+O={frQOxnr=8-(7jztXvZ*#9!CZ`p^WuT zzgS{`2glq$0_D(S3~uDq<~?hquvJRWRp^>zOl20CANNby?~4V|bi&mQT1#1^J9Pie zG7i{Da^M?ETv{683*>5qE>|k`zlPH+c*7C4-*Xmv8tpuTuWb09HwO)URU0khQ{!&2#U)E=T#`895H0kc<{q3KxQs|HK-g|A; zj15CG!pL5uWyQut&Ti5fmc{2p1zD*@U5ug69@aQ+(<|^4|9qr_$$IYzV>y-6a6x0$ z&lD8z_Ut@eiU$oQYD&txbk|ITx3P3i%gfsvm3-wc6U6ewS|ow81R8Hu9xft%V$qm4 zEQJ)NmI45axx7+_&b9%~2wXiAGb1)G1~Hputsmdc$IqTCKHg^06TelDHzc&L9-OPC zM^eadEND#9t@?@o#cef~k#Gt;-&+)F-DDO&P6E5EYNN&q?Qs!Y@|KVI&ueXCA1;$> zGNQsbW5~{pI#@=tO%}v~6(gwYL2B`K*0kx!!-s6X(qh@oge(dugH$=T^X}=}^m7>j zY-mQMl8RID0zOFokYc#|YOvq#@Yg3MwKtx*%{#`i_w%w(Wux01@#PIRPw&9c^r5xG 
zwY;P{__kN->*wfV0&9@-id0f(--9Hj!SZVEeru@702przS!6sjlxcrdAwV=oMKRU}1tkoUkE*@4m`8 z-=8k0P`=#}$wX-W$7Z>0JLL^Zk@f~7kt9H`*en+nDa=RCs8uNyFzXrL8BMf-_qRm1x{5V(yJnoi6R5t;Pgoh*Up}Ieq zw-yrKC!r8fL!|8IVYz)Hlc$ARQH*1UF>Ky~%S{vP=z3h>4;79rr0C# zAC~ET(5+>-2(b-F{Wa^6aF7@xE(W@$U=p%aHr?iTe-x>EZ$G?%@bT9@2t@{muBK!s z$zFe<#s_j~!ogy|EEw_!GocWCI@O|ZIo_ek62Ud~B^jgK$Kv`kgoyp){|=?Y?$!mg zLQo%QGbvvPAG$Vqi()|h;O|bj!g<^O>hnA}FD5|~KsPg7`w6>0@6TmK1V7O;%@nO7 zlA2ep-bj3S*^#WCHrvTA^FJ=o>CQEEJTn>R)w20@;%kGWk@9C)_ZJAZJ6_zH7xABR zt)~4;mbeR-dE9^3G(-e`&OKT99TkM45-2R>9~@MtQfP4Z{MS(ZLnF9|KyG7?zRlfm zvl_|G$DPVxYVt%a_iYWL0sOX5Y5m~sVjuqLP-ILFM;$7ox~M5dLTesVtE<^?S=C6r zOGw>2gq&swYAkn<^j3e?Oc=`Pj_!EDz%qTL0kMYgqovC9p~bbZTc52os8Pxyx+#U< zdi6GmPRR}!&mB=W7r*`HFXUmtKHJ~L6_5%!n5&$aOtT=&|rGfQzZ=?0F zFei^=hFk@g?KSD~P8Y!pUh4qo{ZSnXYB;5LDq;Dn01;NPAGAM#$HmRTh#cMz~GwA zE*o!u;YEjkyAtv9j9rBB6CK<{r@J8XKBDYNrhOwM`spfuqo@vWUKh7Ahii1vewh$ar=x!M6|eyCy6q5XrXq zm(Sut-{1Sl-+SAGfja0{-Y|&e<IH)>vW?dqT&tVrHUiv;NVo z==Z>ojs=NGO!uf3M&8$A8`WAfkV5ac>?hP4H}nuThyNwpX2fdr#CYwh4iGrTapC=l zlYL8YRMh*Tr;#Fd$HaDpcsIdj%0#ott~+PVYNa`Ss4F}!A(U^7YEOyc=Qagg;B)5X zl!`Dl>}#GTx(-%Q4bx_S4wC2yB{DgwEIDR6Lnsu&2C@N(`S)_#M17r6g5%D)$g)S9d0^edn7hwZao^p&W@6&T;0Ot+X?u1>S+ zHM2doAibe1+7LSHWoTS${v7&gA-I|IkhuirorcV zT_hY!AsUNAr%d_j(Mz{}+bHVr0K*ZlULlu8>KYC7FxD4%{wnoo{ znbtfs(MwhEg%r^AI_)-0s5aW_*i>*WO;1mIAXL>13@wnyXnR4!E`4t9m{yFAT^ zfAwWn0{yZLF7-XFJaHu^E!yMl@mz|W^^y0(`9NidpwrH{lT@N!T&{R!JAXpe4RT$;=0AnB!fJ4^_%KAtJ3KnKtU&o^EK5_p zSJbMxrb}|{qx-Yf8c`G0$sEBb&eF*(pXlX@LPwFkk!%9v0~|2&KI4iO@2o-*1%PO_ zG#*$?Fk)jHTn955M6Dx~M7f)p?C<^*o$DS7k(LAi60(-ANd(=P4kojsQVqODQgpxS z+In7LN*0Q1R_ZNdGv#J-Sz{A(S#3QzYMq;3#LFmB7uQ5$)y<+3b0MmTzSZk@|9H9~ zN|BD?mCTIBXF-C~tTM0*!&p9=`=H_wQ`E1;K$`Bg9YQc&tU%xxApq|wJN!K5ePnvX z%cn<7u-MJB_i(x#^rc*LdqLNs!CK&gW&!$enP)NS)FyL;Nl4$kt_BEx`_I99%;J{` zk4RlQp|_2mZ_gH*mB%uRL=@AFblt>fcY?q#_h)sv-=|e|09Nr_9_&V7AeuHhK!Rm3PS3ye6i@%S$`&lyc&|biO!;_BOa}P_;`>cVebIqPWHB^7Oe6PA3wPW;z z3~GXNpaTDby2K}Tv4ufMYYIzLyyBR*e<<7`A*~zQ3Wof 
zta8eu60=nHHfVEW?tD5RS@V5ZUznuuIsyn?c7H^~z`*!CGyBE<8Sa??9-AcSkznwa zKq#E-4fZXHs}}^mWz7ng4BUIW#dGd@BqaI`PV0<3^of1A^h=SH+(EQb;c&8LT0UE< z`3McSx=@-6`t08##J>rv;)@8`vV%Uu60>1oQ!oY-cGf>?BY8}oytZKibMvw4u>Uy; z9*IbN=18bF_ks*#)E;~AqtXOi1bEsV&lWo)aeteyj!F9u1>d9cPnF@O!OL!(sL*0% z7sSikZZwG@$A($Au|C$~_3$$f9tyzMTxdU9;upBKAM1&aedB6w8_zrWtxubfnMowwYt*ifcbdZV43T?;Bxl%u-Q~d*x~(fuSddTo7OQ%o(q?fjMnmU-loE$lkO37 zE%$g_nro}SeVH4a>knB515RRspKcC!McKUEzWBO-NGF`ANf>_x0skAenIn?mnKkW=ety%`tTmN*HBbJB- z^Ds(O8FWn7n98oXF6!%O5`d8W_J*-kB&E)G6~!*9o%iK06w^fYbfD`Fp_4#PE{Bb^-D%PH zSC=SHO==L&($eLQ5W-4od{Uj(5m4xv@bBm{qNv9u7MM?x7>2cPS~y_-KGg zsE8ywK*IXF;KGUy>Z4sakbsYCQ$WCq1q z$YevC!Sn;+L{T-fc_E}U_=T1>nH2CYKjZr8q(IboCb)%oeP05ZWR=|_4^V>0Jzr%D_IIA&}$e?G;gY)cUrGDFVwXvjv+b!9SN){w! zkKW&OJV+dgqZ1wbxfpmHf*i)eSKpscL7d|5NL)+Eg_asd*oevS&Jr*v*O!!!%dSnw zKDzl7OXZk~SC@aws|rAgv`6!0b9jAI0W5~!MyCK8=rz}z7%X<_s+0!GqPw@u z&gbp2vw82B6jV|Seln0j?Z9V3ajIi&&};xk7ys8rC&c03cEa$f8zHe zK@UWL*Sp&W-Qi@K3cfqicT4RQ3&1hBDTy z4Q>AO{9;&+RFonzCfLBBSK1mUw?>zRnp=FDl-qfzQ7MahKlUy?=kHjhq?8)c{^~$_ z5Kpid&Hv^*mcpE4!_ofNM>9ri0tCH1YzeW~;JGe7UsM9d?NTn~TpAjU@KOEj+~#X-!Z6GYLc(FqQoS z5kK^%(l?pl<+otrUvnOyaoKb-0x#dPPH&k)2jUg`!v!@z|KKkN0_?N$thsP32`|eA z*qX(%bOh=E#qcG5=-xO9>!F0biHVp3j{hF6ruI8bV;MRxO zY(Y51j}o@PoCt^ahIYG8T;+Sr4PL;IdT}Clv#=!(&owYP&ATh=wR=mq{Iq} zJRecFQP6h*roI6bz-F2nwWH<#muScHz}42~_%uiJrM zh2KO5w_A^E)6FRTu_Y0BZ}KA-t$#b6Rf1^HGJy#tL76+yi!*;dtx4s}Z8;zV{S$&;tAD)ceo z2J~ZYooZHA7xxImar_7Byc#6&csXlYmt-JgfR+G~v(+XQS>#Ut?}wAw|LTD&hWw^v zIfI1+v`kzbWI9Z;T>!E%waV20qwssoznT3adf1M@GV}!fMEZBNj|CqD*8Za4eq=+N zY~2?OhSn_vqvM8Q=(CPbzzUCzZN&M{q5VqcrImp?mAeH>oM^-1A^QJr42mtssNxwy zbvbMAOH^`W_s3E#_o%)t|2rXu#rORPv-5E$>I1W{5w4&Sw1z%x^p#h2W^AiLpz5T? 
zYNh~HSNU9+|ND{ZWue(Wm*_|Yvw+eWxdo{p;)m<)C+p1JLtPU?gWg~50rb8s7r835 zYYN8ri-kS?K8%kbt=ZFg-~X`VEhhWmtBwV&i_Oh^hXnOjGgN)@j@!Nejc4*=K=rgV zRHJd)^5Zb3fz3>Yk6izkhM=oSaX;KhYTJ?MKm0G`>0u)Pn5sa2uUjjjtA3n_5ip?2 zrK@2eiokNX)|2292gj_R!>I2ybQ0^J!Jz9q`wxp{Sh+u58v8(=4q*yEu~hat)PH0+ zax5QP@j5O}RfVczHuT_b2ReS8e5c*P;RI?~N?e_>pNM8UaE~IjP~v>wVMh7G@pzV| zZTN$)NsIG7Z2qt+_`8l0RI`aeWszEovw5n^?A|{ZT@Jv+!!rk!nvbRED1Gl5DF+_& z{^X|qe;-CnlCrNWkrZlRyl#&Inwy)&`grojyq|BaZ%-C6P8O>$toZ9MBgNDa`}_Oz z6jE8Xc$|q}qdpwkpppv_zDfb95zauZ71=E=hb!*hN|`*a{B)ai{!q(kyWaZeTs)mo zi)GU?j{%w=#9A@K!^69Sk#H`TOyi0fv&-);8|_w4Pkz>#j~V=?0%lfCfxxU5M8JEI zsTAhSx3<@z&5gx9hjhUCY7?s*n;uy>Gi_(?xZ*W!k>z1Su-TvgP&k6j4atc58-W%K zS%(|Q7qrJpXNIICh2uHVY`8 zc4l!>hmEMi%?BQErqkH2)Q?n}^c5fTWtKx34~(eIyzb&WUel1V``X}O(L{c+T9e%u%^)c2n??U^M5~E!RzVinPmESo~4n^u-f(h=HO`E3YAn;^z`(3 zdwl*7Ap`$6SA~1%v}*DyRGc@IcrJ;3stm(Rga71~kk%>vaLj%11NI9_CKGHu3cp9) zBib`wPLzLsN4Bsx{#s7N!G=jbykzntFBCi|Ge){2e$iU6IlJpvqW6+2O-rL@<-_{O?_PAips)EnH7pv`nGsoCJp8mIrscIcC}o;& zB4jn~Y}12^HNSUXWPGNqs6yFT00#%M=k;EFE0d-WDYwmngkYgdqX%^T#rlq^H6_hM zUV*K6>921(2Fw*>G*SOT|I}sSdk6|ahIC!2(a5Z5Oowy^{IazquJ~{^9dBf4SiW(e zjMZXgqJCT&+Lvd78qSrpxgj$koV1(MZBlk;^5Rsspk-HB7LO~kh0+^Z7*#CgjubBP z-eknl5(+&zexy9!r#!pV)zbn*ym5YWH}c!kN$SF3<)>yg*oa(+_;=>c&=Un0SsG{L zO;d7#?{F>AYkNtTm%a2~nMiq?>@E39D_h$~-bR&|{ z>5+Km@U^?q0z@IJ_FZT$2je~e3Z_>5KNTlaGhsF+?#rpVXi{BKKhJO3?RWPlm5|34 zaWs`Ra8mzUf$8DbK_ip`s~Ok@tZY6fe5|m-%fnu~G$HGc_~V<=li7lq?)AQBCPM`d zgY)h(t_CBp#k`MjFjuV6)~3$W2hAE2LlhXix{|)JUmM1@@J$`;R{n@J*ECQ`U+cc` zyPa~yN@ewC>10-PFdKG$d|3CU$nl3rxBU#8+t4-wDCXjdWJjo4EU+i&|0VJsPNY*A z6nuN3@*cNd`?>?$n%Wu8@l>1dkz{f{kWscxXc*IpTf2NDR$zfwmeU#r|7q4Q49 z8PvS>Z{s{~xoE}x$a}TeHFLNhfyki5TWo=AUZPDu6w{^rh!-bv z{qtBydI$1yG4xZdP!WSMZZ|gCTi%xwkw0=0N?sOtp?+TI+ag13kEDa-fc!QCHc;SM z2p&Bxl2wM8%R|VBSgvYSGxxIzPizvs2Ap|G8wk1cUdI;q}mAd^o)%?p$&M=lMq2$S6 zzVua6$1h_q3Wv@gFei1pH3l7*M|1BoR#sL*n;BRFfC85*b}B&2jeS)*g;;53CMd_51anG0SM>f7p0lo z$CkQ89~Ix!Eo{+z*u4N7o;L7M?$c%Qvnao`O)p>j)^d*=e6wBG6qyaRgsK)Z|5>hNmtm4D%b 
zspwV^M(7q4`6+Rh7KH;Df)Rm(3N*FJbDh4rK38`^&2N)3Wq&sD9#pd1;hS8a`b)+h z5J^*t0CVAG^X0|kjL6SjC|FowN-cqEKUD#`JQpt(ldm7+ZhbJPMD+0-y1Itjr^*rh z#Ks8DCc8lh{fW#S(U?DN;2Y12%!jULjM0d$VFqK~VY=S0P@%9Yt8&Mn*%3=cWQ{(G-BG}x3Oe>dN`E53GoNc8g`osVlj21gVUy1TxU zqWx{^<(z7_jQ|G&?%lY<)n7lI5Sv>F@3LPlUU@*?a|Xv3#G6@!%sYmPx`|qkVP(#l z!#5A#BI4sQr;gCfQ@swRl*5no_VAZmmR%ER7xPuR&^^uv8bJ|B1zm3q&b$KkM4{=N z7U@+epNb+h)aSWzq~#gICY_B&>JMo^a?Ic zn1~$KHgfu^PieL3f-AYXiwH;5Lwec7bZYm86%r)S;F5s^w?95O*5dg32-3aEmfi)H z5*T>&>>RiIHB4pqQOxs_EkF@lzk>r!HTD;;!H{g7&@?0)(Xk{8$?hrX-LRw)x@FR zGi4w*!1FrqbQW_TejrZx3Yg+Lyp0oBBs67!6I>Y*yIWGSxZ54@QPckFH&v6UxupRW z@0hr-qj9i+>)$-XcV!WP+~|6qF+Y<5G0iD%&^)59wo$wm{}8{}!-60Lfp zcy4}Fnr@W*;`6wTkn)s0YL>pnVMeU{4c1#?hTW3QbCj=*30Ks;<$J{De+K>2#?-36 zzp~scOX|!8b_7HH$oKs@%L<_|4B0ea_S|0TI3ZZDN)SBSY5n2+-q~PBfx`}?0Ns7o z!1HK1>sIE4cY#j`mB>^EUu@#BjP1FZ2Z5-Qx|;*$ zUpvJjcH>7xfgo9F8tl)+m+qUsL3Ho?=UG5q*eM> z<#4NZw_9*k39GUJfcKx|gKDiG)b8D#O>-j0bLQ#S8Wwu4_e~#PmW$hJQTtMqT z1dJ&|T+!c}<9GLg{Mj#0srEc*)c_uqmCU0o^O^W3%y=)R7>*UOa$ z5FDcta8mFqVHJ_#3(3ZbRYxOS8u@K&*BU20vo`LoW7>pY=z!zPsVsjk1yT|}Qs3z^4Z3!y z(|^2~615wn1{gmTWOfWbyP|B(B}_P-R;Mf7&Cd~XT?&7$jPDz_U=AQHu}oj*LY9;W zLc=nsFW*=!cbU8eXHtKlcs`?CRr+ucaRCg+KAijjQ_MKXi+ov8zlzuh3 zNkO~vN;K&%aBmvRJAOfc2Gt5N#xnuAUV2o_Wx$22QwQbS#;KW1fZxq6?XjUYeo9P@ zuDBTi-GtYm>C(%^R?Lc6r;$6TR;Pw?euN?_*Y9~`S62uBsto((t6xIqk^p~*yy!$AHr;q_(WKO* zWH)I-P2*f&D)&Cjc0UDw)1y6}6ub~Cjm^G#Ciz%xcBOuE7W1Tj=MNwe1wCzEemnCD zxf%(#2w&=&tq|fwddM^|+m_&;phGWNw9$keu`FRv{iK5#v|$_WzF}vE|2e#W(wA;L z>He!*OY0b@HZ047HC^Vw0`pKT8^?zHH7!+mTV^rEozo_hN0g#x4Ys9hT=SA~L=}z|3r%eAic#}E<=++BW*-mA8$Gu2kr5f< zKbrb2hw=qEv^CnDeLo%JFq71?C5tl#B&n`ff;@U$JbB53`MrCxlOm7(kA`xq zEeiTf+ST2aSMso?o*u(Z!dbr$PP!d9PZr87MV#MzD?Z(TiH35b{CqR=%+h@~bolp1?o37XEp--y=D4}`^84sxk z-cx$Z;5Zs+ATnohxN<^~|Y= zp8fS!D8sF2E^yG?+8-m>&Bj=HB;JvlvstOA?b*u9URLzIX#YM zJTlm6ANiz~dOBwkOvqucEflBk(xTtyM&2d=^h6x-eB~;f_{`{>sFA{I7;Ez@mE)vp z@ZD{#wAYJfp~2g3Jh!gfCu4Eak8$k!QZ8Bp`xI)Lw)=2H;m1LO#bZUb{Pb1M#ZQ*$ z901zQF_cZ@o4H|+W+FuGVtKq%#SP8h=MwKQ3XYD8)~}mgCtG0`877ZgURH~=2!9t) 
zn(#)oldS{Oi*LsjCnQ1wl`s;g2lZJBo;1iglhTBjZa*pQlibX)kaD7MAY|UW`>>Pt zg|-J4M|DY_la9^Q=tuKQD56X;8Q5MPTwXw_-1b39k#g8Rm(e{ij$H8D0tnR!=dXu# zT_8RZt5v74Fm6={V=T?a#4|M|7N$3jgb9=I2oy!U{Mk4?R4h4nZq|5>;#1=7#_OWF zi*~-@bMp|Df4U$0Wfo=l=|TGDctu>rA;l}tz9@QxHa?OBypx1|z?~%eanV=Ujj5NK z_bG;hj@CVJ0v{%lJs>d%Nvmq#Fd!0-)ynIO;BPUeu(=;PBD^1U)%6nmrUf0+AEH8L z9wuh^+=Pf#gmj3Q9Xt3PkLb`PkL4LIMj5vE$|5K*RjX9= z^AReF(Sf1jjm*?33_;nK*?8s0%%Vepe09cUZEwXK8fPKK5PfBXM3AcvEK3yiGGI{@JD#&mzwO_4bMj*#U5G&F4uzLA6LA^PR!%j?Hp< z&Ld$$Cc7Xkjr?O30Z3?=p`Lqmf&)0jfCL8OV4JJ6(Mn0@L<<;euCb1azA)o5f1Pf; zPV3@R!itonf*=BiQw=>~ibwJLF7q$8FB7a@UzUH-n;5(jFj+U|Tsbxz_T!FuPO4uw zEhCS&7oNIl@w*UPT6Si@Xu~a&TiE-&tZQ;8r$pr`rb#a^22o5q4V4$Uc{D(cHr+fxff#Y2f}~H3nW+gN9ar*GaNS|x zOg0TjV<}luS;j+S7TMUlwDaw(g4ghd5*vA>!`88RTc>>P0E7+ldg6CKV-|E-sojRo zx6?y92$fuEUcc5Sl&V9X-_O*t;CGApbQ!OyhfJL=z00h9OkYlmybW$Ma=3o}nXbL5 z+|6i8sb~G>AOxy2uhDvT|E0{O>Uqvla$@4ZH;Zyc|4@B)@q?GM$Ij#@1$~!7rljm+ z4T~`b+IQi$w{Zpw-QUyGfeZ&pQf(#@60n4LU-P>6jgfe}6_}LH@lsef&BGX!HAaDh zDNA+7EtB>9Yoix%r5Q@fvN6xSOhgUDP3!9T!W6AA4ZlLtA_QJjKGDF9fGRMz-;qNfn_LI5v9)FdO^KbhB>;T3-?yd+PO!pM zR;1I?MaTA-2*%0)E|gpIj8ps^{V|CuInnngN77RL$@enB>zAzH5)HTKhCo0*A8yBL zRirRxBAGcVsD=r0`Flq}Ky1NY zw|UC6kObZrE#^rIrn!08`FzV?7pzYhODY&uv+LJ0zge2;o;;x1DWQ|ZYPiwf#uSE( z_i0H*gAaE{hOz`Tx_I`|U0WN=F*Cp3Uag-?9mmAPWWp?8qoml~Qx>wGZ=G< zt>jWpMEQ#V-(rlCH-dbxiPbbAv&K>0!-LPwR&Zy`7OWI?k4OA<4_UuX4=Dr~rn?W5 zj;yzM`n|(w!{VkgQX_dCLvP~Xfx+gqB%SI<|M?+$UBegi8~aP5Td#V8$mEJ}t(S>` z-`RBAZ{jxNVO?QZo};(l#~%`|Xpf418l@s7Z)&=={U#lRED_(7xc2rQHyOXGC|#a+ zf3v{9LVAU&Gmndo2V0`Yrx;Q5(#dqg7-Cm;ztdm1p#7*!3q{?y(d5xpW;-c^2;YRx zl7uTv%y$W4t*#OtTBXa}jyE3ThIP-tpW)e{Q;Z_1`_?-WqtlIf-uk((gB_MECR1f7 z7V!QwnZcsw7NJzUf-pm|**&pm5Ni47I;agV(cX*z14AxFnsprin`=wwwRHLFn)vH5 zg=N{;izB3RsY>mG@)wh`_H<8N&W;vn58EnH$v;NR+NCIyUrlQiX65-urq+bb`ocIKX8r4%QXNclUJjRRZC=lG#F$e z9S9VOZa7f~g_hVd*nK;O^3=18h#j(otbgkeSEtbV(^8u&n1Y|R;ibe*#{{8G8t8tR z%?l1RIef-w(B4p+8yvyJrQZ@BW8<5^^1hkW>ce_}+ES%;cNp$pT5YdS8Ir2J<|=ab z2IY2|WV;5v5X3O_?EcDZ*@5to%CnaHzIis)X?bWdG$ 
zqP>~u^`3i_b2XE?2rr7n69Xr&|G~Y{E}de3EuMl?-yO}>z|RGbX3PKfkamCt->ZQ{ zJnoqHXQE+?hPpADcC;psLyTBSg?Tz1mVAEPnN>VC3*$AP;=Hk=I@3S0({9+(<8pFl z_(Pmpyougf&6P2C?02ATTaBIyMB3<<%X+Ng7Fz^xSpvze`cSnYMIOboI&SCMfl9(l z+MV&m+fD_1w+5A$S^F-GY<5?sN!e9ul^yqAQ8&^q<8xWZdJJ|EGV2Ow&?fAN=mp2R zujSI$!1~`=t~oDks_aZU@p4T=jn*9~qX`8_&`1`VJfbEdPW$-)EgDkb$^N5iUWdr; zS5IA^vL?pauXhjRb+eN^&=t#B>|{hLiExf8kLe)-syJDb>Sdz6g9STw|AQtEcQNMI zR}I1X3%!-fp-7>UMRSouISn>fwtIC_k!MF7`&LgXkw>eVClL;G_YGH!e~Ynnxn67S zMl4on+R()skus0)C&Yzl>;AJ9SD8&M+O!Cvte<-Zgj`0y)_I~Y82S&SrkGN@*c??P zl$J?g{?3C`Qx{e2!?X+tFMP|V{2zSH3$9psmvtIUZXngXrOYVe?C*`)_B~pwtVUf# z$gZ5P?nD2XjTtW8ZhiCiDNh2KG+n3O7W;8l8gvLC?_qF{xv@Ai>So$YN#V-)3Sii+ zy4?;d`PM0-7I?q(T^R-fN5cB8qzFGHBBa`Xn<;4EHfb`rt}VW~zS*7GdoEe{u{;WX z9r#JFY9hhEr*OldmANh0Gnp;0Tc@(YnMs%0ySTX)$n#r%>u8(4Lf;kchJc*wLXmnL z;h}_=Ax62*72o6a=4x+WD+RqM#=~vKWp4*rxFufH0sY1f=nm@o;cPy(v+lmhEh)yW zPiAljAF3T$>B-`O<9!|r7E}?30_uPVQ<0Yr??tt``AzJT`m|Dw!%61|@fRjjrQr)i z)v$TyiU+7X3@8~@9L#Tc#^Lm6R!5Ld2*Tn8}29z|Djvd>07p^(W8K^JvvDf+q z$$0!3NEjmI+`jz6`A}=#ro7O_cS1A<@VaVPQ;bWKe-*YrwsI(?AI@$>C*IAz3xVKoIm>AmAUo%%K)m{Yw#d1 zB=CTFD8Deh-$gbKq}>F-CaB9?$R*;@Zx_OL=;6|LtEfZJbi9v6`A=q#3N6;U;is9$|8DwN|7OZi z36xew2wruaOnOguT{u{rttg4Gj$G|pPG1l99K1Qu;j$X6CDNg#KObzL$e*|XxOzTQ zQ^snX#x%-g?mmFs*JeIqY`joZ=(dUeIi`K+o+OitsKE=p2XE5R%n^@MX>6MjEuZpLXHMT4Uel4>>_D4e ze`HD<`}B$2$Zi*K6*F6jn$~6qsNLoHX~e-rlvL658mMW)AabsqWeevK-8VYA{FM23 z>;7sT!j~K8wS_n3n=)yUOoE#dfLd9BA}=Xjs+`w(%i(mJyF**4arjf{PsMbOLF+K0 z0#r%I%2`B|s?cwXTvBhLylKq7l|xU>(B`iU<6&EV#~lx*=Z+fe3;%jjhgH@Q^f5L;`0*S);v`>SNsVW^Zi!V-XV5iCD^Pft21yid7OYefTkF$!e8Vz$dR#~Z^QmQ=>1 z4xtE~%8wP$f&&B*hw=eM#``^{J+F*tp?e`DrgGv;rgamY9?lAVs6Kaz;9Q=s&RxCU zOszU_NGE_DoO_EF^U^8YSGOvup*J!At=;?KFH1^gs-}9TA6n4Zs~v^KGY|(mgRQO|9C3mdLKmXd)q_2r;it|JhnHZ zCRiw(tQM$HEhh(wCDyF(4`0b*h~J~XWGbf(jIZuy{MtUy9ockRwQE}DR-9whlFuQa zPKjicVOoL$T!Myge>j9s&#m;{?JhN9Zs7$gEEc;gH5VDa@w9aQISmIdYIvqs@iC`F zxcvm&P*UC>+_HYp*fF$1$r_%$9X$B?wG%@#6N%>-?Nr=gbBnBQZ$|MtxkKU}?U%fpGPh?#BA8qHY1X1IRXxlXTa!?@qI;xg( 
zZsS-5fevyMWGw)??PE26o%_TsJ-xza!p*Ert>YV#WNc}*-3xkKvQij2nQc1Iu*t+B zTTrVIGutIX?Ecdx(QQx<#Q~YTPLRr9r09#xR=4MVTPc0$UhfT))cnHEmhc~2Dq-Se znEvbs{iw^#id_2cn3s**rE&S|S9ue7%wNY>`g9#2Kc*!-$14SkHS{mi?^Q&Z`t+M( zcwFm{zBaaxm^#$A1fMDm&!Qb?*9yM<8XmW)fBN;Md?z!6r20&Xw2~2w_T*gThr91E z$9W>~PKAER7_;iN=J1yJ_=V+7Zu8Tf;xO26aoM=2?$B+TpxF`vLT=a4R-D}tYWjv)tN?+TRQ3>k;v}@$KnU*|J-w32BCxz6hf#zl{PkYirjko`&y|avpI%?a! zAT1I@cMV+<64FQwB_#+7B10HRH%KEfNDMhNNP{Awgrsy!h?E18(%lGW&*O8}I%}QJ z@3;5!%s*zwz3;v6>-r5g`jIbT!7E9%8InxXK4h>QH!OxjeL#tH5w4p_-1VW>qgL)9)GfhUMTbu%#JyT!67P=LQs3*3g+dTK7=r z^ArY>GLsJyTC+`vrvpvT!Vsyw_tMJ;*xP`2K*V8I$~C(4{3s>-F2miIKzzoPV^m_` zl0FPi!{#sEBx*HRtn(0b=l8K42>I!?Ia%4>*mBqRHM6X>8ia4*Scc)zFB-{vfUl5( zD)3??x0TpsTr4j{q2T}lFvutGuGixKM(Zt9W!>y>P^z+Tqi}3rIyKF*3LHj9G&#?( zPp6;K6g7>sIJKx>y+)uWk5A_M2RgoJh&$@vHgA}y97;G&=1})wyn`?qtJl4FBWF7} zYw4$3-(PMp_* z>nH1dNlV}2{m!aN(UW*8k=T=PTXk@o4V)H#Q>vzIE2U^n{gvW$Y~W#3V$t8&RX=o2 z-KT2`luAd@;c;QnsqxTXL?z3V(``{y{gvJacuNCGzsmI{T2Es}bDFhMc&(*YqnD}H z{}!<%!^gTpiuH0C>F+k6&oxKt4ILKZ=yhR8Xy*$e{V35+C4(TN(l}_v*mv)_EIHp# zuYT&f`cE@b@Rs3K>xwAh(Wi%#t0ex}d{}pvG0v<#r(6>4 z_1@d0e)`y_`Re?YSbaem6TMrZ>XTvtNA6+frJ`*AD44SNM4@ZXmn4q{?;i@rAXIphl7M%6*i>zy^#BN&(w;5+m11FlSBPZ#sa4dw zpaeUKsK1m&g@%QBqj4j#5NOraMqOXajbwa&LHF%<*$+Y|R6f#U*L)7mD4b+?9Tgsz z4^nc5*Kx-lOFe9{O>~Q&kM7tF&DuVyEdDHcPkkCfI!*OD`nFh?%h1NcQw?OzUCGGc zK&_i?uta;D-JFaC6JCXY1G4L{uYL+`PJO;vX_rR^Uw=AKf1uVZ+mrcI+l`p;KJFsR zsiuk_|1t$C(_LqsZYQA&Ya=zCL*m6U<$A_#-1W=Nsf6pr#Oj{cm-ii#6{KRl-QB%E zyn3;F8fA!t!t}YQ_?lpA`cZ5MY&yfXMqQizc`72e6WW1PVjC}Qs^leA|Kn=9?0aw?j`~o(KK6V{`P~7 zxkjdxKS*m0rs`3HVTj?}CBqH}^Qf}zJ>qgVH!0a-1rj#>Y~448!{v>6j#oPNA;Tla ziL7I#Bhh~u-`si~)~xLFu!gfJVDF3MJ#BRRMJHNC6BG_>=q6OB88U4Qj=NgM&|WQ* z>G`X+1679peQdW4COCn#EA`aaPL9JuxyC@vPVT)xxt&2#hPyUsC=W>fsV!{yL+bJh zIZp=iPzxFY33^X%h^2gE;RazIk6JV`#}H+7FOv%v7}z6b)#{ciuI*R^v)LzWUzzh3 z9$T%J71gvv{h590e`X`Q-^0P|XJ9B_*v3)cieAi73K7t+bg8G(P*b)aN5$3ti4A~z zmw0lo5yCXvBTf$DYEGlcSll&>IAgTg?Ri$XIfQgJWT32ogX0#KcmXm-`_3oZWkDMb%>hcY<)@ 
z_PH?|P2aYk?j6c0UIq~|6lb!JntjI6z|$%*mgMy1+c>%~w9NJJu8KFrFj7}FN~9`D zoimQLM0<^woDvWFejEV_DIYSNN7u>%p4Ql1(i6RO$21vO__~G4RN#l|mi*th=Euv# zgxB8%B=l*Z);<-Bt!JK7XKF1cU6?wTXvw$VKirHiaU7A}vGbP|w|2l$=^n2ai)fO5 z9S&8RQ@mEr1Bxv&Sk!~Orb?x*RcPU_ND%k#kXHKI(ap-+nY-X62PxTS+fYb%)EYRH z*o|VvR~eFczZKQg)`<|k|%3i(+q zvmQwm19pl9`+avH*$EeC#H1pABX{BTYUX0*Xl`BQK6ab$QU`2`Oo*}kZO$I|%3GC^ z=QPf#&|A~#;@>)LJ6j@Nd(SXbj7y18_z@cv`TB1N}hrbjE(Hs!z(qMs2O$emW7k4aAQ~mD3<-w9l1< zl!(BnP)bGQQRMQyAgk&9Xp{KEA_M`ME1vT`7onCC1v}hiym+H0FQyKpi{9c?XwH6D&drIf$^?G=X|2dqoiOiQndbEWA zwr@I}I4SVvRsf=@*Rk?hrL?@~-H>tq@e5f<^caXPt$U%o#Rz#ck6VDJGLQ&jO#7aM!wuHw>@%LP2 z_YY$$t9`rA8{=Z`Jm=CC2jfTyUMrGVK>})&#eW{ zZ3R|`TC~;*uo2BAkBu$t3X*-7=-yFb6$y@X_b5H4$mS5)Nt&Tv)U|w z$}YJWYMrvL#bot&K?$YmaOH7jp4I!j^3Rh)KL6#HE}9J*1~;hNP9KLRyozoLF*J1% z5=kmygL5cm`mpFWapxa|@|7HzbtOO2^1Mr)BRA4ayYyu9!AF^X?&i?SN!sfnSLPtf z#^s}{EkT5#t@Ntgkn4_}=z?Q>f{p7rwe*RQ*fIs(@{@(b^;vG{nz>~WE9HdDr(I%5 zkqN=h;Zbb>xf8v(tFHEHo}-Axb+UnLzW{;M-&;1CLuk>20P-n{1XDKwcfsl&As6lf z$E(5H_2z6a{lu>SS}TCBhJ)-8oHQu9Ci(w*N9M`&ki=3+FM|GS47&^qMCP}|P5AxS zP=!qb@I8TsDC$T5%qD=G!2<&h)Is(Cnos`s2CA(818ty45@3^m#)HkPIuAC*$HtLo z?cQ0y!0ZLvt3CPbCHaT+Kc17(2*7ut4;t(CL)?JrMd%H2+Uo90X2}qW_#XF%uqh`( z4&?tK;PPoeM;@{l4;S6SVpEP3wrC{S2?>w^1_vN2i~y?7(6?{R@}NrltRi^0xZWQL zBZ-0v%L30oVT@=ma1x=_HRlysyUJ0J1X=(!6Ho&BR4hPHX$N!x5qKA+{Z!R$ef=W+ z1~;h~CWNy6CN3qvF^=S~ac~yksXSJwqo1RQ^^avnn1ItO{@Kp^$gNzO9~*@gWZwld z_s2@4%h#vF#>V0nA(BSY!r|~rt4<Qu$L>;8S?si&Wul(t;Em^WFR=UK1Ck_!a}5YcGMi z()7UMplZj=-YRNpz+dh2Y%r~5?>wDp@B4clL*kAJ!SKCrKyUr*?=xZs2?%I&6(^@p zoLXzDdgbyEDeHkWO7_KOJ&XAne%!CFWLvO+!W0@JRlJWb-LVLvQhUVqPI?Sp)n2G`uVytN#W9Um|jU&^2*5RJ4+*#7!9kdYyw zyz%*nvNG zP}XHbQo5Hm!c|Yg$Z8x6IR;-8Ew$z3Y_7bqlRPl(vD6xZK8JiJzCO>B>=i%W{`G3i zEqM(Xf|S5(I*boY!ts2_DA9SO+@X_Q@|Czur)20Y3Nr51>}qDt97r zi11^&_oM50OJ)n{0ewN|!s%Qj8BKqe2y^Z^Rb``J=k(0)a<4lJD$V->NI)?Js@1fN zPmGrgv13IK`fb3Hq{?~|a2QcoNk~+biYPMI81<>zx>vMjOK(ir3J)JYSGE5Y*b-s5 zS#%CmkOx{yD?rLit>E=dAce+;MZtR?n&p4~BAQ7?La=vD3^Ll;U zh{S;2(67en$;qEla|{3ZDM}+2%1Gxuz773`7U=(mer+}fbE$JX 
z+g~<*3n+{jB%$gIP@?SsqyCtG`kEl?@`~~%Kt7C}#G&W^ID+5Ik@Gds+Hx7kWkdrB zW$W74MT+$1dG%uZQ+;@}7c6Bv@s_i`l6W-1nr(D6VYc42NGD|2q{1ST99Y5Z;8)#| zvgl`re?}hFJdbaZ1*QBQg$+kcSLI+J6Cr$DVw4CRXnPuz`*{9zTL|byo_5|6lZgPH zsIHY^Q7;Ocm$w!-gIuO$@Pv$5gRD21k_8{z3MUDeLO9f84zH@OjKP2;jso!&@1eFW zZ}K4zOk1|@#eUYW?!SEKt2f=`W;`7$!tLb(DU2n8#0tOW=|2(6cx-;!13o%q>jbr7RC`^D=Uv4SXCe|yw#qw$DUp8 z=Z0{Tzw9x6YZ}IbBJWR~Zbid^m30daW+M*w-awYz%+(6SYxc&pj8Umkuf>8S^2w}X z2&TL6Fx{0~6ev&t@M;iBxbBmwhUK`Y%TqObWZ%9;b#6#&PK}zy0r$t8d|#xZYzo^% z^P3B<)TP^!vz8S*=%yJ5lFWG>A54f7Wdt{))8#}}D~ig(kiwg9qXTz<*A~cf+ujY{T1fhNuTE&A5?G?SjtDzPIfRe)2V5=}YQAv<#6ACrdkY8cP!%<25YQ zuEOa(^sazdo5fG8QU;ZU08ZJDHo5*KKAbfu&k%{ANi(||N%kTi2|2m`so%I?jpJM| zQmkJw4`M}`=k}Q_tq#)(;-KBIJ~T%^_JYL6A<3Y=@v^5n&-$r9?2X`yIUg4L-tucf z+d*FbEDgz1MNAJSa|U!7NO1N=zq)TGioqR%SU_GA7ul~^<%XnC$YzD@Cq!&iI7Ueb zhS!O1PFD=$T#C5zH@L0Y9>AL!UECqoTyM=)0h$$yg)IlV#=&|QbxP24hac=-?sh?W zA=X*4-kT#;HvCI{bWz*2{#M5AuyvtSNJo@{yvQPdiXztD~9M z5znf3#}^gmONz3ps!hYz_z zeSW9CygQO}{5bXMqCuQ$zcq;B&rdscd7l6hpTjy9V8T|rx;)!zYfKgPjUP>*#0F%W zwE2l};2jPL^yfy|7;Ob8*%UB+oa#$@0T?~6d=PeiU<1^ZL^WndPR_X39Gsjs`CP|6KMGKN7PWZtu>a244V zd_0kUVEBBlgydt>7Xgf8a7{Ff%zKl#j(Vf{4fmbnlq4xX9wOi>Re!Jc6ZI|w@tECu zCvi!`YDrT{)@yL;ed4p9H$sgNn1$J2MG-a{YiZY6p3ww@*l24Mr*uQKmfF+w;?!otISJ$B;0?ON#K zSvkh-NP9c+B+03E&4%44S(DpmE)*0qOJc><`OiYei&=w})vs45a5OZa_aT(rLwCS~ zGk%60o_ZoV!foo0`R8n0>XP&MhNTFTmbgRtpu7|wlfIP5M9(-sV?t!{D@-1ch?xYC z#KSB%jyN)r%9ORu7ev3Ei<5BFgO|tzVFq8Ot&}v1wIA4)-3xzwmcc06*msdn)4S?c zrQ7hM1T)_7Fyrn0dKrIlT&^dtS7yFCgacJ+0+t%3DqzplFvv=>;D&p@9hN4Ad?c>a z6c()%)o7r{U=Z2o-+?`lB>VZyU7SXkrhmd_h6>E(U|SW*!}&>)2c(rM5a{rk0G!#+ zj#o~D4Gy~x2Qx(v#{k1m*3=vI^?Msg#zC1*V{BmDB(Vt z%ThfVbSK`8Q~3YCG%B##L|s`sqSZ_1Bg;Lv9)}4(;BLS0RtMSe=1T9ZA}XIpPL0i^ zOF1T*lPQ}hHxfexPF4h2q*k%?B4ID8RF^#wRVKjOQjl@y8TQIxY7{Pln!_>1o+nEI zUvsrT1}>7~^k);7QJMnLx7BD}=`}{?1nMGIyCjc7;DJm(_P!Le+ z*yOn&d%T>wN{I{hGdXBTX%G5^X)m`GpRnM~LYPB@Y(mJgF7`i_Qv@Sc&Zh8%t-iHw z(tgE%?mRYl3hd7Vqizo?XdO78RjHT!OY_Dt`Hoq~;guWPJX8aj@3&C-VvK7x;O7=k 
zf*?`IMn~?mM}4p@`&mxnC!~H~ZhKuN$$M??am%Xo=9)C88|7)IOdmeOfDgcZY##0e zw87#wr8Rfrae8~Dt8GzRAAIDZ<8KSif81AkAvBr$N9(yXL*2(gL9c)wr(teZ<-|=L zkbn>Fu7`l8Q(;{mJQ5&`CEDW0$;XEUQ@SjD?FM0I0l3|}Qn}VRG?J@<(e)9UdQ#jD z;(zIv{}*jw+tc@5=-@wm#wbNi^6y#Jn(?C-qhU9CBUL&*W1&KjrLK1jBc%`d2OQFG zebCHr!e2B0bU-7N+g79X-1mWS4G{q(iEn>2BE59&FL10@F9A^_qSa%{QL8cZ=!EJaTaGnjYy-n6Lw8WjhHeZRMvX8Z78L$orH zCUNE(@Jxk;lLaS2-Q`$woA02;1H=36kT#nM{UqLD|IpC!HBx0GE? zaIgunPMVBZb8~Huc(I2p^0g<~^aZozZvN8hI2W(zfPDR^;h~Z#8wtt`KGZriNbOC! zrjP#tzkuk#tMPo32z!W7#!tA1P^N6cQDp|%oIaRl)FzGi3`a4h%K3f?hr%XBQ5xaq z3Ca(y3&E9|$d@-ivdwS8_TR9=Tz-^3EH)P>CBnJbp@`P)T=?{IjXk}^bw7p}|0`Y~ z7bfyzVeqM~Wq062tT?=KApMqeSt5v9Io#o<%~ajs}>P zNz~U8U(wxdbb~uTpHZafGyqQkT+0aHf?-$}y%!W0i;)!Et#5C{Vzld(GswnmggA0Q@w%Ac zlnjd##u8it>r_|N7=1DcY8vpC6FO^Kc%p`@Ls zG;ay2^<6bI*dA0E_i;G;XtnRiA1wEKs)_Us3`Nt>HyZ(fLuTWVO znG_$8A?qy)2v*VTs!;mJr7KHYm8xK(bIrxGK8=#(uSoe^{$}V=jYmZg9 z`g8X8v#Cv_b(S8z_91{LDZQh=s|OszfJE^FDP2*GKMSDpGEd5P*dinF0@y9_@&M{& zo0Sy`#p$gBwbn$9l2a8i~oP_L2wiK+PJ_i5yWVW%nt$oC}+BK}^S z_>RuU35NTq$1=aeZ22q#0DDEi`9R&sC^1t&)teSW+xXhk=<4hZgOBFJ{!CRDHN|@A z08Ed-0ie#kK1WasRs}3xn1ItN1@fNl;Q{fAazvn%+Hq+V4R3T9`PEF-%h48p{LzH=XD4}E}lJb>B84WG?{BZ}7*dl{i=Mt^2%E0`9x z=2`*_YQ3-2knoC_-$^DGl@u{ZzQthNTLMb^{>PW6nJ?OcaWU-lUF2yaU_c4S=f5)8df!;!P%3RTjI+DjUS=DwF5z zGvC*ZpWa+}02%7Ht9R`Yo4g>$Mw5u>rb`SSf2)Jl45S@F7o>h`PgKIY7xubYYF(tS z68~Tz!g2wN-+7!hN~59Gz&A2!r{M?HJZ6W5LNpV3g1&#(D>wf-VZlxf6at9~Psyu; zcIU3IB-_OE1mfE{QIq`ssB`(*fbT_fd#S%zPtwcppYDv?;J2LGy!^O377Y zArN8q=g|6yT#=L|dPn6}oLJ@#gj&?;4l9xIcgv_^*A1El$@hmsh&IWcN7&Bg^Z1jncDirLM3}of}tDvJ`Rx%53mCR1rL}R zVDty?84^nPkESCY<38%|@1J={=42$uTb1nGTT)W;Qq|*^nk8A}0Rsa=e~p=`Y43cb zfBwgGev0+(O^g*8 zWp!2REd5=h#R&~B!W9WSp3hlO1c~8%%O~9Z@bRN9JYKtj+b8L{9_|$QLkI_&A_2#E zeJf`82XO_3?D~PJsVOtnmk!yfsXgL3H^?iGrZvU|^*?CSg}WVYD~;vi z#EYk5gr2g}(kOF=S?Tln#(FizYze0iUESU3(v2sT*gE{*4bM0;0mENeO zB9%eMVM8O3&@?1X#=!&=dNTOnbhG_raX~D6Ve)rVJKvA>rJ|n;!|y%8A5{$5KkzTo zioHRPt@lTFDVIdbn`|}ddywnv&UCMdiHWNE7%eR=5lP9PRa`$`a?H?47QFJRWufrV 
z#~Eq%gq)Lgp06ZZ^(9Fac)%GU+}5@$Fp@^7|9&5k%X(`*O>!wf|4s=t3k3 zQ=O23M9;C*xqgZzcZOuQ(sU8eU94<)ZG=3v`kjmRI*z&No@ui-=i;}wVchr^{@_X7 zGy^xYG>S=mF(w!~#_6Gq{NjF#b!>)fcyFwRx#BSv|85KR+x(r-SxT~q(QV_di5qYt zjRqKOAe=)Jsdw!ubV+uLCLm_!qM|#pEvxQT4x=Ya2@cW-ec@9uPGNGj9a-Jt*tIv~ z@NV%0O+%4HpkHVbM|iH6oY2w6_bur-?khR6-duoX5_Cir~oLrMK|X+1kVALqnf@4Ao={(Ijkp?$r#rU2+2~kTCyIpWLWMU$Itlr55`$Xa8u^ z1sv4<)R<0v<84Ys18PMiukf9!A&L4PH-XW>&4`)ycb~ACDgxQ2_EL&37^x z*-+S!KO+7S2}YKrLWvG$Tkp3G0<{WdP2(un|UlMBafwxj)VkvJ!DuauB)mY}$rLGi<_@jGKe#y?PB+Bb|_L0g!COFk#=9Vhd# zo*3Uwv^PBDx6~>M=V`^~E`hH0`rpvYp``r0B%`?QV9dK(K=ldx9iOF!L}_Zju3}R*`oOIw$1B%d}?#cdE2E)&;0$>G>^j8M9Z<@8a$sU!ve~qa+8TUQ!MZl`iCGs7>T5STVY!68_)Mkg&qEA6?uK|A-I~S z6#K2UensQ(4ACRZ_dUS(XCJcDeERqOJic8P)1R%J#<_1+!Cyn&fT`-Ld{nj!{y$0+ Bp{D=< literal 0 HcmV?d00001 diff --git a/img/sup1.png b/img/sup1.png new file mode 100644 index 0000000000000000000000000000000000000000..71565e234a2f75d9d61d030aa8450e2e82cae6ce GIT binary patch literal 10466 zcmV<8C>_^{P)#rFQvAPJC4FA@Z40R$8ghk#PgpMZCO65wmz15~ zZ|>~fot-)5o3m$TXJ-A*NPrz|2^4V&_!n{dZSrji7zx-wwt-MABwz!%Sg6rvBG49r z=Oy57x5LlR&-0a+&=PGRdqD9D+|#E|LysL(R-?f6_doX_)MLHo0dh)Y5jKzupmu_l zkpN~mb?Q|1e?6z*CQu1l1uT#Ce?UNhx!(r05ktOyY#`@DY*qj(cu$@@X#l-r#}3Sy zGY7L5%))_$14v9bg7o7V2#*Rw`AX5KcUgUOx$!n!fBp4{j*d29CfF6kBu4?B57J8P zEqL=)|kpRxj%*2uk`m;AhSef zwSnxW+N~hY%E|&Gt?>O1--r57MPejPxXngtk43sZl9zj7t zCelhf`qpleTjH|WKz2jrw7vu~EwH*0ix&NeKL6~4tKX@C%GZ>0qm-Pp{4pj00l!6I z_7`(erc9Y^Fw@FaukY#5qL3GQEB z_Vh0JZ!d=hbG}DpWF$(LE^Po!Fw<|gb_m@swPfV9ft-V$*$<{Qwt{%io;~Q;`3BT~ zAsm<79GS!2`MVT1@GrzkC+4dk3WeG`|%tp&Qoap;rb>6kflrWvDF*M{`iR@u2|Z6G_5i&|r~$h3;d$;nvu^K!&= z_q<>1gap*MvkYd>pJm(`b}$KIb#?4_SQkpn2J(e6F=q&7f``_S7J1pSWr(U<&b*~_ zaf?S!S7{rDxRq;=nwpy3*|aUPYZ2Q(b|M>nd=tzBF)i|Q<9fYVl zWwC12Dno<%$)t&a*4Yl{Bwz#ioGi?d6~tDH9DisZLMjJ21bA6)*%dE%M>S!w9b6+sT5ZD*EBz!Gp#klMY7GX8XtvN|V1fkS~;p zIYYoGc$g+9r68odZ!I!`oS2xH?IUYvkEspj^A%?U`Fwd8fGAK1W(9Ouc$k@r>5$#4 ze=`r9Mp#(bc@0WkAFozi1Pix;>_TadV2%urMEc<@7rDK2I`iOJAd?n#DKzy?jYYRq 
z8^|tnN6V*RW?CV#BGUHw7RYIPvk(;(l?`I6J7GO`!9s~TZ3Eefx*b8n|4o}TL;Ql{ zPI7zaRN8?PNZFT;+O=z+SAipK=Uptpe{3MTP(J58Flwk@zdllSrz0cLH&*$t#VKge zu)bOT(fCSTfu58UU%C-cA`&xl+y|lLq$=&%s z6*qVv4zEi^*IRBkb+JCy%lb(Rqt(60e@(^jP8-M$Iwgqp@lDWJfgBndYSz)!tXUKH z_kIA2`fS6FuL^FN&zwGk)r0op$dW7!8aT+j&C|NnDl3?6YwVz~zyJDn7L0Qm3f)

    &z@W`pIl8jI_$?TBx6_ZVzd+ z!#N4q+X0-DMfMTFq?3gdxU|42DJf>j6PG<5J$e*LNl7?%?3g*Xckf>Ov~npD{z^c_ zE6SnztzihM6oilpK?sfxL|S|n2;QVW)3I;PF&y8YiF$SG;p!HxjNe_ZTsee?ha(~) z;=IW71T-5GqgrMg$l2;L2ew5%UmoV4)gsdp64YvudCa*Nj0|canXgKfDhBM^w{OSp ziMx<~;uzA7ry(sR9p$3R8vi{ZY7t9c*OojZ4$$v*@mttu@nqmE3dPpm1y&B^=~ zD^|?5$hJ%2L_wBQwnerG!|_lpF3;17YM?v8KyWbTsFt@FUl+Z4zIB>_x8Dv)W331pqq ztc`-e zT>$#Ej2vy?TR-k^%vhm91>+j<{L-aM%`q)A&+|GzyTJV1b(4A2SGHPW0)F%6&A8>3 zTd;56z7iqx{4#0#$ZkrG{xTyiEPY?~f7OMc1=eF)VztH`vg_Qrv)L4!7FFE~wYUVU zHV(D$nuqx{x=Ch<&1M5R52`1S83e;_)nVBk}6}U_l=;|M9Ap|AC%0TO@ zW6(U-YYJfIu|Z5JxEE|7JIIOO1>v$L&OOd45WoxyRt2^G*HrT{uYy!T%X125y{7q? zItIQ`yzjpI@XRyMpk2Fmj+aaHe;ddJRKFFdJSKn{^gLaSF=NKy`RAX{u1SaQtl;HE z{_&~#)<;K+`Onngub%%>*g*Dy24>HmZ6?Rt zZ}Pk+Y#YelldWB{4<%5nePph1?%K607A;!jLy;DqsyL~i(^3x{I8bA3E?>rkUc4HvnJ=4l(qKG569kt6Z+(@&d?U3^eo ziLAl~vJceG9fh`S+h%qa@F+34QMq#Yt4(efOOiXXd)Uq8@A@7j+HV^cx8j3)iBs>vHVav1TLi!cJ#Vrmx5X znL|mp-g+w*FJ4^e<^RSTZUE6@R1*RO5KnzwbvN*R5OEqvd+KFpmS-y6dDKYf9ps?d?$9caY?y z98S<-2Z|dg7UmZt`2m>fS5uZ@J$BSN|2v+`PrcTAJOX4TnyDUJKMJGgSh1A>D?RnSZr`LFF)w$|db+0M&6(khM zM^?fKT1HL{b-6om&u4?}`D%1@lwq zjMlR9LXTtKe0@}1TN{aBV5)>MC0JLkTqz%3oGag9UL*`1Mq*;Jj7y%KYf!mLMKo;C z5FI;qHtWZ#R;_9lXIj7f$%!__B~Tu8Ex1aAi(`voxb)IXF=WUPa1jH)pG}<;#HxEX zP(FDUESQ6hn>OM={6WKJT3V*8j0u-b$fMD=?RB{E#+z}~Racp<5%i;(s&|%^UkjaQ zAX`DKOpqZ0`1tYTm^5h;#*Q5&iyI@+xl1fQn%n~wD}~7i7|WRrsFRb^k#HaxE0*oR zx8F>}OD_!)<~rep7hXoqnl;TAL0PVT4V2QzFLQ+fFqBvPU-6hab*iykiHV7Yfvh~y zpikcL@Nl!zg_f22rrvk&-i=|yhME=HZQ9nuEu!<+hChHxmBSGp9%6ug@Zd4gd5Y+J zJzjmaKlbiDihuoU03LYYfown%%#5mQbwS)EG*3RV6~tsjnc<^?t?{|%{)M{rqA~i- z?zpUO&R-5y0y8tSFl^{NjDL5DyngN}jJ{^}fUth2nM^5zdS6jVX^kg+JPG~()!*#K z`S|0HqbNOX6vS4eMF7)X`1ikG#ur~q#iLKO#S4RPluoRByNzz$l7MIXeT6NX6ER}M z7+in-^+q-{UTUSO@4SH7Z6GT{ObK9umkiM_djI|Z;mtQkoJF z->Ep35+ZvTPc|P}w1U~%TJu%GMYxHw+PilT?(K6w4#sc5{`fTX=+V>o$`^s0JXcBj z31FtwHFeHN-}jNe529RHI^O+XPkGvg<+*I$@YB+*=ym4=^y~L5`u2U)jOHkqRp0J{ z*{#kV8DX$8#K#L;<=b!n7ZW~w2S2ZTF5e(#*%ZbXGakZa^@H&2vyYpJ=xihuAR`+O 
z>sMNM&&3X>PoFX$nWW$7EU@FVWbsefzk4Hk-(6RpDyO8ckaz(PS37h3=uW%#ZBSSAG)?rCUl5Zi8a?8M_*u4WiNTW^Na~z*vmvA2eW=wf^>dP??Ti@H`dg92@ow&2d?FP&QgSs&a<{S(0;l-@1llb-5 zb;ealPCg4}3SYyKBS$dxi_dUPt8nS))G$6^Qc|)3h328c)~;PEF4e2pv15mxzUZlf zi1ZO$)H!!I?cDiB+|aR((UZH@GACw;8{(&X_q*`XM;~Q_S#?f2tTJ^~R<1x+h6rRU zQ(RFnYSb{S-}I`u0imwS&)5063;Rh&A)t`gqI02<T)o`s?u>#+J{~f-Z)i+O_Fn8;(b6F+9)4c{)9D63ttYD_za|&)- zWVQ}2d;n|KtikW=R^Xp~8X-9J6iNsCiOfnHfKQn4kqkZc z!H%smT^nM}R&&VTX7=x*MXsFeQTBdzh*{$1IR8(L*$N-eV|iQtpq z$}6u#SXdZ1Om_S2w`26^(V)d7@6Lk1pT7(l0oS!FkJpDb$NIJ3qwk|n;=>8=qgmr{ znTK2vXHK4xJbnlZ4?=R%5gFxq8?9UU%h-7f98XCX*DMMxTefsoyid=m&Z%$8Svz=a z-23+J#6x}m;dOe5tGC}CYjCD6hU%KBtKbsIyn&1R6UL|2cLU5fNbk{QUFJn>TU%j&!$fx1)QvJ1}z8Sgil; zcVwN&GQ}~!m{AcFq$B5#Hti~7=>Ijv=0ASI>eWke@BOg|4GF-R(`VpUDo}tNj0O#4 zoPBsp^nW&1UM3vDfx{VS)Hv3%J0?4e=Q@mzbbEtutwVvVmXP{p175IT9`5SZTpDA5 zX7yUikqqK%)22`N%2EowFN3 zrGn7rA8jl%;{0$zz;h(&C=MnZ!v6jHuzB-l(@Eru<^4I)VaP-(1zwr=PrS)EuT!Wg_1FXdDZG)@X{--y~M(W#kROA z#(Xe*@7}#;sSu|?s<3ln?81`rc7~rxXy2|q-g|GneBb#+?Ax;egPyw@A!P!^LhZrl zpN_=%aq{*lFjRmi*5WiDZhk0J>Ll8?jYP{U%OYI*5gft589$*N@`ccs~&Xkxp&RpkKc~ZWr z^F4d^h&z-F>cLl`%WX|CbjVM5=%I%U@1%!OUy?YJfm0wWh_wJ^immgA4Cq&_7U3(N zjN&R*3^QLQ;#`d2;9w(Uwo;|jTg0%@TzO@4nLF|(UVH5|41IkuUVpWumOe*{|`nCABh12XW*q5TcFq7)ullPqiu&uNKMH? 
zRG9b`rwCjL#>J(D=7DxY0+}>u<1(V9v2YSx0$CX& zb1aA;VoXgEuN6}ZgI{?`UZ+;Tus1%G`NhZO z(N`OhWdjjWrj%G*q2xRvI}N?uc^2Vla)KjpCPC zlERsB9NT66q{&K`HwCOhJAC-Ce5Cv3HtMlaJIvuY2mCc1`(iUKV{|cR*~V zRPsxQkJkB=B)|W841-@>j_()kkkN~Bcv{>E&ebhj&bLoK1q$WMKr6_PZc_ff44k_h zDL7LrtU4zRsuM?gTmo5dV9J76C__|aG`4R$h%2tF=~$Lm{+~E;QdS$BU6sHJRM>j# zl}i6Elw+v#=FO9N6vMD#6tQV zXvKbHo;r!kE{j6q(M*{<^C6~9`wCSmS3*p!n&9G9c62!$)uc(2vm+l~U|eO(l=^1M z@1G}ar{*x1W4zKMT)^Ax~CLf2?WKre?%|UEe_V1`yuaW7oTWRt}U5;fE$h3sryLXq3{oa(1$1IW8^OJEr<5HOi zvIO_s^8@D0Jt#xS*^aVf5pMl2CAXIi_D58uVAQN%1|gwA_~n=VSoX_q1LB@Ndx0O+ zx#ymH%*z3{3e86*%GX(}SGN(U2VVr@iufYVRNt)EdhCo2mq6B=wE%*dV|dZg(K2LI z1D}4p+*x@ZJ$LlzaeVRlYFVt<($uXqk8WM7AV7qF#*7)~r*^T-Qpc$PKWy5ZisuKck+qI<@bTmgGCFh#-g$eR%sqI^ zD3)}1JCI+Els6l5ixw>~ZEBo6EzV9q^(GBdK3O48;OYiv)U!s?!k~C`q#;+=f;X}- z1rZs$vC}Ph<)sgCckh;7^_$-K&pdgQHZk*boiXH!DMuy?@}K>KJ+fTsfA4>QX`f9& zomyuw?(LSc`dg-EhDzf$e%nbI8`igbd8|!*>%`KsC?0iH;Rls}iEOu6=N6zh)A-9PWenGo-9Za2Pt>Rf4RLG=J zne<4dmO|*4aqN2BxN*3>>us3(?Ic-S)&TE~zX?}f9*MoXkK(sA2eEjmEQ9|(QC=y_ z&6K7ze28^Ddu-{P@R~*1GD8F4Bpm5Ll?p6{(&AqnNZ@K1KQxj}Tm}ywY+gz8jg~J0 z<*2$=`Kr!I1DEoDB>JFUycLM2$jI{2>Amdlr{GzAlMbcL6__oey^uUI#~ZB-@wF{q z6W?)1PYi#39_D}N`Hys`$kfKR*NjAm4xLb~npnQS>m-;}$ld}lrNEUNT)xkVL)<84 z!-fqq?3RWPC&r=Uji2L+tEb}X*3;#!-V9m({tfy*z5u)T$(XV{-!i2EEL@a;QUBeD zEI}^c)Uh{_n0Nvy$K;3(v79-al!?(}e#hb;56XPyGwAz7LyUT(jl8lB!s@s<43v&t zqBJC52g+6T&6K((kg10b9d5u4?Z(R5!rbR+=aufSrmqn^&BvX0-fik+z0|uE#5tAe z)U_Zp3StEgnPFrFAu`Gb+Jx@ znDwh=W&`uIMW!1;&~k?(j#6;=jq@AS8f0b)|DiG+GorKzS>z!xH8o9!2e%+2OUAba zxVv^9laAmj?B913r%w3EEA2)|Njr_#USBUG5lJ!usU=#pl(`u}C$MbAA^c}V9NM?P z1igFJl@-=8s1Ox|rw06l88c^?Ui;`VW5ic3_XW0#SW&e@}$KJziLPM15Y!|iiPNolz0hBuLw87#Y3_AzsV*}0@5IoE)1 znK%rm;MPXwb_Ka)RCj_ra`qd`ry`lEg0%rLJtbqmE9JAn_uSnTpG;_j*k;k__r!91 z`o-BEewfU^7|`!3w7b44x_A8+hmNKpI;u1VJbe}JdpH)8CauGZFRc)&Xa!|NsBuNy z_dpYQ2@#6n|M>;G_N16k6LYIAj^}ziup#r6Feis`0R{EVHHnGcu1cD=)NXsKuFn|kLl^-Wz+=lVo;2Ihbv5?l#L$M2Xja*&+N zG7{wc&o@8Z@@nLWFK|jaJk8}r311fEH7uOJ0k_@!E-I9dk?D@N8PF2AoaoLv$PC9c 
z)jDe{b;eX)>(=}%i**n*DnQmpFl&9*rvPPgApU@S(_}hs=uizSmmk8gQLBu=n>TMR zJd~FP9*0FgB?tgc%2db}0+3UfGj9Wa`S}1s%aq5j>-OW)+O?(gcs*;?bIGQI_s6Z?g?sLrfR87x#1&UugKMvCoy`zmOOq)ASuL?Lr4}-~ zu#0rSSz6PlPe-8mw&TaIN9yrR*=@6x@v*yizg>3I*@C!Lzsu-DO?>s$G~C$0D@-s`NxH_D^_;>TZlTnJsSiEGB;g>X1)^Txb47k7fW|mC2-(nQY7c(P9jPUBb zWCE5?ThhQj0$0q_t)z>=nQ*FermAzgmzy@NH*V<7w=|YjGIfp4tI2$vW5+V&sg-PW z{^hi_Vz_on$FYixO}9c!OpM`+`lTD8I;Y-wj*ZswiRenFxdNFjP#L4YtY9W%%7_)n zWL*8LojZ5R<8hy?!OB2}Gy-mD5?V&wgVG_wKqP9*BbELT0ZJc;U?l?@l_4vL31of5 zTznv(F`KGz?=m)Ez6Io_HSOB9J5Sz}Aq@Z(W2KqC=$tuoFn8`;GjX3Tnlo00xAH<>wh>?kra z)6IQB9QhF4mI?|+l`7SZ^`)_*zNvTBHB;3$j};SYxU_HLinknr%$vxVwUv@FD{}<2 zwpgZA7#U|CwrCX!%VD4*2}DLZ?`&~oib^J6)r}xC{Lk}@WJp0wx55etQ}9LlVPoyo zsgto*{KP_!9zF6*iS{?hpQ}K5a*KhPGiMs*bBZVB}qDv*^i z=4aHhtV|Ki3Rp6xOtMun(nnH;tevf#81vON(pJ5Cb+eKqKOiQZlm%l>fik-G+H1{j z_XJdK`Ex2Ow`-I!bw%)yKL*t|k9l5!$?F6ijh8M>V#8!((0f=O>7@>dA4lV*I;XCQ z172rd-otwta|^jzWEMbz7_F9>tyEj8x)V$(WP+HF8iJUSjA1`qn3^f|UF^>YL{ni3im;?_(JZsyFIUfv>KG zRo`lbna3+YCNT;`E0~ozGN6wZ1u?HNszv5~3~0-C9wBQpwO0+<3ROdvK4 z72_XgU{;+w>YEr)ovW*)qEYM3l=`&lT94IeQRfO`HF{c3PHE21Gs~R{AOeaYWZ<#> z=Q$EUg4nXi6dBUTi~^bWks(GUTJPh%WYpRsc}*Eo8d!!Mye|RDoBGwD|E>B{U=qlx zZ$}?l^+${_KU3AUrh1X$lTu?^WGvs<>dPb-By6 zTRyV0!U#_5qlf<;TclcJ&8NbkU`VKfl_?_`&~pl66{0g5>~p@Afcm}EwOU>3n8%v( zSg}F9GpN3)UyZ77g4n8S&6`)w@(pBGL_!s$OqEe((($?yM-Xc(RVK(7&*^{7qxaZp z;gEppTfwS2Cy@1;rdEm-pae3H70{}4O|AFj<+wmV)`}Fc)>IkQbDCFEWz5Qe^_=Fj z)50kMD_E_%)?-Ji)i7CgY(3|gKd=8i0%XTZtxWP7|2qa{g6FNrj`{5WMMDBs0K3vR zIiao@2xw0RuK?MT1lncU5^zTXo^2NH)M)3iCEx`K*g*DzQ0*dpF993KzL&XOqZcIb Y{|vgn^Dx65vH$=807*qoM6N<$f?#!f$N&HU literal 0 HcmV?d00001 diff --git a/tutorials/5ch.md b/tutorials/5ch.md index 45c28cb..1a56c3f 100644 --- a/tutorials/5ch.md +++ b/tutorials/5ch.md @@ -1,7 +1,7 @@ --- layout: tutorial categories: tutorial -sections: ['Introduction'] +sections: ['Introduction', 'Quis custodiet ipsos custodes', 'Isolated Restarts', 'All or nothing restarts'] title: 5. Supervision Principles --- @@ -9,28 +9,144 @@ title: 5. 
Supervision Principles In previous tutorial, we've looked at utilities for linking processes together and monitoring their lifecycle as it changes. The ability to link and monitor are -foundational tools for building _reliable_ systems, and are the bedrock principles +foundational tools for building _reliable_ systems and are the bedrock principles on which Cloud Haskell's supervision capabilities are built. -The [`Supervisor`][1] provides a means to manage a set of _child processes_ and to construct -a tree of processes, where some children are workers (e.g., regular processes) and -others are themselves supervisors. +A `Supervisor` manages a set of _child processes_ throughout their entire lifecycle, +from birth (spawning) till death (exiting). Supervision is a key component in building +fault tolerant systems, providing applications with a structured way to recover from +isolated failures without the whole system crashing. Supervisors allow us to structure +our applications as independently managed subsystems, each with its own dependencies +(and inter-dependencies with other subsystems) and specify various policies determining +the fashion in which these subsystems are to be started, stopped (i.e., terminated) +and how they should behave at each level in case of failures. -The supervisor process is started with a list of _child specifications_, which -tell the supervisor how to interact with its children. Each specification provides -the supervisor with the following information about the child process: +Supervisors also provide a convenient means to shut down a system (or subsystem) in a +controlled fashion, since supervisors will always terminate their children before +exiting themselves and do so based on the policies supplied when they were initially +created. -1. [`ChildKey`][2]: used to identify the child once it has been started -2. [`ChildType`][3]: indicating whether the child is a worker or another (nested) supervisor -3. 
[`RestartPolicy`][4]: tells the supervisor under what circumstances the child should be restarted -4. [`ChildTerminationPolicy`][5]: tells the supervisor how to terminate the child, should it need to -5. [`ChildStart`][6]: provides a means for the supervisor to start/spawn the child process +### Quis custodiet ipsos custodes -TBC +Supervisors can be used to construct a tree of processes, where some children are +workers (e.g., regular processes) and others are themselves supervisors. Each supervisor +is responsible for monitoring its children and handling child failures by policy, as +well as deliberately terminating children when instructed to do so (either explicitly +per child, or when the supervisor is itself told to terminate). + +Each supervisor takes a list of _child specifications_, which tell the supervisor +how to interact with its children. Each specification provides the supervisor with the +following information about the corresponding child process: + +1. `ChildKey`: used to identify the child specification and process (once it has started) +2. `ChildType`: indicates whether the child is a worker or another (nested) supervisor +3. `RestartPolicy`: tells the supervisor under what circumstances the child should be restarted +4. `ChildTerminationPolicy`: tells the supervisor how to terminate the child, should it need to +5. `ChildStart`: provides a means for the supervisor to start/spawn the child process + +The `RestartPolicy` determines the circumstances under which a child should be +restarted when the supervisor detects that it has exited. A `Permanent` child will +always be restarted, whilst a `Temporary` child is never restarted. `Transient` children +are only restarted if they exit abnormally (i.e., the `DiedReason` the supervisor sees for +the child is `DiedException` rather than `DiedNormal`).
`Intrinsic` children behave +exactly like `Transient` ones, except that if they terminate normally, the whole +supervisor (i.e., all the other children) exits normally as well, as if someone had +triggered the shutdown/terminate sequence for the supervisor's process explicitly. + +When a supervisor is told directly to terminate a child process, it uses the +`ChildTerminationPolicy` to determine whether the child should be terminated +_gracefully_ or _brutally killed_. This _shutdown protocol_ is used throughout +[distributed-process-platform][dpp] and in order for a child process to be managed +effectively by its supervisor, it is imperative that it understands the protocol. +When a _graceful_ shutdown is required, the supervisor will send an exit signal to the +child process, with the `ExitReason` set to `ExitShutdown`, whence the child process is +expected to perform any required cleanup and then exit with the same `ExitReason`, +indicating that the shutdown happened cleanly/gracefully. On the other hand, when +the `ChildTerminationPolicy` is set to `TerminateImmediately`, the supervisor will not send +an exit signal at all, calling the `kill` primitive instead of the `exit` primitive. +This immediately kills the child process without giving it the opportunity to clean +up its internal state at all. The graceful shutdown mode, `TerminateTimeout`, must +provide a timeout value. The supervisor attempts a _graceful_ shutdown initially, +however if the child does not exit within the given time window, the supervisor will +automatically revert to a _brutal kill_ using `TerminateImmediately`. If the +timeout value is set to `Infinity`, the supervisor will wait indefinitely for the +child to exit cleanly. + +When a supervisor detects a child exit, it will attempt a restart.
Whilst explicitly +terminating a child will **only** terminate the specified child process, unexpected +child exits can trigger a _branch restart_, where other (sibling) child processes are +restarted along with the child that failed. How the supervisor goes about this +_branch restart_ is governed by the `RestartStrategy` given when the supervisor is +first started. + +------ +> ![Info: ][info] Whenever a `RestartStrategy` causes multiple children to be restarted +> in response to a single child failure, a _branch restart_ incorporating some (possibly +> a subset) of the supervisor's remaining children will be triggered. The exceptions +> to this rule are `Temporary` children and `Transient` children that exit normally, +> therefore **not** triggering a restart. The basic rule of thumb is that, if a child +> should be restarted and the `RestartStrategy` is not `RestartOne`, then a _branch_ +> containing some other children will be restarted as well. +------ + +### Isolated Restarts + +The `RestartOne` strategy is very simple. When one child fails, only that individual +child is restarted and its siblings are left running. Use `RestartOne` whenever the +processes being supervised are completely independent of one another, or a child can +be restarted and lose its state without adversely affecting its siblings. + +------- +![Sup1: ][sup1] +------- + +### All or nothing restarts + +The `RestartAll` strategy is used when our children are all inter-dependent and it's +necessary to restart them all whenever one child crashes. This strategy triggers one of +those _branch restarts_ we mentioned earlier, which in this case means that **all** the +supervisor's children are restarted if any child fails. + +The order and manner in which the surviving children are restarted depends on the chosen +`RestartMode` which parameterises the `RestartStrategy`. This comes in three flavours: + +1. `RestartEach`: stops then starts each child sequentially +2.
`RestartInOrder`: stops all children first (in order), then restarts them sequentially +3. `RestartRevOrder`: stops all children in one order, then restarts them sequentially in the opposite order + +Each `RestartMode` is further parameterised by its `RestartOrder`, which is either left +to right, or right to left. To illustrate, we will consider three alternative configurations +here, starting with `RestartEach` and `LeftToRight`. + +------- +![Sup2: ][sup2] +------- + +There are times when we need to shut down all the children first, before restarting them. +The `RestartInOrder` mode will do this, shutting the children down according to our chosen +`RestartOrder` and then starting them up in the same way. Here's an example demonstrating +`RestartInOrder` using `LeftToRight`. + +------- +![Sup3: ][sup3] +------- + +If we'd chosen `RightToLeft`, the children would have been stopped from right to left (i.e., +starting with child-3, then child-2, etc) and then restarted in the same order. + +The astute reader might've noticed that so far, we've yet to demonstrate the behaviour that's +default in [Erlang/OTP's Supervisor][erlsup], and it's a default for good reason. It is not +uncommon for children to depend on one another and therefore need to be started in the correct +order. Since these children rely on their siblings to function, we must stop them in the opposite +order, otherwise the dependent children might crash whilst we're restarting other processes they +rely on. It follows that, in this setup, we cannot subsequently (re)start the children in the +same order we stopped them either.
+ +[dpp]: https://github.com/haskell-distributed/distributed-process-platform +[sup1]: /img/one-for-one.png +[sup2]: /img/one-for-all.png +[sup3]: /img/one-for-all-left-to-right.png +[alert]: /img/alert.png +[info]: /img/info.png +[erlsup]: http://www.erlang.org/doc/man/supervisor.html -[1]: /static/doc/distributed-process-platform/Control-Distributed-Process-Platform-Supervisor.html -[2]: /static/doc/distributed-process-platform/Control-Distributed-Process-Platform-Supervisor.html -[3]: /static/doc/distributed-process-platform/Control-Distributed-Process-Platform-Supervisor.html -[4]: /static/doc/distributed-process-platform/Control-Distributed-Process-Platform-Supervisor.html -[5]: /static/doc/distributed-process-platform/Control-Distributed-Process-Platform-Supervisor.html -[6]: /static/doc/distributed-process-platform/Control-Distributed-Process-Platform/Supervisor.html From c7580266b6486cf0511be53a7cef9b60f6586c12 Mon Sep 17 00:00:00 2001 From: Tavis Rudd Date: Fri, 4 Apr 2014 13:06:12 -0700 Subject: [PATCH 018/108] typo s/recieve/receive/ --- tutorials/3ch.md | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/tutorials/3ch.md b/tutorials/3ch.md index ea2e385..f40f017 100644 --- a/tutorials/3ch.md +++ b/tutorials/3ch.md @@ -37,12 +37,12 @@ whose contents can be decoded to a specific type. Of course, we may _want_ to process messages in the precise order which they arrived. To achieve this, we must defer the type checking that would normally cause a traversal of the mailbox and extract the _raw_ message ourselves. This can be achieved -using `recieve` and `matchAny`, as we will demonstrate later. +using `receive` and `matchAny`, as we will demonstrate later. ### Selective Receive Processes dequeue messages (from their mailbox) using the [`expect`][1] -and [`recieve`][2] family of primitives. Both take an optional timeout, +and [`receive`][2] family of primitives. 
Both take an optional timeout, allowing the expression to evaluate to `Nothing` if no matching input is found. @@ -81,7 +81,7 @@ removed from the mailbox. The removal of messages from the process' mailbox base on type is what makes this program viable - without this "selective receiving", the program would block and never complete. -By contrast, the [`recieve`][2] family of primitives take a list of `Match` +By contrast, the [`receive`][2] family of primitives take a list of `Match` objects, each derived from evaluating a [`match`][3] style primitive. This subject was covered briefly in the first tutorial. Matching on messages allows us to separate the type(s) of messages we can handle from the type that the From cfda1001353c42e40c28c038c964af3a2158c7f3 Mon Sep 17 00:00:00 2001 From: Mathieu Boespflug Date: Sun, 13 Apr 2014 16:15:20 +0200 Subject: [PATCH 019/108] Replace link to Haskell wiki with link to official CH site. --- documentation.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/documentation.md b/documentation.md index 1256419..a9e3f8a 100644 --- a/documentation.md +++ b/documentation.md @@ -500,7 +500,7 @@ TBC TBC -[1]: http://www.haskell.org/haskellwiki/Cloud_Haskell +[1]: http://haskell-distributed.github.io/documentation.html [2]: https://github.com/haskell-distributed/distributed-process [3]: https://github.com/haskell-distributed/distributed-process-platform [4]: http://hackage.haskell.org/package/distributed-static From e7a01181ba8cd63f94423cebe6ffe26e0187c6aa Mon Sep 17 00:00:00 2001 From: Mathieu Boespflug Date: Sun, 13 Apr 2014 16:32:32 +0200 Subject: [PATCH 020/108] Replace link numbers with symbolic names. This is more maintainable since easier to detect dead or confused links through inspection. 
--- documentation.md | 102 +++++++++++++++++++++++------------------------ 1 file changed, 49 insertions(+), 53 deletions(-) diff --git a/documentation.md b/documentation.md index a9e3f8a..787dda4 100644 --- a/documentation.md +++ b/documentation.md @@ -5,13 +5,13 @@ title: Documentation ### Cloud Haskell Platform -This is the [*Cloud Haskell Platform*][1]. Cloud Haskell is a set of libraries +This is the [*Cloud Haskell Platform*][cloud-haskell]. Cloud Haskell is a set of libraries that bring Erlang-style concurrency and distribution to Haskell programs. This project is an implementation of that distributed computing interface, where processes communicate with one another through explicit message passing rather than shared memory. -Originally described by the joint [Towards Haskell in the Cloud][12] paper, +Originally described by the joint [Towards Haskell in the Cloud][haskell11-ch] paper, Cloud Haskell has be re-written from the ground up and supports a rich and growing number of features for @@ -23,23 +23,23 @@ growing number of features for * supporting *static* values (required for remote communication) There is a recent -[presentation](http://sneezy.cs.nott.ac.uk/fun/2012-02/coutts-2012-02-28.pdf) +[presentation][fun201202-coutts] on Cloud Haskell and this reimplementation, which is worth reading in conjunction with the documentation and wiki pages on this website.. Cloud Haskell comprises the following components, some of which are complete, others experimental. 
-* [distributed-process][2]: Base concurrency and distribution support -* [distributed-process-platform][3]: The Cloud Haskell Platform - APIs -* [distributed-static][4]: Support for static values -* [rank1dynamic][5]: Like `Data.Dynamic` and `Data.Typeable` but supporting polymorphic values -* [network-transport][6]: Generic `Network.Transport` API -* [network-transport-tcp][7]: TCP realisation of `Network.Transport` -* [network-transport-inmemory][8]: In-memory realisation of `Network.Transport` (incomplete) -* [network-transport-composed][9]: Compose two transports (very preliminary) -* [distributed-process-simplelocalnet][10]: Simple backend for local networks -* [distributed-process-azure][11]: Azure backend for Cloud Haskell (proof of concept) +* [distributed-process][distributed-process]: Base concurrency and distribution support +* [distributed-process-platform][distributed-process-platform]: The Cloud Haskell Platform - APIs +* [distributed-static][distributed-static]: Support for static values +* [rank1dynamic][rank1dynamic]: Like `Data.Dynamic` and `Data.Typeable` but supporting polymorphic values +* [network-transport][network-transport]: Generic `Network.Transport` API +* [network-transport-tcp][network-transport-tcp]: TCP realisation of `Network.Transport` +* [network-transport-inmemory][network-transport-inmemory]: In-memory realisation of `Network.Transport` (incomplete) +* [network-transport-composed][network-transport-composed]: Compose two transports (very preliminary) +* [distributed-process-simplelocalnet][distributed-process-simplelocalnet]: Simple backend for local networks +* [distributed-process-azure][distributed-process-azure]: Azure backend for Cloud Haskell (proof of concept) One of Cloud Haskell's goals is to separate the transport layer from the *process layer*, so that the transport backend is entirely independent: @@ -114,10 +114,10 @@ Haskell or C. 
### Network Transport Abstraction Layer -Cloud Haskell's generic [network-transport][6] API is entirely independent of +Cloud Haskell's generic [network-transport][network-transport] API is entirely independent of the concurrency and messaging passing capabilities of the *process layer*. Cloud Haskell applications are built using the primitives provided by the -*process layer* (i.e., [distributed-process][2]), which provides abstractions +*process layer* (i.e., [distributed-process][distributed-process]), which provides abstractions such as nodes and processes. Applications must also depend on a Cloud Haskell Backend, which provides functions to allow the initialisation of the transport layer using whatever topology might be appropriate to the application. @@ -160,7 +160,7 @@ of other `Network.Transport` APIs if required, but for the most part this is irrelevant and the application will interact with Cloud Haskell through the *Process Layer* and *Platform*. -For more details about `Network.Transport` please see the [wiki page][20]. +For more details about `Network.Transport` please see the [wiki page](/wiki/networktransport.html). ### Concurrency and Distribution @@ -169,7 +169,7 @@ distributed programming are exposed to application developers. This layer deals explicitly with The core of Cloud Haskell's concurrency and distribution support resides in the -[distributed-process][2] library. As well as the APIs necessary for starting +[distributed-process][distributed-process] library. As well as the APIs necessary for starting nodes and forking processes on them, we find all the basic primitives required to @@ -215,7 +215,7 @@ runProcess :: LocalNode -> Process () -> IO () {% endhighlight %} Once we've spawned some processes, they can communicate with one another -using the messaging primitives provided by [distributed-processes][2], +using the messaging primitives provided by [distributed-processes][distributed-processes], which are well documented in the haddocks. 
### What is Serializable @@ -272,7 +272,7 @@ need to spawn a process and send a bunch a messages to it, then wait for replies however; we can’t send a `ReceivePort` since it is not `Serializable`. `ReceivePort`s can be merged, so we can listen on several simultaneously. In the -latest version of [distributed-process][2], we can listen for *regular* messages +latest version of [distributed-process][distributed-process], we can listen for *regular* messages and multiple channels at the same time, using `matchChan` in the list of allowed matches passed `receiveWait` and `receiveTimeout`. @@ -313,7 +313,7 @@ and decide whether to oblige or not. ### Rethinking the Task Layer -[Towards Haskell in the Cloud][12] describes a multi-layered architecture, in +[Towards Haskell in the Cloud][haskell11-ch] describes a multi-layered architecture, in which manipulation of concurrent processes and message passing between them is managed in the *process layer*, whilst a higher level API described as the *task layer* provides additional features such as @@ -322,19 +322,19 @@ is managed in the *process layer*, whilst a higher level API described as the * data centric processing model * a promise (or *future*) abstraction, representing the result of a calculation that may or may not have yet completed -The [distributed-process-platform][18] library implements parts of the +The [distributed-process-platform][distributed-process-platform] library implements parts of the *task layer*, but takes a very different approach to that described -in the original paper and implemented by the [remote][14] package. In particular, +in the original paper and implemented by the [remote][remote] package. In particular, we diverge from the original design and defer to many of the principles -defined by Erlang's [Open Telecom Platform][13], taking in some well established +defined by Erlang's [Open Telecom Platform][OTP], taking in some well established Haskell concurrency design patterns along the way. 
-In fact, [distributed-process-platform][18] does not really consider the +In fact, [distributed-process-platform][distributed-process-platform] does not really consider the *task layer* in great detail. We provide an API comparable to remote's `Promise` in Control.Distributed.Process.Platform.Async. This API however, -is derived from Simon Marlow's [Control.Concurrent.Async][19] package, and is not +is derived from Simon Marlow's [Control.Concurrent.Async][async] package, and is not limited to blocking queries on `Async` handles in the same way. Instead our -[API][17] handles both blocking and non-blocking queries, polling +[API][d-p-platform-async] handles both blocking and non-blocking queries, polling and working with lists of `Async` handles. We also eschew throwing exceptions to indicate asynchronous task failures, instead handling *task* and connectivity failures using monitors. Users of the API need only concern themselves with the @@ -391,7 +391,7 @@ Work is also underway to provide abstractions for managing asynchronous tasks at a higher level, focussing on workload distribution and load regulation. The kinds of task that can be performed by the async implementations in -[distributed-process-platform][3] are limited only by their return type: +[distributed-process-platform][distributed-process-platform] are limited only by their return type: it **must** be `Serializable` - that much should've been obvious by now. The type of asynchronous task definitions comes in two flavours, one for local nodes which require no remote-table or static serialisation dictionary, @@ -430,14 +430,14 @@ domain was more *haskell-ish* than working with bare send and receive primitives The `Async` sub-package also provides a type safe interface for receiving data, although it is limited to running a computation and waiting for its result. 
-The [Control.Distributed.Processes.Platform.ManagedProcess][21] API provides a
+The [Control.Distributed.Process.Platform.ManagedProcess][d-p-platform-ManagedProcess] API provides a
 number of different abstractions that can be used to achieve similar benefits
 in your code. It works by introducing a standard protocol between your process
 and the *world outside*, which governs how to handle request/reply processing,
 exit signals, timeouts, sleeping/hibernation with `threadDelay` and even
 provides hooks that terminating processes can use to clean up residual state.
 
-The [API documentation][21] is quite extensive, so here we will simply point
+The [API documentation][d-p-platform-ManagedProcess] is quite extensive, so here we will simply point
 out the obvious differences. A process implemented with `ManagedProcess`
 can present a type safe API to its callers (and the server side code too!),
 although that's not its primary benefit. For a very simplified example:
@@ -489,8 +489,8 @@ API, which looks a lot like `Async` but manages exit signals in a single thread
 configurable task pools and task supervision strategy part of its API.
 
 More complex examples of the `ManagedProcess` API can be seen in the
-[Managed Processes tutorial][22]. API documentation for HEAD is available
-[here][21].
+[Managed Processes tutorial](tutorials/tutorial3.html). API documentation for HEAD is available
+[here][d-p-platform-ManagedProcess].
### Supervision Trees @@ -500,25 +500,21 @@ TBC TBC -[1]: http://haskell-distributed.github.io/documentation.html -[2]: https://github.com/haskell-distributed/distributed-process -[3]: https://github.com/haskell-distributed/distributed-process-platform -[4]: http://hackage.haskell.org/package/distributed-static -[5]: http://hackage.haskell.org/package/rank1dynamic -[6]: http://hackage.haskell.org/package/network-transport -[7]: http://hackage.haskell.org/package/network-transport-tcp -[8]: https://github.com/haskell-distributed/network-transport-inmemory -[9]: https://github.com/haskell-distributed/network-transport-composed -[10]: http://hackage.haskell.org/package/distributed-process-simplelocalnet -[11]: http://hackage.haskell.org/package/distributed-process-azure -[12]: http://research.microsoft.com/en-us/um/people/simonpj/papers/parallel/remote.pdf -[13]: http://en.wikipedia.org/wiki/Open_Telecom_Platform -[14]: http://hackage.haskell.org/package/remote -[15]: http://www.erlang.org/doc/design_principles/sup_princ.html -[16]: http://www.erlang.org/doc/man/supervisor.html -[17]: http://hackage.haskell.org/package/distributed-process-platform/Control-Distributed-Process-Platform-Async.html -[18]: https://github.com/haskell-distributed/distributed-process-platform -[19]: http://hackage.haskell.org/package/async -[20]: /wiki/networktransport.html -[21]: http://hackage.haskell.org/package/distributed-process-platform/Control-Distributed-Process-Platform-ManagedProcess.html -[22]: /tutorials/tutorial3.html +[cloud-haskell]: http://haskell-distributed.github.io/documentation.html +[fun201202-coutts]: http://sneezy.cs.nott.ac.uk/fun/2012-02/coutts-2012-02-28.pdf +[distributed-process]: https://github.com/haskell-distributed/distributed-process +[distributed-process-platform]: https://github.com/haskell-distributed/distributed-process-platform +[distributed-static]: http://hackage.haskell.org/package/distributed-static +[rank1dynamic]: 
http://hackage.haskell.org/package/rank1dynamic +[network-transport]: http://hackage.haskell.org/package/network-transport +[network-transport-tcp]: http://hackage.haskell.org/package/network-transport-tcp +[network-transport-inmemory]: https://github.com/haskell-distributed/network-transport-inmemory +[network-transport-composed]: https://github.com/haskell-distributed/network-transport-composed +[distributed-process-simplelocalnet]: http://hackage.haskell.org/package/distributed-process-simplelocalnet +[distributed-process-azure]: http://hackage.haskell.org/package/distributed-process-azure +[haskell11-ch]: http://research.microsoft.com/en-us/um/people/simonpj/papers/parallel/remote.pdf +[OTP]: http://en.wikipedia.org/wiki/Open_Telecom_Platform +[remote]: http://hackage.haskell.org/package/remote +[d-p-platform-async]: http://hackage.haskell.org/package/distributed-process-platform/Control-Distributed-Process-Platform-Async.html +[async]: http://hackage.haskell.org/package/async +[d-p-platform-ManagedProcess]: http://hackage.haskell.org/package/distributed-process-platform/Control-Distributed-Process-Platform-ManagedProcess.html From 24f55b585ad1c0abb3b8888f459c782bf0e85b30 Mon Sep 17 00:00:00 2001 From: Mathieu Boespflug Date: Sun, 13 Apr 2014 16:34:13 +0200 Subject: [PATCH 021/108] Remove "recent" - presentation linked to not so recent anymore. --- documentation.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/documentation.md b/documentation.md index 787dda4..ee44612 100644 --- a/documentation.md +++ b/documentation.md @@ -22,7 +22,7 @@ growing number of features for * working with several network transport implementations (and more in the pipeline) * supporting *static* values (required for remote communication) -There is a recent +There is a [presentation][fun201202-coutts] on Cloud Haskell and this reimplementation, which is worth reading in conjunction with the documentation and wiki pages on this website.. 
From 870c236dd8f607af9d32b927c9b62db93e141f2d Mon Sep 17 00:00:00 2001 From: Mathieu Boespflug Date: Sun, 13 Apr 2014 16:34:37 +0200 Subject: [PATCH 022/108] Delete trailing whitespace. --- documentation.md | 22 +++++++++++----------- 1 file changed, 11 insertions(+), 11 deletions(-) diff --git a/documentation.md b/documentation.md index ee44612..667162f 100644 --- a/documentation.md +++ b/documentation.md @@ -36,7 +36,7 @@ others experimental. * [rank1dynamic][rank1dynamic]: Like `Data.Dynamic` and `Data.Typeable` but supporting polymorphic values * [network-transport][network-transport]: Generic `Network.Transport` API * [network-transport-tcp][network-transport-tcp]: TCP realisation of `Network.Transport` -* [network-transport-inmemory][network-transport-inmemory]: In-memory realisation of `Network.Transport` (incomplete) +* [network-transport-inmemory][network-transport-inmemory]: In-memory realisation of `Network.Transport` (incomplete) * [network-transport-composed][network-transport-composed]: Compose two transports (very preliminary) * [distributed-process-simplelocalnet][distributed-process-simplelocalnet]: Simple backend for local networks * [distributed-process-azure][distributed-process-azure]: Azure backend for Cloud Haskell (proof of concept) @@ -50,7 +50,7 @@ backend transports. Abstracting over the transport layer allows different protocols for message passing, including TCP/IP, UDP, -[MPI](http://en.wikipedia.org/wiki/Message_Passing_Interface), +[MPI](http://en.wikipedia.org/wiki/Message_Passing_Interface), [CCI](http://www.olcf.ornl.gov/center-projects/common-communication-interface/), ZeroMQ, SSH, MVars, Unix pipes, and more. Each of these transports would provide its own implementation of the `Network.Transport` and provide a means of creating @@ -109,7 +109,7 @@ The Cloud Haskell interface and backend, make use of the Transport interface provided by the `Network.Transport` module. 
This also serves as an interface for the `Network.Transport.*` module, which provides a specific implementation for this transport, -and may, for example, be based on some external library written in +and may, for example, be based on some external library written in Haskell or C. ### Network Transport Abstraction Layer @@ -166,7 +166,7 @@ For more details about `Network.Transport` please see the [wiki page](/wiki/netw The *Process Layer* is where Cloud Haskell's support for concurrency and distributed programming are exposed to application developers. This layer -deals explicitly with +deals explicitly with The core of Cloud Haskell's concurrency and distribution support resides in the [distributed-process][distributed-process] library. As well as the APIs necessary for starting @@ -254,10 +254,10 @@ We create channels with a call to `newChan`, and send/receive on them using the channelsDemo :: Process () channelsDemo = do (sp, rp) <- newChan :: Process (SendPort String, ReceivePort String) - + -- send on a channel spawnLocal $ sendChan sp "hello!" - + -- receive on a channel m <- receiveChan rp say $ show m @@ -356,13 +356,13 @@ demoAsync = do -- we can cancel the task if we want to -- cancel hAsync - + -- or cancel it and wait until it has exited -- cancelWait hAsync - + -- we can wait on the task and timeout if it's still busy Nothing <- waitTimeout (within 3 Seconds) hAsync - + -- or finally, we can block until the task is finished! asyncResult <- wait hAsync case asyncResult of @@ -400,11 +400,11 @@ and another for tasks you wish to execute on remote nodes. {% highlight haskell %} -- | A task to be performed asynchronously. 
data AsyncTask a = - AsyncTask + AsyncTask { asyncTask :: Process a -- ^ the task to be performed } - | AsyncRemoteTask + | AsyncRemoteTask { asyncTaskDict :: Static (SerializableDict a) -- ^ the serializable dict required to spawn a remote process From fe9158ba214d5b65363ca95cf19ec42b966d6811 Mon Sep 17 00:00:00 2001 From: Mathieu Boespflug Date: Sun, 13 Apr 2014 16:38:19 +0200 Subject: [PATCH 023/108] Link to HdpH project, which reuses the transport layer. --- documentation.md | 12 +++++------- 1 file changed, 5 insertions(+), 7 deletions(-) diff --git a/documentation.md b/documentation.md index 667162f..b40a051 100644 --- a/documentation.md +++ b/documentation.md @@ -42,11 +42,9 @@ others experimental. * [distributed-process-azure][distributed-process-azure]: Azure backend for Cloud Haskell (proof of concept) One of Cloud Haskell's goals is to separate the transport layer from the -*process layer*, so that the transport backend is entirely independent: -it is envisaged that this interface might later be used by models -other than the Cloud Haskell paradigm, and that applications built -using Cloud Haskell might be easily configured to work with different -backend transports. +*process layer*, so that the transport backend is entirely independent. In fact +other projects can and do reuse the transport layer, even if they don't use or +have their own process layer (see e.g. [HdpH][hdph]). Abstracting over the transport layer allows different protocols for message passing, including TCP/IP, UDP, @@ -54,8 +52,7 @@ message passing, including TCP/IP, UDP, [CCI](http://www.olcf.ornl.gov/center-projects/common-communication-interface/), ZeroMQ, SSH, MVars, Unix pipes, and more. Each of these transports would provide its own implementation of the `Network.Transport` and provide a means of creating -new connections for use within `Control.Distributed.Process`. This separation means -that transports might be used for other purposes than Cloud Haskell. 
+new connections for use within `Control.Distributed.Process`. The following diagram shows dependencies between the various subsystems, in an application using Cloud Haskell, where arrows represent explicit @@ -512,6 +509,7 @@ TBC [network-transport-composed]: https://github.com/haskell-distributed/network-transport-composed [distributed-process-simplelocalnet]: http://hackage.haskell.org/package/distributed-process-simplelocalnet [distributed-process-azure]: http://hackage.haskell.org/package/distributed-process-azure +[hdph]: http://hackage.haskell.org/package/hdph [haskell11-ch]: http://research.microsoft.com/en-us/um/people/simonpj/papers/parallel/remote.pdf [OTP]: http://en.wikipedia.org/wiki/Open_Telecom_Platform [remote]: http://hackage.haskell.org/package/remote From 4cfe2016026a8e8e9af4717c4d77c053d4bd46fd Mon Sep 17 00:00:00 2001 From: Mathieu Boespflug Date: Sun, 13 Apr 2014 16:40:23 +0200 Subject: [PATCH 024/108] ZeroMQ, openSSH links. --- documentation.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/documentation.md b/documentation.md index b40a051..5c58c78 100644 --- a/documentation.md +++ b/documentation.md @@ -50,7 +50,7 @@ Abstracting over the transport layer allows different protocols for message passing, including TCP/IP, UDP, [MPI](http://en.wikipedia.org/wiki/Message_Passing_Interface), [CCI](http://www.olcf.ornl.gov/center-projects/common-communication-interface/), -ZeroMQ, SSH, MVars, Unix pipes, and more. Each of these transports would provide +[ZeroMQ](http://zeromq.org), [SSH](http://openssh.com), MVars, Unix pipes, and more. Each of these transports provides its own implementation of the `Network.Transport` and provide a means of creating new connections for use within `Control.Distributed.Process`. From 194be491e7e5fe389e86e0d493410423991152bd Mon Sep 17 00:00:00 2001 From: Mathieu Boespflug Date: Sun, 13 Apr 2014 16:40:43 +0200 Subject: [PATCH 025/108] typos. 
--- documentation.md | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/documentation.md b/documentation.md index 5c58c78..0279619 100644 --- a/documentation.md +++ b/documentation.md @@ -51,7 +51,7 @@ message passing, including TCP/IP, UDP, [MPI](http://en.wikipedia.org/wiki/Message_Passing_Interface), [CCI](http://www.olcf.ornl.gov/center-projects/common-communication-interface/), [ZeroMQ](http://zeromq.org), [SSH](http://openssh.com), MVars, Unix pipes, and more. Each of these transports provides -its own implementation of the `Network.Transport` and provide a means of creating +its own implementation of the `Network.Transport` API and provide a means of creating new connections for use within `Control.Distributed.Process`. The following diagram shows dependencies between the various subsystems, @@ -91,8 +91,8 @@ In this diagram, the various nodes roughly correspond to specific modules: Transport Implementation : Network.Transport.* An application is built using the primitives provided by the Cloud -Haskell layer, provided by `Control.Distributed.Process` module, which -provides abstractions such as nodes and processes. +Haskell layer, provided by the `Control.Distributed.Process` module, which +defines abstractions such as nodes and processes. The application also depends on a Cloud Haskell Backend, which provides functions to allow the initialisation of the transport layer @@ -102,7 +102,7 @@ It is, of course, possible to create new Cloud Haskell nodes by using a Network Transport Backend such as `Network.Transport.TCP` directly. -The Cloud Haskell interface and backend, make use of the Transport +The Cloud Haskell interface and backend make use of the Transport interface provided by the `Network.Transport` module. 
This also serves as an interface for the `Network.Transport.*` module, which provides a specific implementation for this transport, @@ -116,7 +116,7 @@ the concurrency and messaging passing capabilities of the *process layer*. Cloud Haskell applications are built using the primitives provided by the *process layer* (i.e., [distributed-process][distributed-process]), which provides abstractions such as nodes and processes. Applications must also depend on a Cloud Haskell -Backend, which provides functions to allow the initialisation of the transport +backend, which provides functions to allow the initialisation of the transport layer using whatever topology might be appropriate to the application. `Network.Transport` is a network abstraction layer geared towards specific @@ -125,7 +125,7 @@ classes of applications, offering the following high level concepts: * Nodes in the network are represented by `EndPoint`s. These are heavyweight stateful objects. * Each `EndPoint` has an `EndPointAddress`. * Connections can be established from one `EndPoint` to another using the `EndPointAddress` of the remote end. -* The `EndPointAddress` can be serialised and sent over the network, where as `EndPoint`s and connections cannot. +* The `EndPointAddress` can be serialised and sent over the network, whereas `EndPoint`s and connections cannot. * Connections between `EndPoint`s are unidirectional and lightweight. * Outgoing messages are sent via a `Connection` object that represents the sending end of the connection. * Incoming messages for **all** of the incoming connections on an `EndPoint` are collected via a shared receive queue. @@ -328,7 +328,7 @@ Haskell concurrency design patterns along the way. In fact, [distributed-process-platform][distributed-process-platform] does not really consider the *task layer* in great detail. We provide an API comparable to remote's -`Promise` in Control.Distributed.Process.Platform.Async. 
This API however, +`Promise` in `Control.Distributed.Process.Platform.Async`. This API however, is derived from Simon Marlow's [Control.Concurrent.Async][async] package, and is not limited to blocking queries on `Async` handles in the same way. Instead our [API][d-p-platform-async] handles both blocking and non-blocking queries, polling @@ -376,7 +376,7 @@ around `Async` that disallows side effects is relatively simple, and we do not consider the presence of side effects a barrier to fault tolerance and automated process restarts. Erlang does not forbid *IO* in its processes, and yet that doesn't render supervision trees ineffective. They key is to -provide a rich enough API that statefull processes can recognise whether or +provide a rich enough API that stateful processes can recognise whether or not they need to provide idempotent initialisation routines. The utility of preventing side effects using the type system is, however, not From a263916a7199b8e9cdd588c3c6a5938cc48d5321 Mon Sep 17 00:00:00 2001 From: Mathieu Boespflug Date: Sun, 13 Apr 2014 18:23:03 +0200 Subject: [PATCH 026/108] Replace reference to #HaskellTransportLayer chan to #haskell-distributed. --- wiki/networktransport.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/wiki/networktransport.md b/wiki/networktransport.md index ed70eaf..690346c 100644 --- a/wiki/networktransport.md +++ b/wiki/networktransport.md @@ -35,8 +35,8 @@ transport and (mostly for demonstration purposes) an in-memory The TCP/IP implementation of Network.Transport should be usable, if not completely stable yet. The design of the transport layer may also still change. -Feedback and suggestions are most welcome. Email [Duncan](mailto:duncan@well-typed.com) or [Edsko](mailto:edsko@well-typed.com) at Well-Typed, find us at #HaskellTransportLayer on -freenode, or post on the [Parallel Haskell][2] mailing list. +Feedback and suggestions are most welcome. 
Email [Duncan](mailto:duncan@well-typed.com) or [Edsko](mailto:edsko@well-typed.com) at Well-Typed, find us at #haskell-distributed on +Freenode, or post on the [Parallel Haskell][2] mailing list. You may also submit issues on the [JIRA issue tracker][8]. From 2d63efbc3371eb02d2c7d9101289d60ff5d277f5 Mon Sep 17 00:00:00 2001 From: Tavis Rudd Date: Mon, 5 May 2014 22:26:53 -0700 Subject: [PATCH 027/108] s/terminateHandler/shutdownHandler/ This was renamed in the code a while back. --- tutorials/4ch.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tutorials/4ch.md b/tutorials/4ch.md index 0f71a76..83dc0ef 100644 --- a/tutorials/4ch.md +++ b/tutorials/4ch.md @@ -69,7 +69,7 @@ myServer = ] -- what should I do just before stopping? - , terminateHandler = myTerminateFunction + , shutdownHandler = myShutdownFunction -- what should I do about messages that cannot be handled? , unhandledMessagePolicy = Drop -- Terminate | (DeadLetter ProcessId) From 99825b0864ad5e34ba31d065a8d14c3eeb1ead7d Mon Sep 17 00:00:00 2001 From: Tim Watson Date: Thu, 22 May 2014 14:33:17 +0100 Subject: [PATCH 028/108] Update to reflect new branching policy --- wiki/contributing.md | 35 ++++++++++++- wiki/maintainers.md | 117 ++++++++++++++++++++++++------------------- 2 files changed, 98 insertions(+), 54 deletions(-) diff --git a/wiki/contributing.md b/wiki/contributing.md index f6bac0a..dc005af 100644 --- a/wiki/contributing.md +++ b/wiki/contributing.md @@ -18,6 +18,37 @@ we're saying here are just common sense and none of them is hard to follow. With this in mind, please try to observe the following guidelines when submitting patches. +## Common Repository Format and Git Branches + +All Cloud Haskell repositories should conform to a common structure, with the +exception of website and/or documentation projects. The structure is basically +that of a cabal library or executable project, with a couple of additional files. 
+ +``` +- project-name + - project-name.cabal + - Makefile + - LICENCE + - README.md + - src/ + - tests/ + - benchmarks/ + - examples/ + - regressions/ +``` + +All repositories must use the same git branch structure: + +* ongoing development takes place on the `master` branch +* the code that goes into a release must be tagged as soon as it is uploaded to hackage +* each release tag must then get its own release-x.y.z branch, branched from the tagged commit +* after a release, the *release branch* is not merged back into `master` +* patches can be submitted via branches off `master`, or from a `release-x.y.z` branch +* features and bug-fixes that are compatible with `master` can be merged in directly +* interim bug-fixes we **don't** want included (i.e., bug-fix only releases) + * are merged into a new release-x.y.z branch (taken from the prior release branch) instead + * these can, but do not have to be, merged into `master` + ### __1. Check to see if your patch is likely to be accepted__ We have a rather full backlog, so your help will be most welcome assisting @@ -92,8 +123,8 @@ contain multitudinous compiler warnings will take longer to QA. Please be aware of whether or not your changes are actually a bugfix or a new feature, and branch from the right place accordingly. The general rule is: -* new features must branch off `development` -* bug fixes must branch off `master` (which is the stable, production branch) +* new features should branch off `master` +* bug fixes can branch off `master` or a `release-x.y.z` branch If you branch from the wrong place then you will be asked to rework your changes so try to get this right in the first place. 
If you're unsure whether a patch diff --git a/wiki/maintainers.md b/wiki/maintainers.md index e39dfed..23130b6 100644 --- a/wiki/maintainers.md +++ b/wiki/maintainers.md @@ -6,31 +6,29 @@ wiki: Maintainers ### Maintainers -This part of the guide is specifically for maintainers, and -outlines the development process and in particular, the branching -strategy. We also point out Cloud Haskell's various bits of -infrastructure as they stand at the moment. +This part of the guide is specifically for maintainers, and outlines the development +process and in particular, the branching strategy. We also point out Cloud Haskell's +various bits of infrastructure as they stand at the moment. -Perhaps the most important thing to do as a maintainer, is to -make other developers aware of what you're working on by assigning -the Jira issue to yourself! +Perhaps the most important thing to do as a maintainer, is to make other developers +aware of what you're working on by assigning the Jira issue to yourself! ---- #### Releases All releases are published to [hackage][3]. At some point we may start to -make *nightly builds* available on this website. +make *nightly builds* available on this website. We need some help setting that +up though. ---- #### Community -We keep in touch through the [parallel-haskell google group][7], -and once you've joined the group, by posting to the mailing list address: -parallel-haskell@googlegroups.com. This is a group for **all** things related -to concurrent and parallel Haskell. There is also a maintainer/developer -centric [cloud-haskell-developers google group][9], which is more for -in-depth questions about contributing to or maintaining Cloud Haskell. +We keep in touch through the [parallel-haskell google group][7], and once you've +joined the group, by posting to the mailing list address: parallel-haskell@googlegroups.com. +This is a group for **all** things related to concurrent and parallel Haskell. 
There is
+also a maintainer/developer centric [cloud-haskell-developers google group][9], which is
+more for in-depth questions about contributing to or maintaining Cloud Haskell.
 
 You might also find some of us hanging out at #haskell-distributed on
 freenode from time to time.
@@ -59,52 +57,66 @@ the timescales yet.
 
 ### Branching/Merging Policy
 
-The master branch is the **stable** branch, and should always be
-in a *releasable* state. This means that on the whole, only small
-self contained commits or topic branch merges should be applied
-to master, and tagged releases should always be made against it.
+The master branch is the *active development* branch. Small changes can be committed
+directly to `master`, or committed to a branch before getting merged. When we're
+ready to release, the project is tagged using the format `vX.Y.Z` once it has been
+published. Each release then gets its own `release-x.y.z` branch, created from the
+tagged commit. These can be used to produce interim/bug-fix releases if necessary.
 
-#### Tagging Releases
+A release tag should be of the form `x.y.z` and should **not** be prefixed with the
+repository name. Older tags use the latter form as they date from a time when all the
+Cloud Haskell source code lived under one git repository.
 
-A release tag should be of the form `x.y.z` and should **not**
-be prefixed with the repository name. Older tags use the latter
-form as they date from a time when all the Cloud Haskell source
-code lived under one git repository.
+Patches should be made against `master` **unless** they represent an interim bug-fix
+to a given `release-x-y-z` branch. The latter should be created against the branch
+they intend to fix.
 
 #### Development Branches
 
-Ongoing work can either be merged into master when complete or
-merged into development. Development is effectively an integration
-branch, to make sure ongoing changes and new features play nicely
-with one another. 
On the other hand, master is a 'stable' branch
-and therefore you should only merge into it if the result will be
-releasable.
-
-In general, we try to merge changes that belong to a major version
-upgrade into development, whereas changes that will go into the
-next minor version upgrade can be merged into master.
+Development should usually take place on a `feature-X` or `bugfix-Y` branch. The
+old `development` branch is defunct and will be removed at some point in the future.
+
+#### Interim and bug-fix only releases
+
+The complexity around *interim* releases is necessary to deal with situations where we
+need to patch a release, but `master` has already moved on, making the patch irrelevant
+for the next major release. For example, assuming a project is at version 1.2.3 and we
+find a bug in module `Foo.hs`. Users who're on v1.2.3 will want the fix asap for any
+production deployments, yet `Foo.hs` has been completely re-written/replaced on `master`
+and the bugfix isn't needed there. To support users who do not wish to wait for v2.0.0
+to be released, we create a `release-1.2.4` branch and merge the bugfix into that, but
+we do **not** merge this back into master, since the patch won't apply.
+This way, subsequent bug fixes to the 1.2.x series can also continue in parallel with
+changes to `master` if necessary.
+
+What happens if we have a patch for `release-1.2.3` that *is* applicable to master, but
+won't apply cleanly due to changes since the release was tagged? In this situation, we
+create a bug-fix release branch as usual, into which the patch is merged. A maintainer
+will then have to retro-fit the patch so that it can be applied to `master`. This is
+a somewhat ugly process, since if we wish to create subsequent bug-fixes and create another
+1.2.5 branch (viz the example above), we may have to manually transplant some changes
+from `master` in the process.
 
 #### Keeping History
 
-Try to make only clean commits, so that bisect will continue to work. 
-At the same time, it's best to avoid making destructive updates. If -you're planning on doing lots of squashing, then work in a branch -and don't commit directly to development - and **definitely** not to -master. +Try to make only clean commits, so that bisect will continue to work. At the same time, +it can be helpful to avoid making destructive updates. If you're planning on doing lots +of squashes, then work in a branch and don't commit directly to `master` until you're +finished and ready to QA and merge. #### Committing without triggering CI builds -Whilst we're on travis-ci, you can do this by putting the text -`[ci skip]` anywhere in the commit message. Please, please -**do not** put this on the first line of the commit message. +Whilst we're on travis-ci, you can do this by putting the text `[ci skip]` anywhere in +the commit message. Please, please **do not** put this on the first line of the commit +message. Once we migrate to Bamboo, this may change. #### Changing Jira bugs/issues via commit messages -You can make complex changes to one or more Jira issues with a single -commit message. As with skipping CI builds, please **do not** put this -messy text into the first line of your commit messages. +You can make complex changes to one or more Jira issues with a single commit message. +As with skipping CI builds, please **do not** put this messy text into the first line +of your commit messages. Details of the format/syntax required to do this can be found on [this Jira documentation page](https://confluence.atlassian.com/display/AOD/Processing+JIRA+issues+with+commit+messages) @@ -125,11 +137,10 @@ See https://cloud-haskell.atlassian.net/browse/INFRA-1 for details. ### Release Process -First of all, a few prior warnings. **Do not** tag any projects -until *after* you've finished the release. If you build and tag -three projects, only to find that a subsequent dependent package -needs a bit of last minute surgery, you'll be sorry you didn't -wait. 
With that in mind.... +First of all, a few prior warnings. **Do not** tag any projects until *after* +you've finished and uploaded the release. If you build and tag three projects, +only to find that a subsequent dependent package needs a bit of last minute +surgery, you'll be sorry you didn't wait. With that in mind.... Before releasing any source code, make sure that all the jira tickets added to the release are either resolved or remove them from the @@ -156,11 +167,13 @@ too! **Now** you should tag all the projects with the relevant version number. Since moving to individual git repositories, the tagging scheme is now -`x.y.z` and *not* `-x.y.z`. +`x.y.z` and *not* `-x.y.z`. Once you've tagged the release, create +a branch named `release-x.y.z` and push both the newly created tag(s) and the +branch(es). Once the release is out, you should go to [JIRA](https://cloud-haskell.atlassian.net) -and close all the tickets for the release. Jira has a nice 'bulk change' -feature that makes this very easy. +and close all the tickets for the release. Jira has a nice 'bulk change' feature +that makes this very easy. After that, it's time to tweet about the release, post to the parallel-haskell mailing list, blog etc. Spread the word. From e1f33851696b1c126405d95ec60b11e8ff5f158d Mon Sep 17 00:00:00 2001 From: Tim Watson Date: Fri, 23 May 2014 10:45:06 +0100 Subject: [PATCH 029/108] changelog support --- _layouts/changelog.html | 50 +++++++++++++++++++++++++++++++++++++++++ changelog.md | 33 +++++++++++++++++++++++++++ changelog/dp-0.4.2.md | 34 ++++++++++++++++++++++++++++ 3 files changed, 117 insertions(+) create mode 100644 _layouts/changelog.html create mode 100644 changelog.md create mode 100644 changelog/dp-0.4.2.md diff --git a/_layouts/changelog.html b/_layouts/changelog.html new file mode 100644 index 0000000..e671c9e --- /dev/null +++ b/_layouts/changelog.html @@ -0,0 +1,50 @@ + + + + {% include head.html %} + + + + + {% include nav.html %} +

    +
    +
    +
    +
    + +
    +
    +
    +
    + {% include footer.html %} + {% include js.html %} + + + + + + diff --git a/changelog.md b/changelog.md new file mode 100644 index 0000000..675dede --- /dev/null +++ b/changelog.md @@ -0,0 +1,33 @@ +--- +layout: wiki +title: Cloud Haskell Wiki +wiki: Welcome +--- + +### Welcome + +Welcome to the Cloud Haskell Wiki. Navigate to specific pages using the links +on the left. If you wish to edit or add to the pages in this wiki, read on. + +### Editing + +Editing the wiki is pretty simple. This entire website is stored in a git +repository and its dynamic content rendered by github pages using [Jekyll][1]. +You can clone the repository [here][2]. Instructions for using jekyll are +available [online][1], but in general it's just a matter of finding the right +markdown file. Wiki content is all located in the wiki subfolder. + +### Adding new content + +New wiki pages need to have some specific fields in their [Yaml Front Matter][3]. +There is a makefile in the root directory which will create a wiki page for +you (in the wiki directory) and populate the front matter for you. Calling the +makefile is pretty easy. + +{% highlight bash %} +make wikipage NAME= +{% endhighlight %} + +[1]: https://github.com/mojombo/jekyll +[2]: https://github.com/haskell-distributed/haskell-distributed.github.com +[3]: https://github.com/mojombo/jekyll/wiki/YAML-Front-Matter diff --git a/changelog/dp-0.4.2.md b/changelog/dp-0.4.2.md new file mode 100644 index 0000000..98515fb --- /dev/null +++ b/changelog/dp-0.4.2.md @@ -0,0 +1,34 @@ +--- +layout: changelog +title: distributed-process-0.4.2 +feed_url: https://cloud-haskell.atlassian.net/sr/jira.issueviews:searchrequest-rss/temp/SearchRequest.xml?jqlQuery=project+%3D+DP+AND+status+%3D+Closed+AND+fixVersion+%3D+0.4.2&tempMax=1000 +--- + +### Release Notes - distributed-process - Version 0.4.2 + +

    Bug +

    +
      +
    • [DP-60] - Fixes made for the distributed-process with strict bytestrings +
    • +
    + +

    Improvement +

    +
      +
    • [DP-61] - Switched from Binary to cereal, allowed switching between lazy and strict ByteString, fixed some bugs +
    • +
    + +

    Task +

    +
      +
    • [DP-31] - Messages from the "main channel" as a receivePort +
    • +
    • [DP-35] - Add variants of exit and kill for the current process +
    • +
    • [DP-50] - killing processes +
    • +
    • [DP-51] - provide local versions of spawnLink and spawnMonitor +
    • +
    From f99f2f2bbf44b97a2500d273e0650fc01b3641bc Mon Sep 17 00:00:00 2001 From: Tim Watson Date: Wed, 28 May 2014 12:01:02 +0100 Subject: [PATCH 030/108] Sort out changelog support --- Makefile | 2 +- _config.yml | 2 +- _includes/nav.html | 1 + _layouts/changelog.html | 48 +++++++------ _layouts/changes.html | 41 +++++++++++ changelog.md | 33 --------- changelog/dp-0.4.1.md | 23 +++++++ changelog/dp-0.4.2.md | 41 ++++++----- changelog/dp-0.5.0.md | 68 +++++++++++++++++++ changes.md | 31 +++++++++ .../templates/{wikipage.md => wikipage.mdt} | 0 11 files changed, 217 insertions(+), 73 deletions(-) create mode 100644 _layouts/changes.html delete mode 100644 changelog.md create mode 100644 changelog/dp-0.4.1.md create mode 100644 changelog/dp-0.5.0.md create mode 100644 changes.md rename static/templates/{wikipage.md => wikipage.mdt} (100%) diff --git a/Makefile b/Makefile index 97e0cd3..c916692 100644 --- a/Makefile +++ b/Makefile @@ -13,7 +13,7 @@ all: ifneq ($(NAME), '') $(TEMPLATES): - cat ${TEMPLATE_DIR}/$@.md | sed s/@PAGE@/${NAME}/g >> ${ROOT_DIRECTORY}/wiki/${FNAME}.md + cat ${TEMPLATE_DIR}/$@.mdt | sed s/@PAGE@/${NAME}/g >> ${ROOT_DIRECTORY}/wiki/${FNAME}.md else $(TEMPLATES): $(error you need to specify NAME= to run this target) diff --git a/_config.yml b/_config.yml index d4f89de..892bf81 100644 --- a/_config.yml +++ b/_config.yml @@ -1,4 +1,4 @@ -exclude: [".rvmrc", ".rbenv-version", "README.md", "Rakefile", "changelog.md"] +exclude: [".rvmrc", ".rbenv-version", "README.md", "Rakefile", "static/templates/wikipage.mdt"] lsi: false auto: true pygments: true diff --git a/_includes/nav.html b/_includes/nav.html index dbb968a..2282ceb 100644 --- a/_includes/nav.html +++ b/_includes/nav.html @@ -32,6 +32,7 @@ +
    +

    Version {{ page.version }}

    +

    Status: {% if page.status contains 'Released' %} + {{ page.status }} + {% else %} + Pending Release + {% endif %} +

    +

    Due/Released: {{ page.date }}

    +

    Code Changes (redirects to github)

    + {{ content }} +
    Issue RSS Feed
    +
    + {% include footer.html %} @@ -40,11 +56,3 @@ - - - diff --git a/_layouts/changes.html b/_layouts/changes.html new file mode 100644 index 0000000..8144182 --- /dev/null +++ b/_layouts/changes.html @@ -0,0 +1,41 @@ + + + + {% include head.html %} + + + + + {% include nav.html %} +
    + +
    +
    +
    + +
    + {{ content }} +
    +
    +
    + {% include footer.html %} + {% include js.html %} + + + diff --git a/changelog.md b/changelog.md deleted file mode 100644 index 675dede..0000000 --- a/changelog.md +++ /dev/null @@ -1,33 +0,0 @@ ---- -layout: wiki -title: Cloud Haskell Wiki -wiki: Welcome ---- - -### Welcome - -Welcome to the Cloud Haskell Wiki. Navigate to specific pages using the links -on the left. If you wish to edit or add to the pages in this wiki, read on. - -### Editing - -Editing the wiki is pretty simple. This entire website is stored in a git -repository and its dynamic content rendered by github pages using [Jekyll][1]. -You can clone the repository [here][2]. Instructions for using jekyll are -available [online][1], but in general it's just a matter of finding the right -markdown file. Wiki content is all located in the wiki subfolder. - -### Adding new content - -New wiki pages need to have some specific fields in their [Yaml Front Matter][3]. -There is a makefile in the root directory which will create a wiki page for -you (in the wiki directory) and populate the front matter for you. Calling the -makefile is pretty easy. - -{% highlight bash %} -make wikipage NAME= -{% endhighlight %} - -[1]: https://github.com/mojombo/jekyll -[2]: https://github.com/haskell-distributed/haskell-distributed.github.com -[3]: https://github.com/mojombo/jekyll/wiki/YAML-Front-Matter diff --git a/changelog/dp-0.4.1.md b/changelog/dp-0.4.1.md new file mode 100644 index 0000000..cc650ab --- /dev/null +++ b/changelog/dp-0.4.1.md @@ -0,0 +1,23 @@ +--- +layout: changelog +title: distributed-process-0.4.1 +date: Sun Jan 27 15:12:02 UTC 2013 +status: Released +version: 0.4.1 +commits: distributed-process-0.4.0.2...distributed-process-0.4.1 +hackage: https://hackage.haskell.org/package/distributed-process +--- +--------- +

    Notes

    + +This is a small feature release containing various enhancements. + +--------- +

    Improvements

    +
      +
    • Update package boundaries
    • +
    • depend on binary-0.6 instead of 0.5
    • +
    • Require http-conduit >= 1.8.1
    • +
    • Implement receiveChanTimeout
    • +
    • Changed semantics of register/unregister to match Erlang
    • +
    • Fixed de-registration of remote processes when the process terminates
    • diff --git a/changelog/dp-0.4.2.md b/changelog/dp-0.4.2.md index 98515fb..2488d4f 100644 --- a/changelog/dp-0.4.2.md +++ b/changelog/dp-0.4.2.md @@ -1,34 +1,39 @@ --- layout: changelog title: distributed-process-0.4.2 +date: Sun Jan 27 15:12:02 UTC 2013 +status: Released +version: 0.4.2 +commits: distributed-process-0.4.1...v0.4.2 +hackage: https://hackage.haskell.org/package/distributed-process +release: https://cloud-haskell.atlassian.net/browse/DP/fixforversion/10006 feed_url: https://cloud-haskell.atlassian.net/sr/jira.issueviews:searchrequest-rss/temp/SearchRequest.xml?jqlQuery=project+%3D+DP+AND+status+%3D+Closed+AND+fixVersion+%3D+0.4.2&tempMax=1000 --- +--------- +

      Notes

      -### Release Notes - distributed-process - Version 0.4.2 - -

      Bug -

      +This is a small feature release containing process management enhancements and +a new tracing/debugging capability. + +--------- +

      Bugs

        -
      • [DP-60] - Fixes made for the distributed-process with strict bytestrings +
      • [DP-60] - Fixes made for the distributed-process with strict bytestrings
      - -

      Improvement -

      + +

      Improvements

        -
      • [DP-61] - Switched from Binary to cereal, allowed switching between lazy and strict ByteString, fixed some bugs +
      • [DP-61] - Switched from Binary to cereal, allowed switching between lazy and strict ByteString
      • -
      - -

      Task -

      -
        -
      • [DP-31] - Messages from the "main channel" as a receivePort +
      • [DP-31] - Messages from the "main channel" as a receivePort +
      • +
      • [DP-35] - Add variants of exit and kill for the current process
      • -
      • [DP-35] - Add variants of exit and kill for the current process +
      • [DP-50] - Support for killing processes
      • -
      • [DP-50] - killing processes +
      • [DP-51] - provide local versions of spawnLink and spawnMonitor
      • -
      • [DP-51] - provide local versions of spawnLink and spawnMonitor +
      • [ - Tracing and Debugging support]
      diff --git a/changelog/dp-0.5.0.md b/changelog/dp-0.5.0.md new file mode 100644 index 0000000..fddcc38 --- /dev/null +++ b/changelog/dp-0.5.0.md @@ -0,0 +1,68 @@ +--- +layout: changelog +title: distributed-process-0.5.0 +status: Released +date: Wed May 28 12:15:02 UTC 2014 +version: 0.5.0 +commits: distributed-process-0.4.2...master +hackage: https://hackage.haskell.org/package/distributed-process +release: https://cloud-haskell.atlassian.net/browse/DP/fixforversion/10008 +feed_url: https://cloud-haskell.atlassian.net/sr/jira.issueviews:searchrequest-rss/temp/SearchRequest.xml?jqlQuery=project+%3D+DP+AND+status+%3D+Closed+AND+fixVersion+%3D+0.5.0&tempMax=1000 +--- +--------- +

      Notes

      + +This is a full feature release containing important enhancements to inter-process messaging, +process and node management, debugging and tracing. Various bug-fixes have also been made. + +
      Highlights
      + +New advanced messaging APIs provide broader polymorphic primitives for receiving and processing messages +regardless of the underlying (when decoded) types. Extended exit handling capabilities have been added, +to facilitate processing *exit signals* when the *exit reason* could be represented by a variety of +underlying types. + +The performance of inter-process messaging has been optimised for intra-node use cases. Messages are no +longer sent over the network-transport infrastructure when the receiving process resides on the same node +as the sender. New `unsafe` APIs have been made available to allow code that uses intra-node messaging to +skip the serialization of messages, facilitating further performance benefits at the risk of altered +error handling semantics. More details are available in the [`UnsafePrimitives` documentation][1]. + +A new [*Management API*][2] has been added, giving user code the ability to receive and respond to a running +node's internal system events. The tracing and debugging support added in 0.4.2 has been [upgraded][3] to use +this API, which is more efficient and flexible. + +--------- + +

      Bugs

      +
        +
      • [DP-68] - Dependency on STM implicitly changed from 1.3 to 1.4, but was not reflected in the .cabal file
      • +
      • [DP-79] - Race condition in local monitoring when using `call`
      • +
      • [DP-94] - mask does not work correctly if unmask is called by another process
      • +
      + +

      Improvements

      +
        +
      • [DP-20] - Improve efficiency of local message passing
      • +
      • [DP-77] - nsend should use local communication channels
      • +
      • [DP-39] - Link Node Controller and Network Listener
      • +
      • [DP-62] - Label spawned processes using labelThread
      • +
      • [DP-85] - Relax upper bound on syb in the cabal manifest
      • +
      • [DP-78] - Bump binary version to include 0.7.*
      • +
      • [DP-91] - Move tests to https://github.com/haskell-distributed/distributed-process-tests
      • +
      • [DP-92] - Expose process info
      • +
      • [DP-92] - Expose node statistics
      • +
      + +

      New Features

      +
        +
      • [DP-7] - Polymorphic expect (see details here)
      • +
      • [DP-57] - Expose Message and broaden the scope of polymorphic expect
      • +
      • [DP-84] - Provide an API for working with internal (system) events
      • +
      • [DP-83] - Report node statistics for monitoring/management
      • +
      + + +[1]: https://hackage.haskell.org/package/distributed-process-0.5.0/docs/Control-Distributed-Process-UnsafePrimitives.html +[2]: https://hackage.haskell.org/package/distributed-process-0.5.0/docs/Control-Distributed-Process-Management.html +[3]: https://hackage.haskell.org/package/distributed-process-0.5.0/docs/Control-Distributed-Process-Debug.html diff --git a/changes.md b/changes.md new file mode 100644 index 0000000..178c615 --- /dev/null +++ b/changes.md @@ -0,0 +1,31 @@ +--- +layout: changes +title: Changelog +--- + +### Viewing Changes + +Each version of each cloud haskell project has a change log, which can be +viewed by clicking on the links to the left hand side of this page. + +### Editing + +Editing this page is pretty simple. This entire website is stored in a git +repository and its dynamic content rendered by github pages using [Jekyll][1]. +You can clone the repository [here][2]. Instructions for using jekyll are +available [online][1], but in general it's just a matter of finding the right +markdown file. Wiki content is all located in the wiki subfolder. + +### Adding new content + +We plan to set up a script that pulls the Jira RSS feed and inserts content +here, however for the time being, adding a new page beneath the `changelog` +folder will be sufficient to pull a new version into the navigation menu. +Our Jira instance is set up to produce HTML release notes which can be tweaked +by hand if necessary and the front matter for change-logs can be copied from +one of the existing pages. 
+ + +[1]: https://github.com/mojombo/jekyll +[2]: https://github.com/haskell-distributed/haskell-distributed.github.com +[3]: https://github.com/mojombo/jekyll/wiki/YAML-Front-Matter diff --git a/static/templates/wikipage.md b/static/templates/wikipage.mdt similarity index 100% rename from static/templates/wikipage.md rename to static/templates/wikipage.mdt From 85d9f4f0f5f64fb23f937f380c57447be69fd7d6 Mon Sep 17 00:00:00 2001 From: Tim Watson Date: Wed, 28 May 2014 12:05:35 +0100 Subject: [PATCH 031/108] Update _config.yml --- _config.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/_config.yml b/_config.yml index 892bf81..b4a2f76 100644 --- a/_config.yml +++ b/_config.yml @@ -1,4 +1,4 @@ -exclude: [".rvmrc", ".rbenv-version", "README.md", "Rakefile", "static/templates/wikipage.mdt"] +exclude: [".rvmrc", ".rbenv-version", "README.md", "Rakefile", "static/templates/*"] lsi: false auto: true pygments: true From e5336b88811b99c4347abe01d89a20925ad36855 Mon Sep 17 00:00:00 2001 From: Tim Watson Date: Wed, 28 May 2014 12:06:44 +0100 Subject: [PATCH 032/108] oops - remove illegal char --- changelog/dp-0.4.1.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/changelog/dp-0.4.1.md b/changelog/dp-0.4.1.md index cc650ab..8b2fdad 100644 --- a/changelog/dp-0.4.1.md +++ b/changelog/dp-0.4.1.md @@ -17,7 +17,7 @@ This is a small feature release containing various enhancements.
      • Update package boundaries
      • depend on binary-0.6 instead of 0.5
      • -
      • Require http-conduit >= 1.8.1
      • +
      • Require http-conduit gt 1.8.1
      • Implement receiveChanTimeout
      • Changed semantics of register/unregister to match Erlang
      • Fixed de-registration of remote processes when the process terminates
      • From 813ecf435be962a2efc3fb96272573c2b10e3cd4 Mon Sep 17 00:00:00 2001 From: Tim Watson Date: Wed, 28 May 2014 12:07:44 +0100 Subject: [PATCH 033/108] tidy up changelog --- _layouts/changelog.html | 3 ++- changelog/dp-0.4.1.md | 16 +++-------- changelog/dp-0.4.2.md | 37 +++++++++----------------- changelog/dp-0.5.0.md | 59 ++++++++++++++++++----------------------- 4 files changed, 43 insertions(+), 72 deletions(-) diff --git a/_layouts/changelog.html b/_layouts/changelog.html index 6c946b4..3998ae1 100644 --- a/_layouts/changelog.html +++ b/_layouts/changelog.html @@ -31,7 +31,7 @@
        -

        Version {{ page.version }}

        +

        Version {{ page.version }}

        Status: {% if page.status contains 'Released' %} {{ page.status }} {% else %} @@ -41,6 +41,7 @@

        Status: {% if page.status contains 'Released' %}

        Due/Released: {{ page.date }}

        Code Changes (redirects to github)

        {{ content }} +
        Issue RSS Feed

        - - diff --git a/team.md b/team.md index c16b71d..3532de2 100644 --- a/team.md +++ b/team.md @@ -27,7 +27,7 @@ of Cloud Haskell as a whole. [Edsko De Vries][13], a member of Well-Typed and th author of much of the new implementation we have today, is still closely involved as well. -[Tim][6] is the primary author and maintainer of [disributed-process-platform][8]; +[Tim][6] is the primary author and maintainer of [distributed-process][8]; an effort to port many of the benefits of Erlang's [Open Telecom Platform][10] to the Cloud Haskell ecosystem. @@ -48,7 +48,7 @@ Duncan Coutts, Simon Marlow, Ryan Newton, Eric Kow, Adam Foltzer, Nicolas Wu [5]: http://www.haskell.org/haskellwiki/Parallel_GHC_Project [6]: https://github.com/hyperthunk [7]: https://github.com/jepst -[8]: https://github.com/haskell-distributed/disributed-process-platform +[8]: https://github.com/haskell-distributed/distributed-process [9]: http://hackage.haskell.org/trac/ghc/wiki/Contributors [10]: http://en.wikipedia.org/wiki/Open_Telecom_Platform [11]: https://github.com/jepst/distributed-process-global diff --git a/tutorials/1ch.md b/tutorials/1ch.md index 41f00e1..4eb07d2 100644 --- a/tutorials/1ch.md +++ b/tutorials/1ch.md @@ -138,7 +138,7 @@ main = do -- Die immediately - throws a ProcessExitException with the given reason. Nothing -> die "nothing came back!" Just s -> say $ "got " ++ s ++ " back!" - + -- Without the following delay, the process sometimes exits before the messages are exchanged. 
liftIO $ threadDelay 2000000 {% endhighlight %} @@ -289,9 +289,9 @@ different ways: ------ -[1]: /static/doc/distributed-process/Control-Distributed-Process.html#v:Message -[2]: http://hackage.haskell.org/package/distributed-process -[3]: /static/doc/distributed-process-platform/Control-Distributed-Process-Platform-Async.html -[4]: /static/doc/distributed-process-platform/Control-Distributed-Process-Platform-ManagedProcess.htmlv:callAsync -[5]: http://hackage.haskell.org/packages/archive/distributed-process/latest/doc/html/Control-Distributed-Process-Internal-Primitives.html#t:Match -[6]: http://hackage.haskell.org/packages/archive/distributed-process/latest/doc/html/Control-Distributed-Process-Closure.html +[1]: https://hackage.haskell.org/package/distributed-process-0.6.6/docs/Control-Distributed-Process.html#t:Message +[2]: https://hackage.haskell.org/package/distributed-process +[3]: https://hackage.haskell.org/package/distributed-process-async/docs/Control-Distributed-Process-Async.html +[4]: https://hackage.haskell.org/package/distributed-process-client-server-0.1.3.2/docs/Control-Distributed-Process-ManagedProcess-Client.html#v:callAsync +[5]: https://hackage.haskell.org/packages/archive/distributed-process/latest/doc/html/Control-Distributed-Process-Internal-Primitives.html#t:Match +[6]: https://hackage.haskell.org/packages/archive/distributed-process/latest/doc/html/Control-Distributed-Process-Closure.html diff --git a/tutorials/3ch.md b/tutorials/3ch.md index 680421e..c5b9dc7 100644 --- a/tutorials/3ch.md +++ b/tutorials/3ch.md @@ -177,9 +177,9 @@ proxy :: Serializable a => ProcessId -> (a -> Process Bool) -> Process () {% endhighlight %} Since `matchAny` operates on `(Message -> Process b)` and `handleMessage` operates on -`a -> Process b` we can compose these to make our proxy server. We must not forward +`a -> Process b` we can compose these to make our proxy server. 
We must not forward messages for which the predicate function evaluates to `Just False`, nor can we sensibly -forward messages which the predicate function is unable to evaluate due to type +forward messages which the predicate function is unable to evaluate due to type incompatibility. This leaves us with the definition found in distributed-process: {% highlight haskell %} @@ -197,7 +197,7 @@ proxy pid proc = do Beyond simple relays and proxies, the raw message handling capabilities available in distributed-process can be utilised to develop highly generic message processing code. -All the richness of the distributed-process-platform APIs (such as `ManagedProcess`) which +All the richness of the distributed-process-client-server APIs (such as `ManagedProcess`) which will be discussed in later tutorials are, in fact, built upon these families of primitives. ### Typed Channels @@ -234,10 +234,10 @@ is terminated. The `ProcessExitException` signal is sent from one process to another, indicating that the receiver is being asked to terminate. A process can choose to tell itself to exit, and the -[`die`][7] primitive simplifies doing so without worrying about the expected type for the +[`die`][7] primitive simplifies doing so without worrying about the expected type for the action. In fact, [`die`][7] has slightly different semantics from [`exit`][5], since the latter involves sending an internal signal to the local node controller. A direct consequence -of this is that the _exit signal_ may not arrive immediately, since the _Node Controller_ could +of this is that the _exit signal_ may not arrive immediately, since the _Node Controller_ could be busy processing other events. On the other hand, the [`die`][7] primitive throws a `ProcessExitException` directly in the calling thread, thus terminating it without delay. In practise, this means the following two functions could behave quite differently at @@ -247,19 +247,19 @@ runtime: -- this will never print anything... 
demo1 = die "Boom" >> expect >>= say - + -- this /might/ print something before it exits demo2 = do self <- getSelfPid exit self "Boom" - expect >>= say + expect >>= say {% endhighlight %} The `ProcessExitException` type holds a _reason_ field, which is serialised as a raw `Message`. This exception type is exported, so it is possible to catch these _exit signals_ and decide how to respond to them. Catching _exit signals_ is done via a set of primitives in distributed-process, and the use of them forms a key component of the various fault tolerance -strategies provided by distributed-process-platform. +strategies provided by distributed-process-supervisor. A `ProcessKillException` is intended to be an _untrappable_ exit signal, so its type is not exported and therefore you can __only__ handle it by catching all exceptions, which @@ -296,7 +296,7 @@ special case. Since link exit signals cannot be caught directly, if you find you to _trap_ a link failure, you probably want to use a monitor instead. Whilst the built-in `link` primitive terminates the link-ee regardless of exit reason, -distributed-process-platform provides an alternate function `linkOnFailure`, which only +distributed-process-extras provides an alternate function `linkOnFailure`, which only dispatches the `ProcessLinkException` if the link-ed process dies abnormally (i.e., with some `DiedReason` other than `DiedNormal`). @@ -305,7 +305,7 @@ putting a `ProcessMonitorNotification` into the process' mailbox. This signal an constituent fields can be introspected in order to decide what action (if any) the receiver can/should take in response to the monitored process' death. Let's take a look at how monitors can be used to determine both when and _how_ a process has terminated. 
Tucked -away in distributed-process-platform, the `linkOnFailure` primitive works in exactly this +away in distributed-process-extras, the `linkOnFailure` primitive works in exactly this way, only terminating the caller if the subject terminates abnormally. Let's take a look... {% highlight haskell %} @@ -366,17 +366,17 @@ process. The `ProcessInfo` type it returns contains the local node id and a list registered names, monitors and links for the process. The call returns `Nothing` if the process in question is not alive. -### Monad Transformer Stacks +### Monad Transformer Stacks -It is not generally necessary, but it may be convenient in your application to use a -custom monad transformer stack with the Process monad at the bottom. For example, +It is not generally necessary, but it may be convenient in your application to use a +custom monad transformer stack with the Process monad at the bottom. For example, you may have decided that in various places in your application you will make calls to a network database. You may create a data access module, and it will need configuration information available to it in -order to connect to the database server. A ReaderT can be a nice way to make +order to connect to the database server. A ReaderT can be a nice way to make configuration data available throughout an application without -schlepping it around by hand. +schlepping it around by hand. -This example is a bit contrived and over-simplified but +This example is a bit contrived and over-simplified but illustrates the concept. Consider the `fetchUser` function below, it runs in the `AppProcess` monad which provides the configuration settings required to connect to the database: @@ -409,7 +409,7 @@ openDB = do closeDB :: DB.Connection -> AppProcess () closeDB db = liftIO (DB.close db) - + {% endhighlight %} So this would mostly work but it is not complete. 
What happens if an exception @@ -423,17 +423,17 @@ In the base library, [bracket][brkt] is defined in Control.Exception with this s bracket :: IO a --^ computation to run first ("acquire resource") -> (a -> IO b) --^ computation to run last ("release resource") -> (a -> IO c) --^ computation to run in-between - -> IO c + -> IO c {% endhighlight %} Great! We pass an IO action that acquires a resource; `bracket` passes that resource to a function which takes the resource and runs another action. -We also provide a release function which `bracket` is guaranteed to run -even if the primary action raises an exception. +We also provide a release function which `bracket` is guaranteed to run +even if the primary action raises an exception. -Unfortunately, we cannot directly use `bracket` in our +Unfortunately, we cannot directly use `bracket` in our `fetchUser` function: openDB (resource acquisition) runs in the `AppProcess` monad. If our functions ran in IO, we could lift the entire bracket computation into our monad transformer stack with liftIO; but we cannot do that for the computations @@ -473,7 +473,7 @@ onException p what = p `catch` \e -> do _ <- what `distributed-process` needs to do this sort of thing to keep its dependency list small, but do we really want to write this for every transformer stack -we use in our own applications? No! And we do not have to, thanks to +we use in our own applications? No! And we do not have to, thanks to the [monad-control][mctrl] and [lifted-base][lbase] libraries. 
[monad-control][mctrl] provides several typeclasses and helper functions @@ -489,17 +489,17 @@ bracket that looks like this: {% highlight haskell %} -bracket :: MonadBaseControl IO m +bracket :: MonadBaseControl IO m => m a --^ computation to run first ("acquire resource") -> (a -> m b) --^ computation to run last ("release resource") -> (a -> m c) --^ computation to run in-between - -> m c + -> m c {% endhighlight %} It is just the same as the version found in base, except it is generalized to work with actions in any monad that implements [MonadBaseControl IO][mbc]. [monad-control][mctrl] defines -instances for the standard transformers, but that instance requires the base monad +instances for the standard transformers, but that instance requires the base monad (in this case, `Process`) to also have an instance of these classes. To address this the [distributed-process-monad-control][dpmc] package @@ -520,7 +520,7 @@ fetchUser email = Lifted.bracket openDB closeDB $ \db -> liftIO $ DB.query db email - + {% endhighlight %} diff --git a/tutorials/4ch.md b/tutorials/4ch.md index 83dc0ef..26c92c0 100644 --- a/tutorials/4ch.md +++ b/tutorials/4ch.md @@ -8,9 +8,9 @@ title: 4. Managed Process Tutorial ### Introduction The source code for this tutorial is based on the `BlockingQueue` API -from distributed-process-platform and can be accessed [here][1]. +from distributed-process-task and can be accessed [here][1]. Please note that this tutorial is based on the stable (master) branch -of [distributed-process-platform][3]. +of [distributed-process-task][3]. ### Managed Processes @@ -40,8 +40,8 @@ code to be run on termination/shutdown. 
{% highlight haskell %} myServer :: ProcessDefinition MyStateType -myServer = - ProcessDefinition { +myServer = + ProcessDefinition { -- handle messages sent to us via the call/cast API functions apiHandlers = [ -- a list of Dispatchers, derived by calling on of the various @@ -113,7 +113,7 @@ that math server that does just that: ---- {% highlight haskell %} -module MathServer +module MathServer ( -- client facing API add -- starting/spawning the server process @@ -241,9 +241,9 @@ more interesting and useful. ### Building a Task Queue This section of the tutorial is based on a real module from the -distributed-process-platform library, called `BlockingQueue`. +distributed-process-task library, called `BlockingQueue`. -Let's imagine we want to execute tasks on an arbitrary node, but want +Let's imagine we want to execute tasks on an arbitrary node, but want the caller to block whilst the remote task is executing. We also want to put an upper bound on the number of concurrent tasks/callers that the server will accept. Let's use `ManagedProcess` to implement a generic @@ -275,7 +275,7 @@ typeclass to allow clients to specify the server's location in whatever manner suits them: The type of a task will be `Closure (Process a)` and the server will explicitly return an /either/ value with `Left String` for errors and `Right a` for successful results. - + {% highlight haskell %} -- enqueues the task in the pool and blocks -- the caller until the task is complete @@ -663,8 +663,8 @@ another to monitor the first and handle failures and/or cancellation. Spawning processes is cheap, but not free as each process is a haskell thread, plus some additional book keeping data. 
-[1]: https://github.com/haskell-distributed/distributed-process-platform/blob/master/src/Control/Distributed/Process/Platform/Task/Queue/BlockingQueue.hs -[2]: /static/doc/distributed-process-platform/Control-Distributed-Process-Platform-ManagedProcess.html#t:ProcessDefinition -[3]: https://github.com/haskell-distributed/distributed-process-platform/tree/master/ -[4]: https://github.com/haskell-distributed/distributed-process-platform/tree/master/src/Control/Distributed/Process/Platform/UnsafePrimitives.hs +[1]: https://github.com/haskell-distributed/distributed-process-task/blob/master/src/Control/Distributed/Process/Task/Queue/BlockingQueue.hs +[2]: https://hackage.haskell.org/package/distributed-process-client-server-0.1.3.2/docs/Control-Distributed-Process-ManagedProcess.html#t:ProcessDefinition +[3]: https://github.com/haskell-distributed/distributed-process-task +[4]: https://github.com/haskell-distributed/distributed-process-extras/blob/master/src/Control/Distributed/Process/Extras/UnsafePrimitives.hs [5]: /documentation.html diff --git a/tutorials/5ch.md b/tutorials/5ch.md index 1a56c3f..1811c8f 100644 --- a/tutorials/5ch.md +++ b/tutorials/5ch.md @@ -56,7 +56,7 @@ triggered the shutdown/terminate sequence for the supervisor's process explicitl When a supervisor is told directly to terminate a child process, it uses the `ChildTerminationPolicy` to determine whether the child should be terminated _gracefully_ or _brutally killed_. This _shutdown protocol_ is used throughout -[distributed-process-platform][dpp] and in order for a child process to be managed +[distributed-process-supervisor][dpp] and in order for a child process to be managed effectively by its supervisor, it is imperative that it understands the protocol. When a _graceful_ shutdown is required, the supervisor will send an exit signal to the child process, with the `ExitReason` set to `ExitShutdown`, whence the child process is @@ -70,7 +70,7 @@ provide a timeout value. 
The supervisor attempts a _graceful_ shutdown initially; however,
if the child does not exit within the given time window, the supervisor will automatically
revert to a _brutal kill_ using `TerminateImmediately`. If the timeout value is set to
`Infinity`, the supervisor will wait indefinitely for the
-child to exit cleanly. 
+child to exit cleanly.
 
 When a supervisor detects a child exit, it will attempt a restart. Whilst explicitly
 terminating a child will **only** terminate the specified child process, unexpected
@@ -142,11 +142,10 @@ order, otherwise the dependent children might crash whilst we're restarting othe
 rely on. It follows that, in this setup, we cannot subsequently (re)start the children
 in the same order we stopped them either.
 
-[dpp]: https://github.com/haskell-distributed/distributed-process-platform
+[dpp]: https://github.com/haskell-distributed/distributed-process-supervisor
 [sup1]: /img/one-for-one.png
 [sup2]: /img/one-for-all.png
 [sup3]: /img/one-for-all-left-to-right.png
 [alert]: /img/alert.png
 [info]: /img/info.png
 [erlsup]: http://www.erlang.org/doc/man/supervisor.html
-
diff --git a/tutorials/6ch.md b/tutorials/6ch.md
index 1269745..29a32c6 100644
--- a/tutorials/6ch.md
+++ b/tutorials/6ch.md
@@ -56,7 +56,7 @@ look at this in action, revisiting the well-trodden _math server_ example
 from our previous tutorials:
 
 {% highlight haskell %}
-module MathServer 
+module MathServer
 ( -- client facing API
   MathServer()
 , add
@@ -561,20 +561,19 @@ the client using the send ports supplied in the request data.
 > passes data to them (via the `SendPort`) is bound to exactly the same type(s)!
 > Furthermore, adding reply channels (in the form of a `SendPort`) to the request
 > types ensures that the replies will be handled correctly as well!
As a result, -> there can be no ambiguity about the types involved for _either_ side of the +> there can be no ambiguity about the types involved for _either_ side of the > client-server relationship and therefore no unhandled messages due to runtime > type mismatches - the compiler will catch that sort of thing for us! ------ -[1]: http://hackage.haskell.org/package/distributed-process-platform/Control-Distributed-Process-Platform-Service-Registry.html -[2]: http://hackage.haskell.org/package/distributed-process-platform/Control-Distributed-Process-Platform-Async.html -[3]: http://hackage.haskell.org/package/distributed-process-platform/Control-Distributed-Process-Platform-Service-SystemLog.html -[mgmt]: http://hackage.haskell.org/package/distributed-process/Control-Distributed-Process-Management.html -[dbg]: http://hackage.haskell.org/package/distributed-process/Control-Distributed-Process-Debug.html -[rtbl]: http://hackage.haskell.org/package/distributed-proces-platforms/Control-Distributed-Process-Platform.html#t:Routable -[rsbl]: http://hackage.haskell.org/package/distributed-process-platform/Control-Distributed-Process-Platform.html#t:Resolvable +[1]: https://hackage.haskell.org/package/distributed-process-registry/docs/Control-Distributed-Process-Registry.html +[2]: https://hackage.haskell.org/package/distributed-process-async/docs/Control-Distributed-Process-Async.html +[3]: https://hackage.haskell.org/package/distributed-process-extras/docs/Control-Distributed-Process-Extras-SystemLog.html +[mgmt]: https://hackage.haskell.org/package/distributed-process/Control-Distributed-Process-Management.html +[dbg]: https://hackage.haskell.org/package/distributed-process/Control-Distributed-Process-Debug.html +[rtbl]: https://hackage.haskell.org/package/distributed-process-extras/docs/Control-Distributed-Process-Extras.html#t:Routable +[rsbl]: https://hackage.haskell.org/package/distributed-process-extras/docs/Control-Distributed-Process-Extras.html#t:Resolvable 
[alert]: /img/alert.png [info]: /img/info.png -[policy]: http://hackage.haskell.org/package/distributed-process-platform/Control-Distributed-Process-Platform-ManagedProcess.html#t:UnhandledMessagePolicy -[mailbox]: http://hackage.haskell.org/package/distributed-process-platform/Control-Distributed-Process-Platform-Execution-Mailbox.html - +[policy]: https://hackage.haskell.org/package/distributed-process-client-server/docs/Control-Distributed-Process-ManagedProcess.html#v:unhandledMessagePolicy +[mailbox]: https://hackage.haskell.org/package/distributed-process-execution-0.1.2.2/docs/Control-Distributed-Process-Execution-Mailbox.html From a5312ac246af5b5ed23338a1fda143baf98496a3 Mon Sep 17 00:00:00 2001 From: Tom Hunger Date: Thu, 9 Feb 2017 11:03:34 +0000 Subject: [PATCH 085/108] Opinionated: Remove changelog completely. It's way out of date. --- _includes/nav.html | 1 - _layouts/changelog.html | 59 ----------------------------- _layouts/changes.html | 53 -------------------------- changelog/dp-0.4.1.md | 23 ------------ changelog/dp-0.4.2.md | 28 -------------- changelog/dp-0.5.0.md | 61 ------------------------------ changelog/dpp-0.1.0.md | 83 ----------------------------------------- changelog/ds-0.3.0.0.md | 17 --------- changelog/nt-0.4.0.0.md | 24 ------------ changelog/rd-0.2.0.0.md | 19 ---------- index.md | 2 - 11 files changed, 370 deletions(-) delete mode 100644 _layouts/changelog.html delete mode 100644 _layouts/changes.html delete mode 100644 changelog/dp-0.4.1.md delete mode 100644 changelog/dp-0.4.2.md delete mode 100644 changelog/dp-0.5.0.md delete mode 100644 changelog/dpp-0.1.0.md delete mode 100644 changelog/ds-0.3.0.0.md delete mode 100644 changelog/nt-0.4.0.0.md delete mode 100644 changelog/rd-0.2.0.0.md diff --git a/_includes/nav.html b/_includes/nav.html index 2fa93b1..4e45e25 100644 --- a/_includes/nav.html +++ b/_includes/nav.html @@ -32,7 +32,6 @@