diff --git a/.github/workflows/network-test.yaml b/.github/workflows/network-test.yaml new file mode 100644 index 00000000000..5c45204b501 --- /dev/null +++ b/.github/workflows/network-test.yaml @@ -0,0 +1,132 @@ +name: "Network fault tolerance" + +on: + pull_request: + workflow_dispatch: + inputs: + debug_enabled: + type: boolean + description: 'Run the build with tmate debugging enabled (https://github.com/marketplace/actions/debugging-with-tmate)' + required: false + default: false + +jobs: + network-test: + runs-on: ubuntu-latest + strategy: + matrix: + # Note: At present we can only run for 3 peers; to configure this for + # more we need to make the docker-compose spin-up dynamic across + # however many we would like to configure here. + # Currently this is just a label and does not have any functional impact. + peers: [3] + scaling_factor: [10, 50] + netem_loss: [0, 1, 2, 3, 4, 5, 10, 20] + name: "Peers: ${{ matrix.peers }}, scaling: ${{ matrix.scaling_factor }}, loss: ${{ matrix.netem_loss }}" + steps: + - uses: actions/checkout@v4 + with: + submodules: true + + - name: ❄ Prepare nix + uses: cachix/install-nix-action@V27 + with: + extra_nix_config: | + accept-flake-config = true + log-lines = 1000 + + - name: ❄ Cachix cache of nix derivations + uses: cachix/cachix-action@v15 + with: + name: cardano-scaling + authToken: '${{ secrets.CACHIX_CARDANO_SCALING_AUTH_TOKEN }}' + + - name: Build docker images for netem specifically + run: | + nix build .#docker-hydra-node-for-netem + ./result | docker load + + - name: Setup containers for network testing + run: | + cd demo + ./prepare-devnet.sh + docker compose up -d cardano-node + sleep 5 + # :tear: socket permissions. + sudo chown runner:docker devnet/node.socket + ./export-tx-id-and-pparams.sh + # Specify two docker compose yamls; the second one overrides the + # images to use the netem ones specifically + docker compose -f docker-compose.yaml -f docker-compose-netem.yaml up -d hydra-node-{1,2,3} + sleep 3 + docker ps + + - name: Build required nix and docker derivations + run: | + nix build .#legacyPackages.x86_64-linux.hydra-cluster.components.benchmarks.bench-e2e + nix build github:noonio/pumba/noon/add-flake + + # Use tmate to get a shell onto the runner to do some temporary hacking + # + # + # + - name: Setup tmate session + uses: mxschmitt/action-tmate@v3 + if: ${{ github.event_name == 'workflow_dispatch' && github.event.inputs.debug_enabled }} + with: + limit-access-to-actor: true + + - name: Run pumba and the benchmarks + # Note: We're going to allow everything to fail. In the job on GitHub, + # we will be able to see which ones _did_, in fact, fail. Originally, + # we were keeping track of our expectations with 'include' and + # 'exclude' directives here, but I think it's best to leave those out, + # as some of the tests (say 5%) fail, and overall the conditions of + # failure depend on the scaling factor, the peers, etc, and it becomes + # too complicated to track here. 
+        continue-on-error: true
+        run: |
+          # Inputs taken from the job matrix
+          percent="${{ matrix.netem_loss }}"
+          scaling_factor="${{ matrix.scaling_factor }}"
+          target_peer="hydra-node-1"
+          other_peers="172.16.238.20 172.16.238.30"
+
+          .github/workflows/network/run_pumba.sh $target_peer $percent $other_peers
+
+          # Run benchmark on demo
+          mkdir benchmarks
+          touch benchmarks/test.log
+
+          nix run .#legacyPackages.x86_64-linux.hydra-cluster.components.benchmarks.bench-e2e -- \
+            demo \
+            --output-directory=benchmarks \
+            --scaling-factor="$scaling_factor" \
+            --timeout=1000s \
+            --testnet-magic 42 \
+            --node-socket=demo/devnet/node.socket \
+            --hydra-client=localhost:4001 \
+            --hydra-client=localhost:4002 \
+            --hydra-client=localhost:4003
+
+      - name: Acquire logs
+        if: always()
+        run: |
+          cd demo
+          docker compose logs > docker-logs
+
+      - name: 💾 Upload logs
+        if: always()
+        uses: actions/upload-artifact@v4
+        with:
+          name: "docker-logs-netem-loss=${{ matrix.netem_loss }},scaling_factor=${{ matrix.scaling_factor }},peers=${{ matrix.peers }}"
+          path: demo/docker-logs
+          if-no-files-found: ignore
+
+      - name: 💾 Upload build & test artifacts
+        if: always()
+        uses: actions/upload-artifact@v4
+        with:
+          name: "benchmarks-netem-loss=${{ matrix.netem_loss }},scaling_factor=${{ matrix.scaling_factor }},peers=${{ matrix.peers }}"
+          path: benchmarks
+          if-no-files-found: ignore
diff --git a/.github/workflows/network/run_pumba.sh b/.github/workflows/network/run_pumba.sh
new file mode 100755
index 00000000000..06ee7549aa0
--- /dev/null
+++ b/.github/workflows/network/run_pumba.sh
@@ -0,0 +1,23 @@
+#!/usr/bin/env bash
+
+target_node_name=$1
+
+percent=$2
+
+# All remaining arguments are the other peers' addresses; print one per line
+# so the loop below adds a separate `--target` for each of them.
+rest_node_names=$(printf '%s\n' "${@:3}")
+
+# Build Pumba netem command
+# Note: We let it run for 20 minutes, which is effectively unlimited; we don't
+# expect any of our tests to run longer than that.
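+# As a worked example, with target "hydra-node-1", loss percent "10" and
+# peers "172.16.238.20 172.16.238.30" (the values used by the CI matrix above),
+# the command evaluated below comes out roughly as:
+#
+#   nix run github:noonio/pumba/noon/add-flake -- -l debug netem --duration 20m \
+#     --target 172.16.238.20 --target 172.16.238.30 \
+#     loss --percent "10" "re2:hydra-node-1" &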
+nix_command="nix run github:noonio/pumba/noon/add-flake -- -l debug netem --duration 20m" + +while IFS= read -r network; do + nix_command+=" --target $network" +done <<< "$rest_node_names" + +nix_command+=" loss --percent \"$percent\" \"re2:$target_node_name\" &" + +echo "$nix_command" + +# Run Pumba netem command +eval "$nix_command" diff --git a/demo/.gitignore b/demo/.gitignore new file mode 100644 index 00000000000..66a99916b32 --- /dev/null +++ b/demo/.gitignore @@ -0,0 +1,2 @@ +/benchmarks +/datasets diff --git a/demo/docker-compose-netem.yaml b/demo/docker-compose-netem.yaml new file mode 100644 index 00000000000..dcf1f850572 --- /dev/null +++ b/demo/docker-compose-netem.yaml @@ -0,0 +1,9 @@ +services: + hydra-node-1: + image: hydra-node-for-netem + + hydra-node-2: + image: hydra-node-for-netem + + hydra-node-3: + image: hydra-node-for-netem diff --git a/demo/docker-compose.yaml b/demo/docker-compose.yaml index 139ff66767d..eb8da146d96 100644 --- a/demo/docker-compose.yaml +++ b/demo/docker-compose.yaml @@ -48,6 +48,8 @@ services: , "--ledger-protocol-parameters", "/devnet/protocol-parameters.json" , "--testnet-magic", "42" , "--node-socket", "/devnet/node.socket" + , "--persistence-dir", "/devnet/persistence/alice" + , "--contestation-period", "3" ] networks: hydra_net: @@ -83,6 +85,8 @@ services: , "--ledger-protocol-parameters", "/devnet/protocol-parameters.json" , "--testnet-magic", "42" , "--node-socket", "/devnet/node.socket" + , "--persistence-dir", "/devnet/persistence/bob" + , "--contestation-period", "3" ] networks: hydra_net: @@ -118,6 +122,8 @@ services: , "--ledger-protocol-parameters", "/devnet/protocol-parameters.json" , "--testnet-magic", "42" , "--node-socket", "/devnet/node.socket" + , "--persistence-dir", "/devnet/persistence/carol" + , "--contestation-period", "3" ] networks: hydra_net: @@ -188,7 +194,6 @@ services: hydra_net: ipv4_address: 172.16.238.5 - networks: hydra_net: driver: bridge diff --git a/demo/export-tx-id-and-pparams.sh b/demo/export-tx-id-and-pparams.sh new file mode 100755 index 00000000000..37707ecabbd --- /dev/null +++ b/demo/export-tx-id-and-pparams.sh @@ -0,0 +1,71 @@ +#!/usr/bin/env bash + +set -eo pipefail + +SCRIPT_DIR=${SCRIPT_DIR:-$(realpath $(dirname $(realpath $0)))} +NETWORK_ID=42 + +CCLI_CMD= +DEVNET_DIR=/devnet +if [[ -n ${1} ]]; then + echo >&2 "Using provided cardano-cli command: ${1}" + $(${1} version > /dev/null) + CCLI_CMD=${1} + DEVNET_DIR=${SCRIPT_DIR}/devnet +fi + +HYDRA_NODE_CMD= +if [[ -n ${2} ]]; then + echo >&2 "Using provided hydra-node command: ${2}" + ${2} --version > /dev/null + HYDRA_NODE_CMD=${2} +fi + +# Invoke hydra-node in a container or via provided executable +function hnode() { + if [[ -n ${HYDRA_NODE_CMD} ]]; then + ${HYDRA_NODE_CMD} ${@} + else + docker run --rm \ + --pull always \ + -v ${SCRIPT_DIR}/devnet:/devnet \ + ghcr.io/cardano-scaling/hydra-node:0.18.1 -- ${@} + fi +} + +function publishReferenceScripts() { + echo >&2 "Publishing reference scripts..." 
+ hnode publish-scripts \ + --testnet-magic ${NETWORK_ID} \ + --node-socket ${DEVNET_DIR}/node.socket \ + --cardano-signing-key devnet/credentials/faucet.sk +} + +# Invoke cardano-cli in running cardano-node container or via provided cardano-cli +function ccli() { + ccli_ ${@} --testnet-magic ${NETWORK_ID} +} +function ccli_() { + if [[ -x ${CCLI_CMD} ]]; then + ${CCLI_CMD} ${@} + else + ${DOCKER_COMPOSE_CMD} exec cardano-node cardano-cli ${@} + fi +} + +function queryPParams() { + echo >&2 "Query Protocol parameters" + if [[ -x ${CCLI_CMD} ]]; then + ccli query protocol-parameters --socket-path ${DEVNET_DIR}/node.socket --out-file /dev/stdout \ + | jq ".txFeeFixed = 0 | .txFeePerByte = 0 | .executionUnitPrices.priceMemory = 0 | .executionUnitPrices.priceSteps = 0" > devnet/protocol-parameters.json + else + docker exec demo-cardano-node-1 cardano-cli query protocol-parameters --testnet-magic ${NETWORK_ID} --socket-path ${DEVNET_DIR}/node.socket --out-file /dev/stdout \ + | jq ".txFeeFixed = 0 | .txFeePerByte = 0 | .executionUnitPrices.priceMemory = 0 | .executionUnitPrices.priceSteps = 0" > devnet/protocol-parameters.json + fi + echo >&2 "Saved in protocol-parameters.json" +} + +queryPParams +echo "HYDRA_SCRIPTS_TX_ID=$(publishReferenceScripts)" > .env +echo >&2 "Environment variable stored in '.env'" +echo >&2 -e "\n\t$(cat .env)\n" diff --git a/demo/seed-devnet.sh b/demo/seed-devnet.sh index 811813de238..a7a9c32c024 100755 --- a/demo/seed-devnet.sh +++ b/demo/seed-devnet.sh @@ -43,18 +43,6 @@ function ccli_() { fi } -# Invoke hydra-node in a container or via provided executable -function hnode() { - if [[ -n ${HYDRA_NODE_CMD} ]]; then - ${HYDRA_NODE_CMD} ${@} - else - docker run --rm -it \ - --pull always \ - -v ${SCRIPT_DIR}/devnet:/devnet \ - ghcr.io/cardano-scaling/hydra-node:0.18.1 -- ${@} - fi -} - # Retrieve some lovelace from faucet function seedFaucet() { ACTOR=${1} @@ -89,26 +77,6 @@ function seedFaucet() { echo >&2 "Done" } -function publishReferenceScripts() { - echo >&2 "Publishing reference scripts..." - hnode publish-scripts \ - --testnet-magic ${NETWORK_ID} \ - --node-socket ${DEVNET_DIR}/node.socket \ - --cardano-signing-key devnet/credentials/faucet.sk -} - -function queryPParams() { - echo >&2 "Query Protocol parameters" - if [[ -x ${CCLI_CMD} ]]; then - ccli query protocol-parameters --socket-path ${DEVNET_DIR}/node.socket --out-file /dev/stdout \ - | jq ".txFeeFixed = 0 | .txFeePerByte = 0 | .executionUnitPrices.priceMemory = 0 | .executionUnitPrices.priceSteps = 0" > devnet/protocol-parameters.json - else - docker exec demo-cardano-node-1 cardano-cli query protocol-parameters --testnet-magic ${NETWORK_ID} --socket-path ${DEVNET_DIR}/node.socket --out-file /dev/stdout \ - | jq ".txFeeFixed = 0 | .txFeePerByte = 0 | .executionUnitPrices.priceMemory = 0 | .executionUnitPrices.priceSteps = 0" > devnet/protocol-parameters.json - fi - echo >&2 "Saved in protocol-parameters.json" -} - echo >&2 "Fueling up hydra nodes of alice, bob and carol..." seedFaucet "alice" 30000000 # 30 Ada to the node seedFaucet "bob" 30000000 # 30 Ada to the node @@ -117,7 +85,5 @@ echo >&2 "Distributing funds to alice, bob and carol..." 
seedFaucet "alice-funds" 100000000 # 100 Ada to commit seedFaucet "bob-funds" 50000000 # 50 Ada to commit seedFaucet "carol-funds" 25000000 # 25 Ada to commit -queryPParams -echo "HYDRA_SCRIPTS_TX_ID=$(publishReferenceScripts)" > .env -echo >&2 "Environment variable stored in '.env'" -echo >&2 -e "\n\t$(cat .env)\n" + +./export-tx-id-and-pparams.sh diff --git a/hydra-cluster/README.md b/hydra-cluster/README.md index b5433128703..a64fb02e032 100644 --- a/hydra-cluster/README.md +++ b/hydra-cluster/README.md @@ -140,3 +140,14 @@ The benchmark can be run in two modes corresponding to two different commands: * `datasets`: Runs one or more preexisting _datasets_ in sequence and collect their results in a single markdown formatted file. This is useful to track the evolution of hydra-node's performance over some well-known datasets over time and produce a human-readable summary. Check out `cabal bench --benchmark-options --help` for more details. + +# Network Testing + +The benchmark can be also run over the running `demo` hydra-cluster, using `cabal bench` and produces a +`results.csv` file in a work directory. Same as for benchmarks results, you can use the `bench/plot.sh` script to plot the transaction confirmation times. + +To run the benchmark in this mode, the command is: +* `demo`: Runs a single _dataset_ freshly generated and collects its results in a markdown formatted file. The purpose of this setup is to facilitate a variaty of network-resiliance scenarios, such as packet loss or node failures. This is useful to prove the robustness and performance of the hydra-node's network over time and produce a human-readable summary. + +For instance, we make use of this in our [CI](https://github.com/cardano-scaling/hydra/blob/master/.github/workflows/network-test.yaml) to keep track for scenarios that we care about. 
+ diff --git a/hydra-cluster/bench/Bench/EndToEnd.hs b/hydra-cluster/bench/Bench/EndToEnd.hs index 3fc2cd2fd31..d9a77ee7f18 100644 --- a/hydra-cluster/bench/Bench/EndToEnd.hs +++ b/hydra-cluster/bench/Bench/EndToEnd.hs @@ -7,7 +7,7 @@ import Test.Hydra.Prelude import Bench.Summary (Summary (..), makeQuantiles) import CardanoClient (RunningNode (..), awaitTransaction, submitTransaction, submitTx) -import CardanoNode (withCardanoNodeDevnet) +import CardanoNode (findRunningCardanoNode', withCardanoNodeDevnet) import Control.Concurrent.Class.MonadSTM ( MonadSTM (readTVarIO), check, @@ -22,15 +22,16 @@ import Control.Lens (to, (^?)) import Control.Monad.Class.MonadAsync (mapConcurrently) import Data.Aeson (Result (Error, Success), Value, encode, fromJSON, (.=)) import Data.Aeson.Lens (key, _Array, _JSON, _Number, _String) +import Data.Aeson.Types (parseMaybe) import Data.List qualified as List import Data.Map qualified as Map import Data.Scientific (Scientific) import Data.Set ((\\)) import Data.Set qualified as Set import Data.Time (UTCTime (UTCTime), utctDayTime) -import Hydra.Cardano.Api (Tx, TxId, UTxO, getVerificationKey, signTx) -import Hydra.Cluster.Faucet (FaucetLog, publishHydraScriptsAs, seedFromFaucet) -import Hydra.Cluster.Fixture (Actor (Faucet)) +import Hydra.Cardano.Api (NetworkId, SocketPath, Tx, TxId, UTxO, getVerificationKey, signTx) +import Hydra.Cluster.Faucet (FaucetLog (..), publishHydraScriptsAs, returnFundsToFaucet', seedFromFaucet) +import Hydra.Cluster.Fixture (Actor (..)) import Hydra.Cluster.Scenarios ( EndToEndLog (..), headIsInitializingWith, @@ -38,22 +39,12 @@ import Hydra.ContestationPeriod (ContestationPeriod (UnsafeContestationPeriod)) import Hydra.Crypto (generateSigningKey) import Hydra.Generator (ClientDataset (..), ClientKeys (..), Dataset (..)) +import Hydra.HeadId (HeadId) import Hydra.Ledger (txId) -import Hydra.Logging (Tracer, withTracerOutputTo) -import Hydra.Party (deriveParty) -import HydraNode ( - HydraClient, - hydraNodeId, - input, - output, - requestCommitTx, - send, - waitFor, - waitForAllMatch, - waitForNodesConnected, - waitMatch, - withHydraCluster, - ) +import Hydra.Logging (Tracer, traceWith, withTracerOutputTo) +import Hydra.Network (Host) +import Hydra.Party (Party, deriveParty) +import HydraNode (HydraClient, HydraNodeLog, hydraNodeId, input, output, requestCommitTx, send, waitFor, waitForAllMatch, waitForNodesConnected, waitMatch, withConnectionToNodeHost, withHydraCluster) import System.Directory (findExecutable) import System.FilePath ((</>)) import System.IO (hGetLine, hPutStrLn) @@ -63,6 +54,7 @@ import System.Process ( proc, withCreateProcess, ) +import Test.HUnit.Lang (formatFailureReason) import Text.Printf (printf) import Text.Regex.TDFA (getAllTextMatches, (=~)) import Prelude (read) @@ -77,7 +69,7 @@ data Event = Event deriving anyclass (ToJSON) bench :: Int -> NominalDiffTime -> FilePath -> Dataset -> IO Summary -bench startingNodeId timeoutSeconds workDir dataset@Dataset{clientDatasets, title, description} = do +bench startingNodeId timeoutSeconds workDir dataset@Dataset{clientDatasets} = do putStrLn $ "Test logs available in: " <> (workDir </> "test.log") withFile (workDir </> "test.log") ReadWriteMode $ \hdl -> withTracerOutputTo hdl "Test" $ \tracer -> @@ -86,66 +78,144 @@ bench startingNodeId timeoutSeconds workDir dataset@Dataset{clientDatasets, titl let cardanoKeys = map (\ClientDataset{clientKeys = ClientKeys{signingKey}} -> (getVerificationKey signingKey, signingKey)) clientDatasets let
hydraKeys = generateSigningKey . show <$> [1 .. toInteger (length cardanoKeys)] let parties = Set.fromList (deriveParty <$> hydraKeys) - let clusterSize = fromIntegral $ length clientDatasets withOSStats workDir $ withCardanoNodeDevnet (contramap FromCardanoNode tracer) workDir $ \node@RunningNode{nodeSocket} -> do putTextLn "Seeding network" + seedNetwork node dataset (contramap FromFaucet tracer) + putTextLn "Publishing hydra scripts" + hydraScriptsTxId <- publishHydraScriptsAs node Faucet + putStrLn $ "Starting hydra cluster in " <> workDir let hydraTracer = contramap FromHydraNode tracer - hydraScriptsTxId <- seedNetwork node dataset (contramap FromFaucet tracer) let contestationPeriod = UnsafeContestationPeriod 10 - putStrLn $ "Starting hydra cluster in " <> workDir withHydraCluster hydraTracer workDir nodeSocket startingNodeId cardanoKeys hydraKeys hydraScriptsTxId contestationPeriod $ \(leader :| followers) -> do let clients = leader : followers waitForNodesConnected hydraTracer 20 clients - - putTextLn "Initializing Head" - send leader $ input "Init" [] - headId <- - waitForAllMatch (fromIntegral $ 10 * clusterSize) clients $ - headIsInitializingWith parties - - putTextLn "Comitting initialUTxO from dataset" - expectedUTxO <- commitUTxO node clients dataset - - waitFor hydraTracer (fromIntegral $ 10 * clusterSize) clients $ - output "HeadIsOpen" ["utxo" .= expectedUTxO, "headId" .= headId] - - putTextLn "HeadIsOpen" - processedTransactions <- processTransactions clients dataset - - putTextLn "Closing the Head" - send leader $ input "Close" [] - - deadline <- waitMatch 300 leader $ \v -> do - guard $ v ^? key "tag" == Just "HeadIsClosed" - guard $ v ^? key "headId" == Just (toJSON headId) - v ^? key "contestationDeadline" . _JSON - - -- Expect to see ReadyToFanout within 3 seconds after deadline - remainingTime <- diffUTCTime deadline <$> getCurrentTime - waitFor hydraTracer (remainingTime + 3) [leader] $ - output "ReadyToFanout" ["headId" .= headId] - - putTextLn "Finalizing the Head" - send leader $ input "Fanout" [] - waitMatch 100 leader $ \v -> do - guard (v ^? key "tag" == Just "HeadIsFinalized") - guard $ v ^? key "headId" == Just (toJSON headId) - - let res = mapMaybe analyze . Map.toList $ processedTransactions - aggregates = movingAverage res - - writeResultsCsv (workDir </> "results.csv") aggregates - - let confTimes = map (\(_, _, a) -> a) res - numberOfTxs = length confTimes - numberOfInvalidTxs = length $ Map.filter (isJust .
invalidAt) processedTransactions - averageConfirmationTime = sum confTimes / fromIntegral numberOfTxs - quantiles = makeQuantiles confTimes - summaryTitle = fromMaybe "Baseline Scenario" title - summaryDescription = fromMaybe defaultDescription description - - pure $ Summary{clusterSize, numberOfTxs, averageConfirmationTime, quantiles, summaryTitle, summaryDescription, numberOfInvalidTxs} + scenario hydraTracer node workDir dataset parties leader followers + +benchDemo :: + NetworkId -> + SocketPath -> + NominalDiffTime -> + [Host] -> + FilePath -> + Dataset -> + IO Summary +benchDemo networkId nodeSocket timeoutSeconds hydraClients workDir dataset@Dataset{clientDatasets} = do + putStrLn $ "Test logs available in: " <> (workDir </> "test.log") + withFile (workDir </> "test.log") ReadWriteMode $ \hdl -> + withTracerOutputTo hdl "Test" $ \tracer -> + failAfter timeoutSeconds $ do + putTextLn "Starting benchmark" + let cardanoTracer = contramap FromCardanoNode tracer + findRunningCardanoNode' cardanoTracer networkId nodeSocket >>= \case + Nothing -> + error ("No running cardano-node found at socket: " <> show nodeSocket <> " and network: " <> show networkId) + Just node -> do + putTextLn "Seeding network" + seedNetwork node dataset (contramap FromFaucet tracer) + let clientSks = clientKeys <$> clientDatasets + (`finally` returnFaucetFunds tracer node clientSks) $ do + putStrLn $ "Connecting to hydra cluster: " <> show hydraClients + let hydraTracer = contramap FromHydraNode tracer + withHydraClientConnections hydraTracer (hydraClients `zip` [1 ..]) [] $ \case + [] -> error "no hydra clients provided" + (leader : followers) -> + scenario hydraTracer node workDir dataset mempty leader followers + where + withHydraClientConnections tracer apiHosts connections action = do + case apiHosts of + [] -> action connections + ((apiHost, peerId) : rest) -> do + withConnectionToNodeHost tracer peerId apiHost (Just "/?history=no") $ \con -> do + withHydraClientConnections tracer rest (con : connections) action + + returnFaucetFunds tracer node cKeys = do + putTextLn "Returning funds to faucet" + let faucetTracer = contramap FromFaucet tracer + let senders = concatMap @[] (\(ClientKeys sk esk) -> [sk, esk]) cKeys + mapM_ + ( \sender -> do + returnAmount <- returnFundsToFaucet' faucetTracer node sender + traceWith faucetTracer $ ReturnedFunds{actor = show sender, returnAmount} + ) + senders + +scenario :: + Tracer IO HydraNodeLog -> + RunningNode -> + FilePath -> + Dataset -> + Set Party -> + HydraClient -> + [HydraClient] -> + IO Summary +scenario hydraTracer node workDir Dataset{clientDatasets, title, description} parties leader followers = do + let clusterSize = fromIntegral $ length clientDatasets + let clients = leader : followers + let totalTxs = sum $ map (length . txSequence) clientDatasets + + putTextLn "Initializing Head" + send leader $ input "Init" [] + headId <- + waitForAllMatch (fromIntegral $ 10 * clusterSize) clients $ \v -> + headIsInitializingWith parties v + <|> do + guard $ v ^? key "tag" == Just "HeadIsInitializing" + headId <- v ^?
key "headId" + parseMaybe parseJSON headId :: Maybe HeadId + + putTextLn "Comitting initialUTxO from dataset" + expectedUTxO <- commitUTxO node clients clientDatasets + + waitFor hydraTracer (fromIntegral $ 10 * clusterSize) clients $ + output "HeadIsOpen" ["utxo" .= expectedUTxO, "headId" .= headId] + + putTextLn "HeadIsOpen" + processedTransactions <- processTransactions clients clientDatasets + + putTextLn "Closing the Head" + send leader $ input "Close" [] + + deadline <- waitMatch 300 leader $ \v -> do + guard $ v ^? key "tag" == Just "HeadIsClosed" + guard $ v ^? key "headId" == Just (toJSON headId) + v ^? key "contestationDeadline" . _JSON + + -- Expect to see ReadyToFanout within 3 seconds after deadline + remainingTime <- diffUTCTime deadline <$> getCurrentTime + waitFor hydraTracer (remainingTime + 3) [leader] $ + output "ReadyToFanout" ["headId" .= headId] + + putTextLn "Finalizing the Head" + send leader $ input "Fanout" [] + waitMatch 100 leader $ \v -> do + guard (v ^? key "tag" == Just "HeadIsFinalized") + guard $ v ^? key "headId" == Just (toJSON headId) + + let res = mapMaybe analyze . Map.toList $ processedTransactions + aggregates = movingAverage res + + writeResultsCsv (workDir "results.csv") aggregates + + let confTimes = map (\(_, _, a) -> a) res + numberOfTxs = length confTimes + numberOfInvalidTxs = length $ Map.filter (isJust . invalidAt) processedTransactions + averageConfirmationTime = sum confTimes / fromIntegral numberOfTxs + quantiles = makeQuantiles confTimes + summaryTitle = fromMaybe "Baseline Scenario" title + summaryDescription = fromMaybe defaultDescription description + + pure $ + Summary + { clusterSize + , totalTxs + , numberOfTxs + , averageConfirmationTime + , quantiles + , summaryTitle + , summaryDescription + , numberOfInvalidTxs + } defaultDescription :: Text defaultDescription = "" @@ -234,29 +304,26 @@ movingAverage confirmations = in map average fiveSeconds -- | Distribute 100 ADA fuel, starting funds from faucet for each client in the --- dataset, and also publish the hydra scripts. The 'TxId' of the publishing --- transaction is returned. -seedNetwork :: RunningNode -> Dataset -> Tracer IO FaucetLog -> IO TxId +-- dataset. +seedNetwork :: RunningNode -> Dataset -> Tracer IO FaucetLog -> IO () seedNetwork node@RunningNode{nodeSocket, networkId} Dataset{fundingTransaction, clientDatasets} tracer = do fundClients - forM_ clientDatasets fuelWith100Ada - putTextLn "Publishing hydra scripts" - publishHydraScriptsAs node Faucet + forM_ (clientKeys <$> clientDatasets) fuelWith100Ada where fundClients = do putTextLn "Fund scenario from faucet" submitTransaction networkId nodeSocket fundingTransaction void $ awaitTransaction networkId nodeSocket fundingTransaction - fuelWith100Ada ClientDataset{clientKeys = ClientKeys{signingKey}} = do + fuelWith100Ada ClientKeys{signingKey} = do let vk = getVerificationKey signingKey putTextLn $ "Seed client " <> show vk seedFromFaucet node vk 100_000_000 tracer -- | Commit all (expected to exit) 'initialUTxO' from the dataset using the -- (asumed same sequence) of clients. 
-commitUTxO :: RunningNode -> [HydraClient] -> Dataset -> IO UTxO -commitUTxO node clients Dataset{clientDatasets} = +commitUTxO :: RunningNode -> [HydraClient] -> [ClientDataset] -> IO UTxO +commitUTxO node clients clientDatasets = mconcat <$> forM (zip clients clientDatasets) doCommit where doCommit (client, ClientDataset{initialUTxO, clientKeys = ClientKeys{externalSigningKey}}) = do @@ -265,19 +332,26 @@ commitUTxO node clients Dataset{clientDatasets} = >>= submitTx node pure initialUTxO -processTransactions :: [HydraClient] -> Dataset -> IO (Map.Map TxId Event) -processTransactions clients Dataset{clientDatasets} = do +processTransactions :: [HydraClient] -> [ClientDataset] -> IO (Map.Map TxId Event) +processTransactions clients clientDatasets = do let processors = zip (zip clientDatasets (cycle clients)) [1 ..] mconcat <$> mapConcurrently (uncurry clientProcessDataset) processors where + formatLocation = maybe "" (\loc -> "at " <> prettySrcLoc loc) + clientProcessDataset (ClientDataset{txSequence}, client) clientId = do let numberOfTxs = length txSequence submissionQ <- newTBQueueIO (fromIntegral numberOfTxs) registry <- newRegistry atomically $ forM_ txSequence $ writeTBQueue submissionQ - submitTxs client registry submissionQ - `concurrently_` waitForAllConfirmations client registry (Set.fromList $ map txId txSequence) - `concurrently_` progressReport (hydraNodeId client) clientId numberOfTxs submissionQ + ( submitTxs client registry submissionQ + `concurrently_` waitForAllConfirmations client registry (Set.fromList $ map txId txSequence) + `concurrently_` progressReport (hydraNodeId client) clientId numberOfTxs submissionQ + ) + `catch` \(HUnitFailure sourceLocation reason) -> + putStrLn ("Something went wrong while waiting for all confirmations: " <> formatLocation sourceLocation <> ": " <> formatFailureReason reason) + `catch` \(ex :: SomeException) -> + putStrLn ("Something went wrong while waiting for all confirmations: " <> show ex) readTVarIO (processedTxs registry) progressReport :: Int -> Int -> Int -> TBQueue IO Tx -> IO () diff --git a/hydra-cluster/bench/Bench/Options.hs b/hydra-cluster/bench/Bench/Options.hs index d5d84e3688c..ee5eb7011ee 100644 --- a/hydra-cluster/bench/Bench/Options.hs +++ b/hydra-cluster/bench/Bench/Options.hs @@ -2,27 +2,11 @@ module Bench.Options where import Hydra.Prelude -import Options.Applicative ( - Parser, - ParserInfo, - auto, - command, - fullDesc, - header, - help, - helpDoc, - helper, - hsubparser, - info, - long, - metavar, - option, - progDesc, - short, - str, - strOption, - value, - ) +import Hydra.Cardano.Api (NetworkId, SocketPath) +import Hydra.Chain (maximumNumberOfParties) +import Hydra.Network (Host, readHost) +import Hydra.Options (networkIdParser, nodeSocketParser) +import Options.Applicative (Parser, ParserInfo, auto, command, fullDesc, header, help, helpDoc, helper, hsubparser, info, long, maybeReader, metavar, option, progDesc, short, str, strOption, value) import Options.Applicative.Builder (argument) import Options.Applicative.Help (Doc, align, fillSep, line, (<+>)) @@ -41,6 +25,14 @@ data Options , timeoutSeconds :: NominalDiffTime , startingNodeId :: Int } + | DemoOptions + { outputDirectory :: Maybe FilePath + , scalingFactor :: Int + , timeoutSeconds :: NominalDiffTime + , networkId :: NetworkId + , nodeSocket :: SocketPath + , hydraClients :: [Host] + } benchOptionsParser :: ParserInfo Options benchOptionsParser = @@ -48,6 +40,7 @@ benchOptionsParser = ( hsubparser ( command "single" standaloneOptionsInfo <> command 
"datasets" datasetOptionsInfo + <> command "demo" demoOptionsInfo ) <**> helper ) @@ -156,6 +149,39 @@ startingNodeIdParser = \ benchmark conflicts with default ports allocation scheme (default: 0)" ) +demoOptionsInfo :: ParserInfo Options +demoOptionsInfo = + info + demoOptionsParser + ( progDesc + "Run bench scenario over local demo. \ + \ This requires having in the background: \ + \ * cardano node running on specified node-socket. \ + \ * hydra nodes listening on specified hosts." + ) + +demoOptionsParser :: Parser Options +demoOptionsParser = + DemoOptions + <$> optional outputDirectoryParser + <*> scalingFactorParser + <*> timeoutParser + <*> networkIdParser + <*> nodeSocketParser + <*> many hydraClientsParser + +hydraClientsParser :: Parser Host +hydraClientsParser = + option (maybeReader readHost) $ + long "hydra-client" + <> help + ( "A hydra node api address to connect to. This is using the form :, \ + \where can be an IP address, or a host name. Can be \ + \provided multiple times, once for each participant (current maximum limit is " + <> show maximumNumberOfParties + <> " )." + ) + datasetOptionsInfo :: ParserInfo Options datasetOptionsInfo = info diff --git a/hydra-cluster/bench/Bench/Summary.hs b/hydra-cluster/bench/Bench/Summary.hs index 28dab93f629..f17700440e4 100644 --- a/hydra-cluster/bench/Bench/Summary.hs +++ b/hydra-cluster/bench/Bench/Summary.hs @@ -6,15 +6,21 @@ module Bench.Summary where import Hydra.Prelude import Data.Fixed (Nano) +import Data.Text (pack) import Data.Time (nominalDiffTimeToSeconds) import Data.Vector (Vector, (!)) +import Hydra.Generator (ClientDataset (..), Dataset (..)) import Statistics.Quantile (def) import Statistics.Quantile qualified as Statistics +import Test.HUnit.Lang (formatFailureReason) +import Test.Hydra.Prelude (HUnitFailure (..)) +import Text.Printf (printf) type Percent = Double data Summary = Summary { clusterSize :: Word64 + , totalTxs :: Int , numberOfTxs :: Int , numberOfInvalidTxs :: Int , averageConfirmationTime :: NominalDiffTime @@ -25,19 +31,37 @@ data Summary = Summary deriving stock (Generic, Eq, Show) deriving anyclass (ToJSON) +errorSummary :: Dataset -> HUnitFailure -> Summary +errorSummary Dataset{title, clientDatasets} (HUnitFailure sourceLocation reason) = + Summary + { clusterSize = fromIntegral $ length clientDatasets + , totalTxs = length $ foldMap (\ClientDataset{txSequence} -> txSequence) clientDatasets + , numberOfTxs = 0 + , numberOfInvalidTxs = 0 + , averageConfirmationTime = 0 + , summaryTitle = maybe "Error Summary" ("Error Summary " <>) title + , summaryDescription = + pack $ "Benchmark failed " <> formatLocation sourceLocation <> ": " <> formatFailureReason reason + , quantiles = mempty + } + where + formatLocation = maybe "" (\loc -> "at " <> prettySrcLoc loc) + makeQuantiles :: [NominalDiffTime] -> Vector Double makeQuantiles times = Statistics.quantilesVec def (fromList [0 .. 99]) 100 (fromList $ map (fromRational . (* 1000) . toRational . nominalDiffTimeToSeconds) times) textReport :: Summary -> [Text] -textReport Summary{numberOfTxs, averageConfirmationTime, quantiles, numberOfInvalidTxs} = - [ "Confirmed txs: " <> show numberOfTxs - , "Average confirmation time (ms): " <> show (nominalDiffTimeToMilliseconds averageConfirmationTime) - , "P99: " <> show (quantiles ! 99) <> "ms" - , "P95: " <> show (quantiles ! 95) <> "ms" - , "P50: " <> show (quantiles ! 
50) <> "ms" - , "Invalid txs: " <> show numberOfInvalidTxs - ] +textReport Summary{totalTxs, numberOfTxs, averageConfirmationTime, quantiles, numberOfInvalidTxs} = + let frac :: Double + frac = 100 * fromIntegral numberOfTxs / fromIntegral totalTxs + in [ pack $ printf "Confirmed txs/Total expected txs: %d/%d (%.2f %%)" numberOfTxs totalTxs frac + , "Average confirmation time (ms): " <> show (nominalDiffTimeToMilliseconds averageConfirmationTime) + , "P99: " <> show (quantiles ! 99) <> "ms" + , "P95: " <> show (quantiles ! 95) <> "ms" + , "P50: " <> show (quantiles ! 50) <> "ms" + , "Invalid txs: " <> show numberOfInvalidTxs + ] markdownReport :: UTCTime -> [Summary] -> [Text] markdownReport now summaries = diff --git a/hydra-cluster/bench/Main.hs b/hydra-cluster/bench/Main.hs index e9a10ac60bc..f0930c9ad9a 100644 --- a/hydra-cluster/bench/Main.hs +++ b/hydra-cluster/bench/Main.hs @@ -5,20 +5,23 @@ module Main where import Hydra.Prelude import Test.Hydra.Prelude -import Bench.EndToEnd (bench) +import Bench.EndToEnd (bench, benchDemo) import Bench.Options (Options (..), benchOptionsParser) -import Bench.Summary (Summary (..), markdownReport, textReport) +import Bench.Summary (Summary (..), errorSummary, markdownReport, textReport) import Data.Aeson (eitherDecodeFileStrict', encodeFile) -import Hydra.Generator (Dataset (..), generateConstantUTxODataset) +import Hydra.Cluster.Fixture (Actor (..)) +import Hydra.Cluster.Util (keysFor) +import Hydra.Generator (ClientKeys (..), Dataset (..), generateConstantUTxODataset, generateDemoUTxODataset) import Options.Applicative (execParser) import System.Directory (createDirectoryIfMissing, doesDirectoryExist) import System.Environment (withArgs) -import System.FilePath (takeFileName, ()) +import System.FilePath (takeDirectory, takeFileName, ()) import Test.HUnit.Lang (formatFailureReason) import Test.QuickCheck (generate, getSize, scale) main :: IO () -main = +main = do + hSetBuffering stdout LineBuffering execParser benchOptionsParser >>= \case StandaloneOptions{workDirectory = Just workDir, outputDirectory, timeoutSeconds, startingNodeId, scalingFactor, clusterSize} -> do -- XXX: This option is a bit weird as it allows to re-run a test by @@ -32,38 +35,73 @@ main = workDir <- createSystemTempDirectory "bench" play outputDirectory timeoutSeconds scalingFactor clusterSize startingNodeId workDir DatasetOptions{datasetFiles, outputDirectory, timeoutSeconds, startingNodeId} -> do - run outputDirectory timeoutSeconds startingNodeId datasetFiles + let action = bench startingNodeId timeoutSeconds + run outputDirectory datasetFiles action + DemoOptions{outputDirectory, scalingFactor, timeoutSeconds, networkId, nodeSocket, hydraClients} -> do + let action = benchDemo networkId nodeSocket timeoutSeconds hydraClients + playDemo outputDirectory scalingFactor networkId nodeSocket action where + playDemo outputDirectory scalingFactor networkId nodeSocket action = do + numberOfTxs <- generate $ scale (* scalingFactor) getSize + let actors = [(Alice, AliceFunds), (Bob, BobFunds), (Carol, CarolFunds)] + let toClientKeys (actor, funds) = do + sk <- snd <$> keysFor actor + fundsSk <- snd <$> keysFor funds + pure $ ClientKeys sk fundsSk + clientKeys <- forM actors toClientKeys + dataset <- generateDemoUTxODataset networkId nodeSocket clientKeys numberOfTxs + results <- withTempDir "bench-demo" $ \dir -> do + runSingle dataset action (fromMaybe dir outputDirectory) + summarizeResults outputDirectory [results] + play outputDirectory timeoutSeconds scalingFactor clusterSize 
startingNodeId workDir = do + (_, faucetSk) <- keysFor Faucet putStrLn $ "Generating single dataset in work directory: " <> workDir numberOfTxs <- generate $ scale (* scalingFactor) getSize - dataset <- generateConstantUTxODataset (fromIntegral clusterSize) numberOfTxs + dataset <- generate $ generateConstantUTxODataset faucetSk (fromIntegral clusterSize) numberOfTxs let datasetPath = workDir </> "dataset.json" saveDataset datasetPath dataset - run outputDirectory timeoutSeconds startingNodeId [datasetPath] + let action = bench startingNodeId timeoutSeconds + run outputDirectory [datasetPath] action replay outputDirectory timeoutSeconds startingNodeId benchDir = do let datasetPath = benchDir </> "dataset.json" putStrLn $ "Replaying single dataset from work directory: " <> datasetPath - run outputDirectory timeoutSeconds startingNodeId [datasetPath] + let action = bench startingNodeId timeoutSeconds + run outputDirectory [datasetPath] action - run outputDirectory timeoutSeconds startingNodeId datasetFiles = do + runSingle dataset action dir = do + withArgs [] $ do + try @_ @HUnitFailure (action dir dataset) >>= \case + Left exc -> pure $ Left (dataset, dir, errorSummary dataset exc, TestFailed exc) + Right summary@Summary{totalTxs, numberOfTxs, numberOfInvalidTxs} + | numberOfTxs /= totalTxs -> pure $ Left (dataset, dir, summary, NotEnoughTransactions numberOfTxs totalTxs) + | numberOfInvalidTxs == 0 -> pure $ Right summary + | otherwise -> pure $ Left (dataset, dir, summary, InvalidTransactions numberOfInvalidTxs) + + run outputDirectory datasetFiles action = do results <- forM datasetFiles $ \datasetPath -> do putTextLn $ "Running benchmark with dataset " <> show datasetPath dataset <- loadDataset datasetPath - withTempDir ("bench-" <> takeFileName datasetPath) $ \dir -> - withArgs [] $ do - -- XXX: Wait between each bench run to give the OS time to cleanup resources?? - threadDelay 10 - try @_ @HUnitFailure (bench startingNodeId timeoutSeconds dir dataset) >>= \case - Left exc -> pure $ Left (dataset, dir, TestFailed exc) - Right summary@Summary{numberOfInvalidTxs} - | numberOfInvalidTxs == 0 -> pure $ Right summary - | otherwise -> pure $ Left (dataset, dir, InvalidTransactions numberOfInvalidTxs) + withTempDir ("bench-" <> takeFileName datasetPath) $ \dir -> do + -- XXX: Wait between each bench run to give the OS time to cleanup resources??
+ threadDelay 10 + runSingle dataset action dir + summarizeResults outputDirectory results + + summarizeResults :: Maybe FilePath -> [Either (Dataset, FilePath, Summary, BenchmarkFailed) Summary] -> IO () + summarizeResults outputDirectory results = do let (failures, summaries) = partitionEithers results case failures of - [] -> benchmarkSucceeded outputDirectory summaries - errs -> mapM_ (\(_, dir, exc) -> benchmarkFailedWith dir exc) errs >> exitFailure + [] -> writeBenchmarkReport outputDirectory summaries + errs -> + mapM_ + ( \(_, dir, summary, exc) -> + writeBenchmarkReport outputDirectory [summary] + >> benchmarkFailedWith dir exc + ) + errs + >> exitFailure loadDataset :: FilePath -> IO Dataset loadDataset f = do @@ -72,18 +110,22 @@ main = saveDataset :: FilePath -> Dataset -> IO () saveDataset f dataset = do + createDirectoryIfMissing True (takeDirectory f) putStrLn $ "Writing dataset to: " <> f encodeFile f dataset data BenchmarkFailed = TestFailed HUnitFailure | InvalidTransactions Int + | NotEnoughTransactions Int Int benchmarkFailedWith :: FilePath -> BenchmarkFailed -> IO () benchmarkFailedWith benchDir = \case (TestFailed (HUnitFailure sourceLocation reason)) -> do putStrLn $ "Benchmark failed " <> formatLocation sourceLocation <> ": " <> formatFailureReason reason putStrLn $ "To re-run with same dataset, pass '--work-directory=" <> benchDir <> "' to the executable" + (NotEnoughTransactions actual expected) -> do + putStrLn $ "Benchmark resulted in " <> show actual <> " transactions; but wanted " <> show expected <> "." (InvalidTransactions n) -> do putStrLn $ "Benchmark has " <> show n <> " invalid transactions" putStrLn $ @@ -95,8 +137,8 @@ benchmarkFailedWith benchDir = \case where formatLocation = maybe "" (\loc -> "at " <> prettySrcLoc loc) -benchmarkSucceeded :: Maybe FilePath -> [Summary] -> IO () -benchmarkSucceeded outputDirectory summaries = do +writeBenchmarkReport :: Maybe FilePath -> [Summary] -> IO () +writeBenchmarkReport outputDirectory summaries = do dumpToStdout whenJust outputDirectory writeReport where diff --git a/hydra-cluster/hydra-cluster.cabal b/hydra-cluster/hydra-cluster.cabal index e7124c893bf..2e551403b84 100644 --- a/hydra-cluster/hydra-cluster.cabal +++ b/hydra-cluster/hydra-cluster.cabal @@ -221,6 +221,7 @@ benchmark bench-e2e , regex-tdfa , scientific , statistics + , text , time , vector diff --git a/hydra-cluster/src/CardanoClient.hs b/hydra-cluster/src/CardanoClient.hs index 9ad588637ee..c40a978507b 100644 --- a/hydra-cluster/src/CardanoClient.hs +++ b/hydra-cluster/src/CardanoClient.hs @@ -111,8 +111,11 @@ waitForUTxO node utxo = txOut -> error $ "Unexpected TxOut " <> show txOut -mkGenesisTx :: +-- | Helper used to generate transaction datasets for use in hydra-cluster benchmarks. +buildRawTransaction :: NetworkId -> + -- | Initial input from which to spend + TxIn -> -- | Owner of the 'initialFund'. SigningKey PaymentKey -> -- | Amount of initialFunds @@ -120,16 +123,11 @@ mkGenesisTx :: -- | Recipients and amounts to pay in this transaction. 
[(VerificationKey PaymentKey, Coin)] -> Tx -mkGenesisTx networkId signingKey initialAmount recipients = +buildRawTransaction networkId initialInput signingKey initialAmount recipients = case buildRaw [initialInput] (recipientOutputs <> [changeOutput]) of - Left err -> error $ "Fail to build genesis transations: " <> show err + Left err -> error $ "Failed to build raw transaction: " <> show err Right tx -> sign signingKey tx where - initialInput = - genesisUTxOPseudoTxIn - networkId - (unsafeCastHash $ verificationKeyHash $ getVerificationKey signingKey) - totalSent = foldMap snd recipients changeAddr = mkVkAddress networkId (getVerificationKey signingKey) diff --git a/hydra-cluster/src/CardanoNode.hs b/hydra-cluster/src/CardanoNode.hs index 1a0bc1da22f..e613103756a 100644 --- a/hydra-cluster/src/CardanoNode.hs +++ b/hydra-cluster/src/CardanoNode.hs @@ -132,26 +132,32 @@ getCardanoNodeVersion = -- by 'defaultCardanoNodeArgs'. findRunningCardanoNode :: Tracer IO NodeLog -> FilePath -> KnownNetwork -> IO (Maybe RunningNode) findRunningCardanoNode tracer workDir knownNetwork = do - try (queryGenesisParameters knownNetworkId socketPath QueryTip) >>= \case + findRunningCardanoNode' tracer knownNetworkId socketPath + where + knownNetworkId = toNetworkId knownNetwork + + socketPath = File $ workDir </> nodeSocket + + CardanoNodeArgs{nodeSocket} = defaultCardanoNodeArgs + +-- | Tries to find and communicate with an existing cardano-node running on the given +-- network id and socket path. +findRunningCardanoNode' :: Tracer IO NodeLog -> NetworkId -> SocketPath -> IO (Maybe RunningNode) +findRunningCardanoNode' tracer networkId nodeSocket = do + try (queryGenesisParameters networkId nodeSocket QueryTip) >>= \case Left (e :: SomeException) -> traceWith tracer MsgQueryGenesisParametersFailed{err = show e} $> Nothing Right GenesisParameters{protocolParamActiveSlotsCoefficient, protocolParamSlotLength} -> pure $ Just RunningNode - { networkId = knownNetworkId - , nodeSocket = socketPath + { networkId + , nodeSocket , blockTime = computeBlockTime protocolParamSlotLength protocolParamActiveSlotsCoefficient } - where - knownNetworkId = toNetworkId knownNetwork - - socketPath = File $ workDir </> nodeSocket - - CardanoNodeArgs{nodeSocket} = defaultCardanoNodeArgs -- | Start a single cardano-node devnet using the config from config/ and -- credentials from config/credentials/.
Only the 'Faucet' actor will receive diff --git a/hydra-cluster/src/Hydra/Cluster/Faucet.hs b/hydra-cluster/src/Hydra/Cluster/Faucet.hs index 4d62dee0e4e..a2322ea3e83 100644 --- a/hydra-cluster/src/Hydra/Cluster/Faucet.hs +++ b/hydra-cluster/src/Hydra/Cluster/Faucet.hs @@ -25,7 +25,7 @@ import Hydra.Chain.CardanoClient (queryProtocolParameters) import Hydra.Chain.Direct.ScriptRegistry ( publishHydraScripts, ) -import Hydra.Cluster.Fixture (Actor (Faucet), actorName) +import Hydra.Cluster.Fixture (Actor (Faucet)) import Hydra.Cluster.Util (keysFor) import Hydra.Ledger (balance) import Hydra.Ledger.Cardano () @@ -106,19 +106,32 @@ returnFundsToFaucet :: RunningNode -> Actor -> IO () -returnFundsToFaucet tracer RunningNode{networkId, nodeSocket} sender = do +returnFundsToFaucet tracer node sender = do + senderKeys <- keysFor sender + void $ returnFundsToFaucet' tracer node (snd senderKeys) + +returnFundsToFaucet' :: + Tracer IO FaucetLog -> + RunningNode -> + SigningKey PaymentKey -> + IO Coin +returnFundsToFaucet' tracer RunningNode{networkId, nodeSocket} senderSk = do (faucetVk, _) <- keysFor Faucet let faucetAddress = mkVkAddress networkId faucetVk - - (senderVk, senderSk) <- keysFor sender + let senderVk = getVerificationKey senderSk utxo <- queryUTxOFor networkId nodeSocket QueryTip senderVk - unless (null utxo) . retryOnExceptions tracer $ do - let utxoValue = balance @Tx utxo - let allLovelace = selectLovelace utxoValue - tx <- sign senderSk <$> buildTxBody utxo faucetAddress - submitTransaction networkId nodeSocket tx - void $ awaitTransaction networkId nodeSocket tx - traceWith tracer $ ReturnedFunds{actor = actorName sender, returnAmount = allLovelace} + returnAmount <- + if null utxo + then pure 0 + else retryOnExceptions tracer $ do + let utxoValue = balance @Tx utxo + let allLovelace = selectLovelace utxoValue + tx <- sign senderSk <$> buildTxBody utxo faucetAddress + submitTransaction networkId nodeSocket tx + void $ awaitTransaction networkId nodeSocket tx + pure allLovelace + traceWith tracer $ ReturnedFunds{actor = show senderVk, returnAmount} + pure returnAmount where buildTxBody utxo faucetAddress = -- Here we specify no outputs in the transaction so that a change output with the diff --git a/hydra-cluster/src/Hydra/Generator.hs b/hydra-cluster/src/Hydra/Generator.hs index c1c9b6eaf2f..04ac56eb503 100644 --- a/hydra-cluster/src/Hydra/Generator.hs +++ b/hydra-cluster/src/Hydra/Generator.hs @@ -5,13 +5,15 @@ import Hydra.Prelude hiding (size) import Cardano.Api.Ledger (PParams) import Cardano.Api.UTxO qualified as UTxO -import CardanoClient (mkGenesisTx) +import CardanoClient (QueryPoint (QueryTip), buildRawTransaction, buildTransaction, queryUTxOFor, sign) import Control.Monad (foldM) import Data.Aeson (object, withObject, (.:), (.=)) import Data.Default (def) -import Hydra.Cluster.Fixture (Actor (Faucet), availableInitialFunds) +import Hydra.Cluster.Faucet (FaucetException (..)) +import Hydra.Cluster.Fixture (Actor (..), availableInitialFunds) import Hydra.Cluster.Util (keysFor) -import Hydra.Ledger.Cardano (genSigningKey, generateOneTransfer) +import Hydra.Ledger (balance) +import Hydra.Ledger.Cardano (genSigningKey, generateOneRandomTransfer, generateOneSelfTransfer) import Test.QuickCheck (choose, generate, sized) networkId :: NetworkId @@ -33,7 +35,7 @@ data Dataset = Dataset instance Arbitrary Dataset where arbitrary = sized $ \n -> do sk <- genSigningKey - genDatasetConstantUTxO sk (n `div` 10) n + generateConstantUTxODataset sk (n `div` 10) n data ClientKeys = 
ClientKeys { signingKey :: SigningKey PaymentKey @@ -80,53 +82,108 @@ defaultProtocolParameters = def -- The sequence of transactions generated consist only of simple payments from -- and to arbitrary keys controlled by the individual clients. generateConstantUTxODataset :: - -- | Number of clients - Int -> - -- | Number of transactions - Int -> - IO Dataset -generateConstantUTxODataset nClients nTxs = do - (_, faucetSk) <- keysFor Faucet - generate $ genDatasetConstantUTxO faucetSk nClients nTxs - -genDatasetConstantUTxO :: - -- | The faucet signing key + -- | Faucet signing key SigningKey PaymentKey -> -- | Number of clients Int -> -- | Number of transactions Int -> Gen Dataset -genDatasetConstantUTxO faucetSk nClients nTxs = do - clientKeys <- replicateM nClients arbitrary +generateConstantUTxODataset faucetSk nClients nTxs = do + allClientKeys <- replicateM nClients arbitrary -- Prepare funding transaction which will give every client's -- 'externalSigningKey' "some" lovelace. The internal 'signingKey' will get -- funded in the beginning of the benchmark run. - clientFunds <- forM clientKeys $ \ClientKeys{externalSigningKey} -> do - amount <- Coin <$> choose (1, availableInitialFunds `div` fromIntegral nClients) - pure (getVerificationKey externalSigningKey, amount) + clientFunds <- genClientFunds allClientKeys availableInitialFunds let fundingTransaction = - mkGenesisTx + buildRawTransaction networkId + initialInput faucetSk (Coin availableInitialFunds) clientFunds - clientDatasets <- forM clientKeys (generateClientDataset fundingTransaction) + let dataset clientKeys = + generateClientDataset networkId fundingTransaction clientKeys nTxs generateOneRandomTransfer + clientDatasets <- forM allClientKeys dataset pure Dataset{fundingTransaction, clientDatasets, title = Nothing, description = Nothing} where - generateClientDataset fundingTransaction clientKeys@ClientKeys{externalSigningKey} = do - let vk = getVerificationKey externalSigningKey - keyPair = (vk, externalSigningKey) - -- NOTE: The initialUTxO must all UTXO we will later commit. We assume - -- that everything owned by the externalSigningKey will get committed - -- into the head. - initialUTxO = - utxoProducedByTx fundingTransaction - & UTxO.filter ((== mkVkAddress networkId vk) . txOutAddress) - txSequence <- - reverse - . thrd - <$> foldM (generateOneTransfer networkId) (initialUTxO, keyPair, []) [1 .. nTxs] - pure ClientDataset{clientKeys, initialUTxO, txSequence} - - thrd (_, _, c) = c + initialInput = + genesisUTxOPseudoTxIn + networkId + (unsafeCastHash $ verificationKeyHash $ getVerificationKey faucetSk) + +-- | Generate 'Dataset' which does not grow the per-client UTXO set over time. +-- This queries the network to fetch the current funds available in the faucet +-- to be distributed among the peers. +-- The sequence of transactions generated consists only of simple self payments. +generateDemoUTxODataset :: + NetworkId -> + SocketPath -> + -- | Client keys + [ClientKeys] -> + -- | Number of transactions + Int -> + IO Dataset +generateDemoUTxODataset network nodeSocket allClientKeys nTxs = do + (faucetVk, faucetSk) <- keysFor Faucet + faucetUTxO <- queryUTxOFor network nodeSocket QueryTip faucetVk + let (Coin fundsAvailable) = selectLovelace (balance @Tx faucetUTxO) + -- Prepare funding transaction which will give every client's + -- 'externalSigningKey' "some" lovelace. The internal 'signingKey' will get + -- funded in the beginning of the benchmark run.
clientFunds <- generate $ genClientFunds allClientKeys fundsAvailable + fundingTransaction <- do + let changeAddress = mkVkAddress network faucetVk + let recipientOutputs = + flip map clientFunds $ \(vk, ll) -> + TxOut + (mkVkAddress network vk) + (lovelaceToValue ll) + TxOutDatumNone + ReferenceScriptNone + buildTransaction network nodeSocket changeAddress faucetUTxO [] recipientOutputs >>= \case + Left e -> throwIO $ FaucetFailedToBuildTx{reason = e} + Right body -> do + let signedTx = sign faucetSk body + pure signedTx + let dataset clientKeys = + generateClientDataset network fundingTransaction clientKeys nTxs generateOneSelfTransfer + generate $ do + clientDatasets <- forM allClientKeys dataset + pure Dataset{fundingTransaction, clientDatasets, title = Nothing, description = Nothing} + +-- * Helpers +thrd :: (a, b, c) -> c +thrd (_, _, c) = c + +withInitialUTxO :: SigningKey PaymentKey -> Tx -> UTxO +withInitialUTxO externalSigningKey fundingTransaction = + let vk = getVerificationKey externalSigningKey + in -- NOTE: The initialUTxO must contain all UTXO we will later commit. We assume + -- that everything owned by the externalSigningKey will get committed + -- into the head. + utxoProducedByTx fundingTransaction + & UTxO.filter ((== mkVkAddress networkId vk) . txOutAddress) + +genClientFunds :: [ClientKeys] -> Integer -> Gen [(VerificationKey PaymentKey, Coin)] +genClientFunds clientKeys availableFunds = + forM clientKeys $ \ClientKeys{externalSigningKey} -> do + amount <- Coin <$> choose (1, availableFunds `div` fromIntegral nClients) + pure (getVerificationKey externalSigningKey, amount) + where + nClients = length clientKeys + +generateClientDataset :: + NetworkId -> + Tx -> + ClientKeys -> + Int -> + (NetworkId -> (UTxO, SigningKey PaymentKey, [Tx]) -> Int -> Gen (UTxO, SigningKey PaymentKey, [Tx])) -> + Gen ClientDataset +generateClientDataset network fundingTransaction clientKeys@ClientKeys{externalSigningKey} nTxs action = do + let initialUTxO = withInitialUTxO externalSigningKey fundingTransaction + txSequence <- + reverse + . thrd + <$> foldM (action network) (initialUTxO, externalSigningKey, []) [1 .. nTxs] + pure ClientDataset{clientKeys, initialUTxO, txSequence} diff --git a/hydra-cluster/src/HydraNode.hs b/hydra-cluster/src/HydraNode.hs index cb79b183a6d..fa520897e3a 100644 --- a/hydra-cluster/src/HydraNode.hs +++ b/hydra-cluster/src/HydraNode.hs @@ -48,6 +48,7 @@ import Prelude qualified data HydraClient = HydraClient { hydraNodeId :: Int + , apiHost :: Host , connection :: Connection , tracer :: Tracer IO HydraNodeLog } @@ -172,22 +173,22 @@ waitForAll tracer delay nodes expected = do -- | Helper to make it easy to obtain a commit tx using some wallet utxo. -- Create a commit tx using the hydra-node for later submission. requestCommitTx :: HydraClient -> UTxO -> IO Tx -requestCommitTx HydraClient{hydraNodeId} utxos = +requestCommitTx HydraClient{apiHost = Host{hostname, port}} utxos = runReq defaultHttpConfig request <&> commitTx . responseBody where request = Req.req POST - (Req.http "127.0.0.1" /: "commit") + (Req.http hostname /: "commit") (ReqBodyJson $ SimpleCommitRequest @Tx utxos) (Proxy :: Proxy (JsonResponse (DraftCommitTxResponse Tx))) - (Req.port $ 4_000 + hydraNodeId) + (Req.port (fromInteger . toInteger $ port)) -- | Submit a decommit transaction to the hydra-node.
postDecommit :: HydraClient -> Tx -> IO () -postDecommit HydraClient{hydraNodeId} decommitTx = do +postDecommit HydraClient{apiHost = Host{hostname, port}} decommitTx = do void $ - parseUrlThrow ("POST http://127.0.0.1:" <> show (4000 + hydraNodeId) <> "/decommit") + parseUrlThrow ("POST http://" <> T.unpack hostname <> ":" <> show port <> "/decommit") <&> setRequestBodyJSON decommitTx >>= httpLbs @@ -195,19 +196,19 @@ postDecommit HydraClient{hydraNodeId} decommitTx = do -- avoid parsing responses using the same data types as the system under test, -- this parses the response as a 'UTxO' type as we often need to pick it apart. getSnapshotUTxO :: HydraClient -> IO UTxO -getSnapshotUTxO HydraClient{hydraNodeId} = +getSnapshotUTxO HydraClient{apiHost = Host{hostname, port}} = runReq defaultHttpConfig request <&> responseBody where request = Req.req GET - (Req.http "127.0.0.1" /: "snapshot" /: "utxo") + (Req.http hostname /: "snapshot" /: "utxo") NoReqBody (Proxy :: Proxy (JsonResponse UTxO)) - (Req.port $ 4_000 + hydraNodeId) + (Req.port (fromInteger . toInteger $ port)) getMetrics :: HasCallStack => HydraClient -> IO ByteString -getMetrics HydraClient{hydraNodeId} = do +getMetrics HydraClient{hydraNodeId, apiHost = Host{hostname}} = do failAfter 3 $ try (runReq defaultHttpConfig request) >>= \case Left (e :: HttpException) -> failure $ "Request for hydra-node metrics failed: " <> show e @@ -216,7 +217,7 @@ getMetrics HydraClient{hydraNodeId} = do request = Req.req GET - (Req.http "127.0.0.1" /: "metrics") + (Req.http hostname /: "metrics") NoReqBody Req.bsResponse (Req.port $ 6_000 + hydraNodeId) @@ -402,7 +403,14 @@ withHydraNode' tracer chainConfig workDir hydraNodeId hydraSKey hydraVKeys allNo ] withConnectionToNode :: forall a. Tracer IO HydraNodeLog -> Int -> (HydraClient -> IO a) -> IO a -withConnectionToNode tracer hydraNodeId action = do +withConnectionToNode tracer hydraNodeId = + withConnectionToNodeHost tracer hydraNodeId Host{hostname, port} Nothing + where + hostname = "127.0.0.1" + port = fromInteger $ 4_000 + toInteger hydraNodeId + +withConnectionToNodeHost :: forall a. Tracer IO HydraNodeLog -> Int -> Host -> Maybe String -> (HydraClient -> IO a) -> IO a +withConnectionToNodeHost tracer hydraNodeId apiHost@Host{hostname, port} queryParams action = do connectedOnce <- newIORef False tryConnect connectedOnce (200 :: Int) where @@ -420,12 +428,15 @@ withConnectionToNode tracer hydraNodeId action = do , Handler $ retryOrThrow (Proxy @HandshakeException) ] - doConnect connectedOnce = runClient "127.0.0.1" (4_000 + hydraNodeId) "/" $ \connection -> do - atomicWriteIORef connectedOnce True - traceWith tracer (NodeStarted hydraNodeId) - res <- action $ HydraClient{hydraNodeId, connection, tracer} - sendClose connection ("Bye" :: Text) - pure res + historyMode = fromMaybe "/" queryParams + + doConnect connectedOnce = runClient (T.unpack hostname) (fromInteger . toInteger $ port) historyMode $ + \connection -> do + atomicWriteIORef connectedOnce True + traceWith tracer (NodeStarted hydraNodeId) + res <- action $ HydraClient{hydraNodeId, apiHost, connection, tracer} + sendClose connection ("Bye" :: Text) + pure res hydraNodeProcess :: RunOptions -> CreateProcess hydraNodeProcess = proc "hydra-node" . 
toArgs diff --git a/hydra-cluster/test/Test/GeneratorSpec.hs b/hydra-cluster/test/Test/GeneratorSpec.hs index f6418177816..93d602cad4b 100644 --- a/hydra-cluster/test/Test/GeneratorSpec.hs +++ b/hydra-cluster/test/Test/GeneratorSpec.hs @@ -13,7 +13,7 @@ import Hydra.Cluster.Util (keysFor) import Hydra.Generator ( ClientDataset (..), Dataset (..), - genDatasetConstantUTxO, + generateConstantUTxODataset, ) import Hydra.Ledger (ChainSlot (ChainSlot), applyTransactions) import Hydra.Ledger.Cardano (Tx, cardanoLedger) @@ -45,7 +45,7 @@ prop_keepsUTxOConstant = let ledgerEnv = newLedgerEnv defaultPParams -- XXX: non-exhaustive pattern match pure $ - forAll (genDatasetConstantUTxO faucetSk 1 n) $ + forAll (generateConstantUTxODataset faucetSk 1 n) $ \Dataset{fundingTransaction, clientDatasets = [ClientDataset{txSequence}]} -> let initialUTxO = utxoFromTx fundingTransaction finalUTxO = foldl' (apply defaultGlobals ledgerEnv) initialUTxO txSequence diff --git a/hydra-node/src/Hydra/Ledger/Cardano.hs b/hydra-node/src/Hydra/Ledger/Cardano.hs index 4b6c36f7953..986ab0399e4 100644 --- a/hydra-node/src/Hydra/Ledger/Cardano.hs +++ b/hydra-node/src/Hydra/Ledger/Cardano.hs @@ -282,32 +282,50 @@ genSequenceOfSimplePaymentTransactions = do genFixedSizeSequenceOfSimplePaymentTransactions :: Int -> Gen (UTxO, [Tx]) genFixedSizeSequenceOfSimplePaymentTransactions numTxs = do - keyPair@(vk, _) <- genKeyPair + (vk, sk) <- genKeyPair utxo <- genOneUTxOFor vk txs <- reverse . thrd - <$> foldM (generateOneTransfer testNetworkId) (utxo, keyPair, []) [1 .. numTxs] + <$> foldM (generateOneRandomTransfer testNetworkId) (utxo, sk, []) [1 .. numTxs] pure (utxo, txs) where thrd (_, _, c) = c testNetworkId = Testnet $ NetworkMagic 42 -generateOneTransfer :: +generateOneRandomTransfer :: NetworkId -> - (UTxO, (VerificationKey PaymentKey, SigningKey PaymentKey), [Tx]) -> + (UTxO, SigningKey PaymentKey, [Tx]) -> Int -> - Gen (UTxO, (VerificationKey PaymentKey, SigningKey PaymentKey), [Tx]) -generateOneTransfer networkId (utxo, (_, sender), txs) _ = do + Gen (UTxO, SigningKey PaymentKey, [Tx]) +generateOneRandomTransfer networkId senderUtxO nbrTx = do recipient <- genKeyPair + pure $ mkOneTransfer networkId (snd recipient) senderUtxO nbrTx + +generateOneSelfTransfer :: + NetworkId -> + (UTxO, SigningKey PaymentKey, [Tx]) -> + Int -> + Gen (UTxO, SigningKey PaymentKey, [Tx]) +generateOneSelfTransfer networkId senderUtxO nbrTx = do + let (_, recipientSk, _) = senderUtxO + pure $ mkOneTransfer networkId recipientSk senderUtxO nbrTx + +mkOneTransfer :: + NetworkId -> + SigningKey PaymentKey -> + (UTxO, SigningKey PaymentKey, [Tx]) -> + Int -> + (UTxO, SigningKey PaymentKey, [Tx]) +mkOneTransfer networkId recipientSk (utxo, sender, txs) _ = do + let recipientVk = getVerificationKey recipientSk -- NOTE(AB): elements is partial, it crashes if given an empty list, We don't expect -- this function to be ever used in production, and crash will be caught in tests case UTxO.pairs utxo of [txin] -> - case mkSimpleTx txin (mkVkAddress networkId (fst recipient), balance @Tx utxo) sender of + case mkSimpleTx txin (mkVkAddress networkId recipientVk, balance @Tx utxo) sender of Left e -> error $ "Tx construction failed: " <> show e <> ", utxo: " <> show utxo - Right tx -> - pure (utxoFromTx tx, recipient, tx : txs) + Right tx -> (utxoFromTx tx, recipientSk, tx : txs) _ -> error "Couldn't generate transaction sequence: need exactly one UTXO." 
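+
+-- As an illustration (a sketch, not part of this change, reusing this
+-- module's 'genKeyPair' and 'genOneUTxOFor'): the self-transfer generator
+-- slots into the same fold as the random one, so a fixed-size self-payment
+-- sequence can be generated by mirroring
+-- 'genFixedSizeSequenceOfSimplePaymentTransactions' above:
+--
+-- @
+-- genFixedSizeSequenceOfSelfTransactions :: Int -> Gen (UTxO, [Tx])
+-- genFixedSizeSequenceOfSelfTransactions numTxs = do
+--   (vk, sk) <- genKeyPair
+--   utxo <- genOneUTxOFor vk
+--   txs <-
+--     reverse . (\(_, _, c) -> c)
+--       <$> foldM (generateOneSelfTransfer testNetworkId) (utxo, sk, []) [1 .. numTxs]
+--   pure (utxo, txs)
+--  where
+--   testNetworkId = Testnet $ NetworkMagic 42
+-- @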
diff --git a/nix/hydra/docker.nix b/nix/hydra/docker.nix index cd0d6cfca66..8f34730f5dc 100644 --- a/nix/hydra/docker.nix +++ b/nix/hydra/docker.nix @@ -18,6 +18,19 @@ in }; }; + hydra-node-for-netem = pkgs.dockerTools.streamLayeredImage { + name = "hydra-node-for-netem"; + tag = "latest"; + created = "now"; + contents = [ + pkgs.iproute2 + pkgs.busybox + ]; + config = { + Entrypoint = [ "${hydraPackages.hydra-node-static}/bin/hydra-node" ]; + }; + }; + hydra-tui = pkgs.dockerTools.streamLayeredImage { name = "hydra-tui"; tag = "latest";