diff --git a/bench/cardano-profile/data/ci-test-bage.json b/bench/cardano-profile/data/ci-test-bage.json index 5e84168f7ba..804eabcb1ff 100644 --- a/bench/cardano-profile/data/ci-test-bage.json +++ b/bench/cardano-profile/data/ci-test-bage.json @@ -66,6 +66,36 @@ "ekg": false, "withresources": false }, + "cluster": { + "nomad": { + "namespace": "default", + "class": "", + "resources": { + "producer": { + "cores": 2, + "memory": 15000, + "memory_max": 16000 + }, + "explorer": { + "cores": 2, + "memory": 15000, + "memory_max": 16000 + } + }, + "fetch_logs_ssh": false + }, + "aws": { + "instance_type": { + "producer": "c5.2xlarge", + "explorer": "m5.4xlarge" + } + }, + "minimun_storage": { + "producer": 12582912, + "explorer": 14155776 + }, + "keep_running": false + }, "desc": "Miniature dataset, CI-friendly duration, test scale", "name": "ci-test-bage" diff --git a/bench/cardano-profile/src/Cardano/Benchmarking/Profile.hs b/bench/cardano-profile/src/Cardano/Benchmarking/Profile.hs index edb56d25bab..a5ba73ddda6 100644 --- a/bench/cardano-profile/src/Cardano/Benchmarking/Profile.hs +++ b/bench/cardano-profile/src/Cardano/Benchmarking/Profile.hs @@ -6,6 +6,9 @@ module Cardano.Benchmarking.Profile ( Types.Profile (Profile) + + , defaults + -- Name and description. , name, desc @@ -40,6 +43,11 @@ module Cardano.Benchmarking.Profile ( -- Generator params. , generatorTps + -- Cluster params. + , clusterMinimunStorage, clusterKeepRunningOn + , nomadNamespace, nomadClass, nomadResources, nomadSSHLogsOn + , awsInstanceTypes + -- Analysis params. 
, analysisOff, analysisStandard, analysisPerformance , analysisSizeSmall, analysisSizeModerate, analysisSizeFull @@ -56,6 +64,110 @@ import qualified Cardano.Benchmarking.Profile.Types as Types -------------------------------------------------------------------------------- +defaults :: Types.Profile +defaults = Types.Profile { + Types.name = "" + , Types.desc = Nothing + , Types.composition = Types.Composition { + Types.locations = [] + , Types.n_bft_hosts = 0 + , Types.n_singular_hosts = 0 + , Types.n_dense_hosts = 0 + , Types.dense_pool_density = 0 + , Types.with_proxy = False + , Types.with_explorer = False + , Types.topology = Types.Line + , Types.with_chaindb_server = Nothing + , Types.n_hosts = 0 + , Types.n_pools = 0 + , Types.n_singular_pools = 0 + , Types.n_dense_pools = 0 + , Types.n_pool_hosts = 0 + } + , Types.era = Types.Allegra + , Types.genesis = Types.Genesis { + Types.network_magic = 0 + , Types.single_shot = False + , Types.per_pool_balance = 0 + , Types.funds_balance = 0 + , Types.utxo = 0 + , Types.active_slots_coeff = 0 + , Types.epoch_length = 0 + , Types.parameter_k = 0 + , Types.slot_duration = 0 + , Types.extra_future_offset = 0 + , Types.pparamsEpoch = 0 + , Types.delegators = 0 + , Types.shelley = mempty + , Types.alonzo = mempty + , Types.pool_coin = 0 + , Types.delegator_coin = 0 + } + , Types.scenario = Types.Idle + , Types.node = Types.Node { + Types.rts_flags_override = [] + , Types.shutdown_on_slot_synced = Nothing + , Types.shutdown_on_block_synced = Nothing + , Types.tracing_backend = "" + , Types.nodeTracer = False + , Types.verbatim = Types.NodeVerbatim Nothing + } + , Types.tracer = Types.Tracer { + Types.rtview = False + , Types.ekg = False + , Types.withresources = False + } + , Types.generator = Types.Generator { + Types.add_tx_size = 0 + , Types.init_cooldown = 0 + , Types.inputs_per_tx = 0 + , Types.outputs_per_tx = 0 + , Types.tx_fee = 0 + , Types.epochs = 0 + , Types.tps = 0 + , Types.plutus = Nothing + , Types.tx_count 
= 0 + } + , Types.cluster = Types.Cluster { + Types.nomad = Types.Nomad { + Types.namespace = "default" + , Types.nomad_class = "" + , Types.resources = Types.ByNodeType { + Types.producer = Types.Resources 2 15000 16000 + , Types.explorer = Just $ Types.Resources 2 15000 16000 + } + , Types.fetch_logs_ssh = False + } + , Types.aws = Types.ClusterAWS { + Types.instance_type = Types.ByNodeType { + Types.producer = "c5.2xlarge" + , Types.explorer = Just "m5.4xlarge" + } + } + , Types.minimun_storage = Just $ Types.ByNodeType { + Types.producer = 12582912 + , Types.explorer = Just 14155776 + } + , Types.keep_running = False + } + , Types.analysis = Types.Analysis { + Types.analysisType = Nothing + , Types.cluster_base_startup_overhead_s = 0 + , Types.start_log_spread_s = 0 + , Types.last_log_spread_s = 0 + , Types.silence_since_last_block_s = 0 + , Types.tx_loss_ratio = Scientific.fromFloatDigits (0 :: Double) + , Types.finish_patience = 0 + , Types.filters = [] + , Types.filter_exprs = [] + , Types.minimum_chain_density = Scientific.fromFloatDigits (0 :: Double) + , Types.cluster_startup_overhead_s = 0 + } +-- , Types.overlay = mempty +} + +-------------------------------------------------------------------------------- + name :: String -> Types.Profile -> Types.Profile name str = \p -> p {Types.name = str} @@ -235,6 +347,38 @@ generatorTps tps = generator (\g -> g {Types.tps = tps}) -------------------------------------------------------------------------------- +cluster :: (Types.Cluster -> Types.Cluster) -> Types.Profile -> Types.Profile +cluster f p = p {Types.cluster = f (Types.cluster p)} + +clusterMinimunStorage :: (Maybe (Types.ByNodeType Int)) -> Types.Profile -> Types.Profile +clusterMinimunStorage ms = cluster (\c -> c {Types.minimun_storage = ms}) + +clusterKeepRunningOn :: Types.Profile -> Types.Profile +clusterKeepRunningOn = cluster (\c -> c {Types.keep_running = True}) + +nomad :: (Types.Nomad -> Types.Nomad) -> Types.Profile -> Types.Profile +nomad f 
p = cluster (\c -> c {Types.nomad = f (Types.nomad c)}) p + +nomadNamespace :: String -> Types.Profile -> Types.Profile +nomadNamespace ns = nomad (\n -> n {Types.namespace = ns}) + +nomadClass :: String -> Types.Profile -> Types.Profile +nomadClass nc = nomad (\n -> n {Types.nomad_class = nc}) + +nomadResources :: (Types.ByNodeType Types.Resources) -> Types.Profile -> Types.Profile +nomadResources r = nomad (\n -> n {Types.resources = r}) + +nomadSSHLogsOn :: Types.Profile -> Types.Profile +nomadSSHLogsOn = nomad (\n -> n {Types.fetch_logs_ssh = True}) + +aws :: (Types.ClusterAWS -> Types.ClusterAWS) -> Types.Profile -> Types.Profile +aws f p = cluster (\c -> c {Types.aws = f (Types.aws c)}) p + +awsInstanceTypes :: (Types.ByNodeType String) -> Types.Profile -> Types.Profile +awsInstanceTypes i = aws (\n -> n {Types.instance_type = i}) + +-------------------------------------------------------------------------------- + analysis :: (Types.Analysis -> Types.Analysis) -> Types.Profile -> Types.Profile analysis f p = p {Types.analysis = f (Types.analysis p)} diff --git a/bench/cardano-profile/src/Cardano/Benchmarking/Profile/Map.hs b/bench/cardano-profile/src/Cardano/Benchmarking/Profile/Map.hs index 8512aae3aea..5fa5d62751c 100644 --- a/bench/cardano-profile/src/Cardano/Benchmarking/Profile/Map.hs +++ b/bench/cardano-profile/src/Cardano/Benchmarking/Profile/Map.hs @@ -15,7 +15,6 @@ import Prelude import Data.Function ((&)) import qualified Data.Map.Strict as Map -import qualified Data.Scientific as Scientific import qualified Cardano.Benchmarking.Profile as P import qualified Cardano.Benchmarking.Profile.Types as Types @@ -47,102 +46,69 @@ profiles = foldMap ) profilesNoEra --- The defaults +-- Aux functions -------------------------------------------------------------------------------- -dummy :: Types.Profile -dummy = Types.Profile { - Types.name = "" - , Types.desc = Nothing - , Types.composition = Types.Composition { - Types.locations = [] - , Types.n_bft_hosts 
= 0 - , Types.n_singular_hosts = 0 - , Types.n_dense_hosts = 0 - , Types.dense_pool_density = 0 - , Types.with_proxy = False - , Types.with_explorer = False - , Types.topology = Types.Line - , Types.with_chaindb_server = Nothing - , Types.n_hosts = 0 - , Types.n_pools = 0 - , Types.n_singular_pools = 0 - , Types.n_dense_pools = 0 - , Types.n_pool_hosts = 0 - } - , Types.era = Types.Allegra - , Types.genesis = Types.Genesis { - Types.network_magic = 0 - , Types.single_shot = False - , Types.per_pool_balance = 0 - , Types.funds_balance = 0 - , Types.utxo = 0 - , Types.active_slots_coeff = 0 - , Types.epoch_length = 0 - , Types.parameter_k = 0 - , Types.slot_duration = 0 - , Types.extra_future_offset = 0 - , Types.pparamsEpoch = 0 - , Types.delegators = 0 - , Types.shelley = mempty - , Types.alonzo = mempty - , Types.pool_coin = 0 - , Types.delegator_coin = 0 - } - , Types.scenario = Types.Idle - , Types.node = Types.Node { - Types.rts_flags_override = [] - , Types.shutdown_on_slot_synced = Nothing - , Types.shutdown_on_block_synced = Nothing - , Types.tracing_backend = "" - , Types.nodeTracer = False - , Types.verbatim = Types.NodeVerbatim Nothing - } - , Types.tracer = Types.Tracer { - Types.rtview = False - , Types.ekg = False - , Types.withresources = False - } - , Types.generator = Types.Generator { - Types.add_tx_size = 0 - , Types.init_cooldown = 0 - , Types.inputs_per_tx = 0 - , Types.outputs_per_tx = 0 - , Types.tx_fee = 0 - , Types.epochs = 0 - , Types.tps = 0 - , Types.plutus = Nothing - , Types.tx_count = 0 - } - , Types.analysis = Types.Analysis { - Types.analysisType = Nothing - , Types.cluster_base_startup_overhead_s = 0 - , Types.start_log_spread_s = 0 - , Types.last_log_spread_s = 0 - , Types.silence_since_last_block_s = 0 - , Types.tx_loss_ratio = Scientific.fromFloatDigits (0 :: Double) - , Types.finish_patience = 0 - , Types.filters = [] - , Types.filter_exprs = [] - , Types.minimum_chain_density = Scientific.fromFloatDigits (0 :: Double) - , 
Types.cluster_startup_overhead_s = 0 - } --- , Types.overlay = mempty -} - nomadPerf :: Types.Profile -> Types.Profile -nomadPerf = P.regions - [ - Types.AWS Types.EU_CENTRAL_1 - , Types.AWS Types.US_EAST_1 - , Types.AWS Types.AP_SOUTHEAST_2 - ] +nomadPerf = + P.regions + [ + Types.AWS Types.EU_CENTRAL_1 + , Types.AWS Types.US_EAST_1 + , Types.AWS Types.AP_SOUTHEAST_2 + ] + . + P.nomadNamespace "perf" + . + P.nomadClass "perf" + . + P.nomadResources (Types.ByNodeType { + Types.producer = Types.Resources 8 15400 16000 + , Types.explorer = Just $ Types.Resources 16 32000 64000 + }) + . + P.nomadSSHLogsOn + . + P.clusterKeepRunningOn + . + P.awsInstanceTypes (Types.ByNodeType { + Types.producer = "c5.2xlarge" + , Types.explorer = Just "m5.4xlarge" + }) + . + P.clusterMinimunStorage (Just $ Types.ByNodeType { + Types.producer = 12582912 + , Types.explorer = Just 14155776 + }) nomadSsd :: Types.Profile -> Types.Profile -nomadSsd = P.regions - [ - Types.AWS Types.EU_CENTRAL_1 - ] +nomadSsd = + P.regions + [ + Types.AWS Types.EU_CENTRAL_1 + ] + . + P.nomadNamespace "perf-ssd" + . + P.nomadClass "perf-ssd" + . + P.nomadResources (Types.ByNodeType { + Types.producer = Types.Resources 32 64000 64000 + , Types.explorer = Just $ Types.Resources 32 64000 64000 + }) + . + P.nomadSSHLogsOn + . + P.clusterKeepRunningOn + . + P.awsInstanceTypes (Types.ByNodeType { + Types.producer = "c5.9xlarge" + , Types.explorer = Nothing + }) + . + P.clusterMinimunStorage Nothing + +-------------------------------------------------------------------------------- -- TODO: forge-stress and forge-stress-light have the same .node.shutdown_on_slot_synced -- Adding a P.nameSuffix was abandoned to keep the code `grep` friendly! 
@@ -153,7 +119,7 @@ profilesNoEra = Map.fromList $ map (\p -> (Types.name p, p)) $ ------------------------------------------------------------------------------ -- fast: 2 nodes, FixedLoaded and "--shutdown-on-block-synced 1" ------------------------------------------------------------------------------ - let fast = dummy + let fast = P.defaults & P.utxo 0 . P.delegators 0 . P.epochLength 600 . P.parameterK 3 . P.fixedLoaded . P.generatorTps 15 @@ -176,7 +142,7 @@ profilesNoEra = Map.fromList $ map (\p -> (Types.name p, p)) $ ------------------------------------------------------------------------------ -- ci-test: 2 nodes, FixedLoaded and "--shutdown-on-block-synced 3" ------------------------------------------------------------------------------ - let ciTest = dummy + let ciTest = P.defaults & P.hosts 2 . P.utxo 0 . P.delegators 0 . P.epochLength 600 . P.parameterK 3 @@ -203,7 +169,7 @@ profilesNoEra = Map.fromList $ map (\p -> (Types.name p, p)) $ ------------------------------------------------------------------------------ -- ci-test-dense: 10 pools, FixedLoaded and "--shutdown-on-block-synced 3" ------------------------------------------------------------------------------ - let ciTestDense = dummy + let ciTestDense = P.defaults & P.uniCircle . P.pools 10 . P.loopback . P.p2pOff @@ -220,7 +186,7 @@ profilesNoEra = Map.fromList $ map (\p -> (Types.name p, p)) $ ------------------------------------------------------------------------------ -- epoch transition: 2 nodes, FixedLoaded and "--shutdown-on-slot-synced 900" ------------------------------------------------------------------------------ - let epochTransition = dummy + let epochTransition = P.defaults & P.uniCircle . P.hosts 2 . P.loopback . P.utxo 0 . 
P.delegators 0 @@ -236,7 +202,7 @@ profilesNoEra = Map.fromList $ map (\p -> (Types.name p, p)) $ ------------------------------------------------------------------------------ -- ci-bench: 2|5|10 nodes, FixedLoaded and "--shutdown-on-block-synced 15" ------------------------------------------------------------------------------ - let ciBench = dummy + let ciBench = P.defaults & P.fixedLoaded . P.generatorTps 15 . P.epochLength 600 . P.parameterK 3 . P.shutdownOnBlock 15 @@ -278,7 +244,7 @@ profilesNoEra = Map.fromList $ map (\p -> (Types.name p, p)) $ ------------------------------------------------------------------------------ -- forge-stress ------------------------------------------------------------------------------ - let forgeStress = dummy + let forgeStress = P.defaults & P.uniCircle . P.loopback . P.p2pOff @@ -327,7 +293,7 @@ profilesNoEra = Map.fromList $ map (\p -> (Types.name p, p)) $ ------------------------------------------------------------------------------ -- TODO: This is a special case of forge-stress. Mix both? Still used? ------------------------------------------------------------------------------ - let dish = dummy + let dish = P.defaults & P.uniCircle . P.hosts 3 . P.loopback . P.p2pOff @@ -348,7 +314,7 @@ profilesNoEra = Map.fromList $ map (\p -> (Types.name p, p)) $ ------------------------------------------------------------------------------ -- k3: 3 nodes and no "--shutdown-on-slot-synced" and no "--shutdown-on-block-synced" ------------------------------------------------------------------------------ - let k3 = dummy + let k3 = P.defaults & P.uniCircle . P.hosts 3 . P.loopback . 
P.p2pOff @@ -368,7 +334,7 @@ profilesNoEra = Map.fromList $ map (\p -> (Types.name p, p)) $ ------------------------------------------------------------------------------ -- 6 nodes and no "--shutdown-on-slot-synced" and no "--shutdown-on-block-synced" ------------------------------------------------------------------------------ - let idle = dummy + let idle = P.defaults & P.uniCircle . P.hosts 6 . P.loopback . P.p2pOff @@ -384,7 +350,7 @@ profilesNoEra = Map.fromList $ map (\p -> (Types.name p, p)) $ ------------------------------------------------------------------------------ -- 6 nodes and no "--shutdown-on-slot-synced" and no "--shutdown-on-block-synced" ------------------------------------------------------------------------------ - let tracerOnly = dummy + let tracerOnly = P.defaults & P.uniCircle . P.hosts 6 . P.loopback . P.p2pOff @@ -400,7 +366,7 @@ profilesNoEra = Map.fromList $ map (\p -> (Types.name p, p)) $ ------------------------------------------------------------------------------ -- 6 nodes and no "--shutdown-on-slot-synced" and no "--shutdown-on-block-synced" ------------------------------------------------------------------------------ - let noCliStop = dummy + let noCliStop = P.defaults & P.hosts 6 . P.fixedLoaded . P.utxo 0 . P.delegators 6 @@ -426,7 +392,7 @@ profilesNoEra = Map.fromList $ map (\p -> (Types.name p, p)) $ ------------------------------------------------------------------------------ -- model: 4 nodes, FixedLoaded and "--shutdown-on-slot-synced 56000" ------------------------------------------------------------------------------ - let model = dummy + let model = P.defaults & P.uniCircle . P.hosts 4 . P.loopback . P.epochLength 8000 . 
P.parameterK 40 @@ -445,7 +411,7 @@ profilesNoEra = Map.fromList $ map (\p -> (Types.name p, p)) $ ------------------------------------------------------------------------------ -- plutuscall: 6 nodes, FixedLoaded and "--shutdown-on-slot-synced 9000" ------------------------------------------------------------------------------ - let plutusCall = dummy + let plutusCall = P.defaults & P.uniCircle . P.hosts 6 . P.loopback . P.p2pOff @@ -470,7 +436,7 @@ profilesNoEra = Map.fromList $ map (\p -> (Types.name p, p)) $ ------------------------------------------------------------------------------ -- plutuscall: 6 nodes, FixedLoaded and "--shutdown-on-slot-synced 1200" ------------------------------------------------------------------------------ - let traceFull = dummy + let traceFull = P.defaults & P.torus . P.hosts 6 . P.loopback . P.p2pOff @@ -488,7 +454,7 @@ profilesNoEra = Map.fromList $ map (\p -> (Types.name p, p)) $ ------------------------------------------------------------------------------ -- cloud: (52 + 1) nodes, FixedLoaded and "--shutdown-on-slot-synced 56000" ------------------------------------------------------------------------------ - let cloud = dummy + let cloud = P.defaults & P.torusDense . P.hosts 52 . P.withExplorerNode . nomadPerf . P.utxo 4000000 . P.delegators 1000000 @@ -512,7 +478,7 @@ profilesNoEra = Map.fromList $ map (\p -> (Types.name p, p)) $ ------------------------------------------------------------------------------ -- ------------------------------------------------------------------------------ - let latency = dummy + let latency = P.defaults & P.torusDense . P.hosts 52 . P.withExplorerNode . nomadPerf . P.utxo 0 . 
P.delegators 0 @@ -527,7 +493,7 @@ profilesNoEra = Map.fromList $ map (\p -> (Types.name p, p)) $ ------------------------------------------------------------------------------ -- chainsync ------------------------------------------------------------------------------ - let chainsync = dummy + let chainsync = P.defaults & P.uniCircle . P.hostsChainsync 1 . P.withChaindbServer . P.withExplorerNode . P.loopback . P.utxo 0 . P.delegators 0 diff --git a/bench/cardano-profile/src/Cardano/Benchmarking/Profile/Types.hs b/bench/cardano-profile/src/Cardano/Benchmarking/Profile/Types.hs index 14b2130fc5e..5e644c8eded 100644 --- a/bench/cardano-profile/src/Cardano/Benchmarking/Profile/Types.hs +++ b/bench/cardano-profile/src/Cardano/Benchmarking/Profile/Types.hs @@ -13,6 +13,7 @@ module Cardano.Benchmarking.Profile.Types ( , Node (..), NodeVerbatim (..) , Tracer (..) , Generator (..), Plutus (..) +, Cluster (..), Nomad (..), ClusterAWS (..), ByNodeType (..), Resources (..) , Analysis (..), AnalysisFilterExpression (..), AnalysisFilterExpressionContent (..) ) where @@ -40,6 +41,7 @@ data Profile = Profile , node :: Node , tracer :: Tracer , generator :: Generator + , cluster :: Cluster , analysis :: Analysis -- TODO -- , cli_args :: Aeson.Object @@ -335,6 +337,77 @@ instance Aeson.FromJSON Plutus where -------------------------------------------------------------------------------- +-- | The cluster properties (if used). 
+data Cluster = Cluster + { nomad :: Nomad + , aws :: ClusterAWS + , minimun_storage :: Maybe (ByNodeType Int) + , keep_running :: Bool + } + deriving (Eq, Show, Generic) + +instance Aeson.ToJSON Cluster + +instance Aeson.FromJSON Cluster + +data Nomad = Nomad + { namespace :: String + , nomad_class :: String + , resources :: ByNodeType Resources + , fetch_logs_ssh :: Bool + } + deriving (Eq, Show, Generic) + +instance Aeson.ToJSON Nomad where + toJSON p@(Nomad _ _ _ _) = + Aeson.object + [ "namespace" Aeson..= namespace p + , "class" Aeson..= nomad_class p + , "resources" Aeson..= resources p + , "fetch_logs_ssh" Aeson..= fetch_logs_ssh p + ] + +instance Aeson.FromJSON Nomad where + parseJSON = + Aeson.withObject "Nomad" $ \o -> do + Nomad + <$> o Aeson..: "namespace" + <*> o Aeson..: "class" + <*> o Aeson..: "resources" + <*> o Aeson..: "fetch_logs_ssh" + +data ClusterAWS = ClusterAWS + { instance_type :: ByNodeType String + } + deriving (Eq, Show, Generic) + +instance Aeson.ToJSON ClusterAWS + +instance Aeson.FromJSON ClusterAWS + +data ByNodeType a = ByNodeType + { producer :: a + , explorer :: Maybe a + } + deriving (Eq, Show, Generic) + +instance Aeson.ToJSON a => Aeson.ToJSON (ByNodeType a) + +instance Aeson.FromJSON a => Aeson.FromJSON (ByNodeType a) + +data Resources = Resources + { cores :: Int + , memory :: Int + , memory_max :: Int + } + deriving (Eq, Show, Generic) + +instance Aeson.ToJSON Resources + +instance Aeson.FromJSON Resources + +-------------------------------------------------------------------------------- + -- | A topology as it's used to define benchmarking profiles. 
data Analysis = Analysis { analysisType :: Maybe String -- TODO: Rename in workbench/bash to "analysis_type" diff --git a/bench/cardano-profile/test/Main.hs b/bench/cardano-profile/test/Main.hs index 9059c6409e7..42ddb513c03 100644 --- a/bench/cardano-profile/test/Main.hs +++ b/bench/cardano-profile/test/Main.hs @@ -119,6 +119,28 @@ ciTestBage = Types.Profile { }) , Types.tx_count = 9000 } + , Types.cluster = Types.Cluster { + Types.nomad = Types.Nomad { + Types.namespace = "default" + , Types.nomad_class = "" + , Types.resources = Types.ByNodeType { + Types.producer = Types.Resources 2 15000 16000 + , Types.explorer = Just $ Types.Resources 2 15000 16000 + } + , Types.fetch_logs_ssh = False + } + , Types.aws = Types.ClusterAWS { + Types.instance_type = Types.ByNodeType { + Types.producer = "c5.2xlarge" + , Types.explorer = Just "m5.4xlarge" + } + } + , Types.minimun_storage = Just $ Types.ByNodeType { + Types.producer = 12582912 + , Types.explorer = Just 14155776 + } + , Types.keep_running = False + } , Types.analysis = Types.Analysis { Types.analysisType = Just "standard" , Types.cluster_base_startup_overhead_s = 40 @@ -265,6 +287,23 @@ profilesMap = Tasty.testGroup ) ) ---------------------------------------------------------------------- + -- Show the first profile with differences in the Cluster type. + ---------------------------------------------------------------------- + mapM_ + (uncurry $ assertEqual + ("Profile == (decode \"" ++ fp ++ "\") - Cluster") + ) + -- Map.Map to keep the key / profile name. + (zip + (Map.assocs $ Map.map + (\p -> Types.cluster p) + (ans :: Map.Map String Types.Profile) + ) + (Map.assocs $ Map.map + (\p -> Types.cluster p) Profiles.profiles + ) + ) + ---------------------------------------------------------------------- -- Show the first profile with differences in the Analysis type (partial). ---------------------------------------------------------------------- mapM_