
Commit

defaultCluster
fmaste committed Jun 19, 2024
1 parent 0fdf3b7 commit 970c67a
Showing 6 changed files with 43 additions and 105 deletions.
57 changes: 31 additions & 26 deletions bench/cardano-profile/src/Cardano/Benchmarking/Profile.hs
@@ -181,31 +181,7 @@ empty = Types.Profile {
, Types.ekg = False
, Types.withresources = False
}
, Types.cluster = Types.Cluster {
Types.nomad = Types.ClusterNomad {
Types.namespace = "default"
, Types.nomad_class = ""
, Types.resources = Types.ByNodeType {
Types.producer = Types.Resources 0 0 0
, Types.explorer = Just $ Types.Resources 0 0 0
}
, Types.host_volumes = Nothing
, Types.fetch_logs_ssh = False
}
, Types.aws = Types.ClusterAWS {
Types.instance_type = Types.ByNodeType {
Types.producer = ""
, Types.explorer = Nothing
}
, Types.use_public_routing = False
}
, Types.minimun_storage = Just $ Types.ByNodeType {
Types.producer = 0
, Types.explorer = Nothing
}
, Types.ssd_directory = Nothing
, Types.keep_running = False
}
, Types.cluster = Nothing
, Types.analysis = Types.Analysis {
Types.analysisType = Nothing
, Types.cluster_base_startup_overhead_s = 40 -- TODO: Make it zero as default!
@@ -664,8 +640,37 @@ tracerWithresources = tracer (\t -> t {Types.withresources = True})
-- Cluster.
--------------------------------------------------------------------------------

emptyCluster :: Types.Cluster
emptyCluster = Types.Cluster
{
Types.nomad = Types.ClusterNomad {
Types.namespace = ""
, Types.nomad_class = ""
, Types.resources = Types.ByNodeType {
Types.producer = Types.Resources 0 0 0
, Types.explorer = Nothing
}
, Types.host_volumes = Nothing
, Types.fetch_logs_ssh = False
}
, Types.aws = Types.ClusterAWS {
Types.instance_type = Types.ByNodeType {
Types.producer = ""
, Types.explorer = Nothing
}
, Types.use_public_routing = False
}
, Types.minimun_storage = Nothing
, Types.ssd_directory = Nothing
, Types.keep_running = False
}

cluster :: (Types.Cluster -> Types.Cluster) -> Types.Profile -> Types.Profile
cluster f p = p {Types.cluster = f (Types.cluster p)}
cluster f p =
let c = case Types.cluster p of
(Just j) -> j
Nothing -> emptyCluster
in p { Types.cluster = (Just $ f c) }

clusterMinimunStorage :: (Maybe (Types.ByNodeType Int)) -> Types.Profile -> Types.Profile
clusterMinimunStorage ms = cluster (\c -> c {Types.minimun_storage = ms})
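
For illustration only: a minimal sketch (not part of this commit) of how the reworked combinators above compose now that profiles start with Types.cluster = Nothing. The Types module path and the ByNodeType field layout are assumptions read off the diff itself, not taken from the repository.

    import           Data.Function ((&))
    import qualified Cardano.Benchmarking.Profile       as P
    import qualified Cardano.Benchmarking.Profile.Types as Types -- assumed module path

    -- A profile that never applies a cluster combinator keeps Nothing,
    -- which the jq defaults below render as "cluster": null.
    plainProfile :: Types.Profile
    plainProfile = P.empty

    -- The first cluster-touching combinator falls back to emptyCluster
    -- before applying its update, so only minimun_storage differs from
    -- the empty defaults here.
    storageProfile :: Types.Profile
    storageProfile =
      P.empty
        & P.clusterMinimunStorage
            (Just (Types.ByNodeType { Types.producer = 12582912
                                    , Types.explorer = Just 14155776 }))
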
55 changes: 6 additions & 49 deletions bench/cardano-profile/src/Cardano/Benchmarking/Profile/Map.hs
@@ -250,7 +250,7 @@ profileNoEraCIEmpty = map
------------------------------------------------------------------------------
let fast = P.empty
& P.shutdownOnBlock 1
fastLocal = fast & P.pparamsEpoch 300 . P.uniCircle . P.hosts 2 . P.loopback . clusterDefault -- TODO: "cluster" should be "null" here.
fastLocal = fast & P.pparamsEpoch 300 . P.uniCircle . P.hosts 2 . P.loopback
-- TODO: Change to `P.pparamsEpoch 300`, like `ci-test-nomadperf`
fastPerf = fast & P.pparamsEpoch 366 . P.v8Preview . P.torusDense . P.hosts 52 . P.withExplorerNode . nomadPerf
fastSsd = fast & P.pparamsEpoch 366 . P.v8Preview . P.torusDense . P.hosts 52 . P.withExplorerNode . nomadSsd . clusterNomadSsd
@@ -278,7 +278,7 @@ profileNoEraCIEmpty = map
. P.pparamsEpoch 300
. P.shutdownOnBlock 3
-- TODO: Why are not both using UniCircle ????
ciTestLocal = ciTest & P.uniCircle . P.loopback . clusterDefault -- TODO: "cluster" should be "null" here.
ciTestLocal = ciTest & P.uniCircle . P.loopback
ciTestNomadPerf = ciTest & P.torus . nomadPerf . P.withExplorerNode
in [
-- Local
& P.uniCircle . P.pools 10 . P.loopback
. P.pparamsEpoch 300
. P.shutdownOnBlock 3
. clusterDefault -- TODO: "cluster" should be "null" here.
in [
(ciTestDense & P.name "ci-test-dense10" . valueLocal . P.tracerOn . P.newTracing . P.p2pOff)
]
. P.pparamsEpoch 300
. P.shutdownOnBlock 15
. P.tracerWithresources
. clusterDefault -- TODO: "cluster" should be "null" here.
in [
-- 6 nodes, local
(traceBench & P.name "trace-bench" . valueLocal . P.tracerOn . P.newTracing . P.p2pOff )
. P.pparamsEpoch 300
. P.shutdownOnSlot 1200
. P.tracerWithresources
. clusterDefault -- TODO: "cluster" should be "null" here.
in [
(traceFull & P.name "trace-full" . valueLocal . P.tracerOn . P.newTracing . P.p2pOff )
, (traceFull & P.name "trace-full-rtview" . valueLocal . P.tracerOn . P.newTracing . P.p2pOff . P.tracerRtview)
& P.uniCircle . P.hosts 2 . P.loopback
. P.pparamsEpoch 300
. P.shutdownOnSlot 900
. clusterDefault -- TODO: "cluster" should be "null" here.
in [
(epochTransition & P.name "epoch-transition" . valueLocal . P.tracerOn . P.newTracing)
]
. P.generatorEpochs 3 . P.initCooldown 5
. P.newTracing
. P.analysisStandard
. clusterDefault -- TODO: "cluster" should be "null" here.
-- TODO: Why this are the only "forge-stress-*" not using epoch 300 ???
forgeStress1 = P.v8Preview . P.pparamsEpoch 366 . P.hosts 1
forgeStress3 = P.pparamsEpoch 300 . P.hosts 3
@@ -427,13 +422,13 @@ profilesNoEraTheRest =
ciBench02 = ciBench & P.hosts 2 . P.utxo 500000 . P.delegators 100000
ciBench10 = ciBench & P.hosts 10 . P.utxo 500000 . P.delegators 100000
-- TODO: Why are not all using UniCircle ????
ciBench02Local = ciBench02 & P.uniCircle . P.loopback . clusterDefault -- TODO: "cluster" should be "null" here.
ciBench02Local = ciBench02 & P.uniCircle . P.loopback
ciBench02LocalValue = ciBench02Local & P.pparamsEpoch 300
-- TODO: Why this are the only "fast-*|ci-*" not using epoch 300 ???
-- Fix and move to `profileNoEraCIEmpty`.
ciBench02LocalPlutus = ciBench02Local & P.v8Preview . P.pparamsEpoch 366
ciBench02NomadPerf = ciBench02 & P.pparamsEpoch 300 . P.torus . nomadPerf . P.withExplorerNode
ciBench10Local = ciBench10 & P.pparamsEpoch 300 . P.uniCircle . P.loopback . clusterDefault -- TODO: "cluster" should be "null" here.
ciBench10Local = ciBench10 & P.pparamsEpoch 300 . P.uniCircle . P.loopback
in [
-- 2 nodes, local
(ciBench02LocalValue & P.name "ci-bench" . valueLocal . P.dreps 0 . P.tracerOn . P.newTracing . P.p2pOff )
@@ -474,7 +469,6 @@ profilesNoEraTheRest =
. P.dreps 0
. P.tracerOn . P.newTracing
. P.analysisStandard
. clusterDefault -- TODO: "cluster" should be "null" here.
dish30M = dish & P.utxo 30000000 . P.delegators 0
dish10M = dish & P.utxo 10000000 . P.delegators 0
in [
. P.shutdownOnOff . P.generatorEpochs 3 . P.initCooldown 5
. P.tracerOn . P.newTracing
. P.analysisStandard . P.analysisUnitary
. clusterDefault -- TODO: "cluster" should be "null" here.
in [
(k3 & P.name "k3-3ep-5kTx-10000kU-1300kD-64kbs-fixed-loaded" . P.slotDuration 0.2 . P.tps 12)
, (k3 & P.name "k3-3ep-9kTx-10000kU-1300kD-64kbs-5tps-fixed-loaded" . P.slotDuration 1 . P.tps 5)
. P.idle . valueCloud -- TODO: Why TPS=12 ? for "idle" all zeroes!
. P.shutdownOnOff . P.generatorEpochs 3 . P.initCooldown 5
. P.analysisUnitary
. clusterDefault -- TODO: "cluster" should be "null" here.
updateQuorum = P.shelley (KeyMap.insert "updateQuorum" (Aeson.Number 1))
in [
(idle & P.name "devops" . P.slotDuration 0.2 . P.epochLength 1000 . P.activeSlotsCoeff 0.1 . P.parameterK 10 . P.extraFutureOffset 10 . P.tracerOn . P.newTracing . P.p2pOff . P.analysisOff . updateQuorum)
. P.tracerOnly . valueCloud -- TODO: Why TPS=12 ? for "tracer" all zeroes!
. P.shutdownOnOff . P.generatorEpochs 3 . P.initCooldown 5
. P.analysisStandard . P.analysisUnitary
. clusterDefault -- TODO: "cluster" should be "null" here.
in [
(tracerOnly & P.name "tracer-only" . P.tracerOn . P.newTracing . P.p2pOff)
]
. P.analysisStandard
-- TODO: Why are not all using Torus ????
-- TODO: Why all using the cloud saturation value ???
noCliStopLocal = noCliStop & P.uniCircle . P.loopback . clusterDefault -- TODO: "cluster" should be "null" here.
noCliStopLocal = noCliStop & P.uniCircle . P.loopback
noCliStopLocal120 = noCliStopLocal & P.pparamsEpoch 300
noCliStopLocal002 = noCliStopLocal & P.v8Preview . P.pparamsEpoch 366
noCliStopNomadPerf = noCliStop & P.pparamsEpoch 300 . P.torus . nomadPerf . P.withExplorerNode
@@ -592,7 +583,6 @@ profilesNoEraTheRest =
. P.shutdownOnSlot 56000 . P.generatorEpochs 7 . P.initCooldown 45
. P.tracerOn . P.newTracing
. P.analysisStandard . P.analysisEpoch3Plus
. clusterDefault -- TODO: "cluster" should be "null" here.
in [
-- TODO: Different pparamsEpoch introduced in last changes!
-- TODO: TX fee went from 1025000 to 1008000 ????
@@ -620,7 +610,6 @@ profilesNoEraTheRest =
. P.shutdownOnSlot 9000 . P.generatorEpochs 15 . P.initCooldown 5
. P.tracerOn . P.newTracing
. P.analysisStandard . P.analysisSizeModerate . P.analysisEpoch3Plus
. clusterDefault -- TODO: "cluster" should be "null" here.
loop = plutusCall & P.plutusType "LimitSaturationLoop" . P.plutusScript "Loop" . redeemerLoop
ecdsa = plutusCall & P.plutusType "LimitTxPerBlock_8" . P.plutusScript "EcdsaSecp256k1Loop" . redeemerECDSA
schnorr = plutusCall & P.plutusType "LimitTxPerBlock_8" . P.plutusScript "SchnorrSecp256k1Loop" . redeemerSchnorr
@@ -697,7 +686,6 @@ profilesNoEraTheRest =
. P.generatorEpochs 3 . P.initCooldown 5
. P.tracerOn . P.newTracing
. P.analysisStandard
. clusterDefault -- TODO: "cluster" should be "null" here.
in [
(latency & P.name "latency-nomadperf" . P.p2pOn)
, (latency & P.name "latency-nomadperfssd" . P.p2pOn . clusterNomadSsd)
. P.chainsync . valueCloud -- TODO: Why TPS=12 ? for "chainsync" all zeroes!
. P.generatorEpochs 3 . P.initCooldown 5
. P.analysisPerformance
. clusterDefault -- TODO: "cluster" should be "null" here.
byron = chainsync & P.shutdownOnSlot 237599
alonzo = chainsync & P.shutdownOnSlot 38901589
in [
@@ -747,7 +734,7 @@ profilesNoEraTheRest =
. clusterNomadSsd
. P.analysisSizeFull . P.analysisEpoch3Plus
fast = P.loopback
. P.shutdownOnBlock 1 . P.generatorEpochs 3 . valueLocal . clusterDefault -- TODO: "cluster" should be "null" here.
. P.shutdownOnBlock 1 . P.generatorEpochs 3 . valueLocal
in [
(utxoScale & P.name "utxoscale-solo-12M16G-nomadperfssd" . euCentral1 . P.epochLength 1200 . P.parameterK 6 . P.utxo 12000000 . P.delegators 1200000 . P.poolBalance 1000000000000000 . P.funds 20000000000000 . P.pparamsEpoch 366 . P.v8Preview . P.rtsHeapLimit 16384 . P.heapLimit 16384)
, (utxoScale & P.name "utxoscale-solo-12M64G-nomadperfssd" . euCentral1 . P.epochLength 1200 . P.parameterK 6 . P.utxo 12000000 . P.delegators 1200000 . P.poolBalance 1000000000000000 . P.funds 20000000000000 . P.pparamsEpoch 366 . P.v8Preview )
@@ -829,36 +816,6 @@ clusterNomadSsd =
.
P.clusterMinimunStorage Nothing

clusterDefault :: Types.Profile -> Types.Profile
clusterDefault p =
p {Types.cluster =
Types.Cluster {
Types.nomad = Types.ClusterNomad {
Types.namespace = "default"
, Types.nomad_class = ""
, Types.resources = Types.ByNodeType {
Types.producer = Types.Resources 2 15000 16000
, Types.explorer = Just $ Types.Resources 2 15000 16000
}
, Types.host_volumes = Nothing
, Types.fetch_logs_ssh = False
}
, Types.aws = Types.ClusterAWS {
Types.instance_type = Types.ByNodeType {
Types.producer = "c5.2xlarge"
, Types.explorer = Just "m5.4xlarge"
}
, Types.use_public_routing = False
}
, Types.minimun_storage = Just $ Types.ByNodeType {
Types.producer = 12582912
, Types.explorer = Just 14155776
}
, Types.ssd_directory = Nothing
, Types.keep_running = False
}
}

--------------------------------------------------------------------------------

{--
@@ -70,7 +70,7 @@ data Profile = Profile
, generator :: Generator

, tracer :: Tracer
, cluster :: Cluster
, cluster :: Maybe Cluster
, analysis :: Analysis
, derived :: Derived
, cli_args :: CliArgs
2 changes: 1 addition & 1 deletion bench/cardano-profile/test/Main.hs
@@ -157,7 +157,7 @@ ciTestBage = Types.Profile {
}
, Types.tx_count = Just 9000
}
, Types.cluster = Types.Cluster {
, Types.cluster = Just $ Types.Cluster {
Types.nomad = Types.ClusterNomad {
Types.namespace = "default"
, Types.nomad_class = ""
29 changes: 1 addition & 28 deletions nix/workbench/profile/prof0-defaults.jq
@@ -92,34 +92,7 @@ def era_defaults($era):
, withresources: false # enable resource tracing for cardano-tracer
}

, cluster:
{ nomad:
{ namespace: "default"
, class: ""
# As they will be used in the "group.*.resources" of the Nomad Job JSON.
, resources:
{ producer: {cores: 2, memory: 15000, memory_max: 16000}
, explorer: {cores: 2, memory: 15000, memory_max: 16000}
}
# Volumes like {source: "ssd1", destination: "/ssd1", read_only: false}
, host_volumes: null
, fetch_logs_ssh: false
}
, aws:
{ instance_type:
{ producer: "c5.2xlarge"
, explorer: "m5.4xlarge"
}
# "attr.unique.platform.aws.public-ipv4" to bind and service definition.
, use_public_routing: false
}
, minimun_storage:
{ producer: 12582912 # 12×1024×1024
, explorer: 14155776 # 13.5×1024×1024
}
, keep_running: false
, ssd_directory: null
}
, cluster: null
}

} | (.common * (.[$era] // {}));
3 changes: 3 additions & 0 deletions nix/workbench/profile/prof1-variants.jq
@@ -215,6 +215,7 @@ def all_profile_variants:
{ nomad:
{ namespace: "perf"
, class: "perf"
# As they will be used in the "group.*.resources" of the Nomad Job JSON.
, resources:
{
# Producer nodes use this specs, make sure they are available!
{ producer: "c5.2xlarge"
, explorer: "m5.4xlarge"
}
# "attr.unique.platform.aws.public-ipv4" to bind and service definition.
, use_public_routing: true
}
# We are requiring 10.5GB on the explorer node and 9GB on the others.
{ producer: {cores: 16, memory: 120000, memory_max: 124000}
, explorer: {cores: 16, memory: 120000, memory_max: 124000}
}
# Volumes like {source: "ssd1", destination: "/ssd1", read_only: false}
, host_volumes: [
{source: "ssd1", destination: "/ssd1", read_only: false}
, {source: "ssd2", destination: "/ssd2", read_only: false}
