diff --git a/community/app/src/main/scala/com/digitalasset/canton/config/CantonConfig.scala b/community/app/src/main/scala/com/digitalasset/canton/config/CantonConfig.scala
index 9145517b3..4b26af1d8 100644
--- a/community/app/src/main/scala/com/digitalasset/canton/config/CantonConfig.scala
+++ b/community/app/src/main/scala/com/digitalasset/canton/config/CantonConfig.scala
@@ -36,6 +36,7 @@ import com.digitalasset.canton.time._
 import com.digitalasset.canton.tracing.TracingConfig
 import com.typesafe.config.ConfigException.UnresolvedSubstitution
 import com.typesafe.config.{Config, ConfigException, ConfigFactory, ConfigRenderOptions}
+import com.typesafe.scalalogging.LazyLogging
 import pureconfig._
 import pureconfig.error.{CannotConvert, FailureReason}
 import pureconfig.generic.{DerivedConfigWriter, FieldCoproductHint, ProductHint}
@@ -46,6 +47,7 @@ import scala.annotation.nowarn
 import scala.concurrent.duration._
 import scala.jdk.DurationConverters._
 import scala.reflect.ClassTag
+import monocle.macros.syntax.lens._

 /** Configuration for a check */
 sealed trait CheckConfig
@@ -120,10 +122,26 @@ final case class MonitoringConfig(
     metrics: MetricsConfig = MetricsConfig(),
     delayLoggingThreshold: NonNegativeFiniteDuration = NonNegativeFiniteDuration.ofSeconds(20),
     tracing: TracingConfig = TracingConfig(),
-    logMessagePayloads: Boolean = false,
+    // TODO(i9014) remove (breaking change)
+    @Deprecated // use logging.api.messagePayloads instead
+    logMessagePayloads: Option[Boolean] = None,
     logQueryCost: Option[QueryCostMonitoringConfig] = None,
     logSlowFutures: Boolean = false,
-)
+    logging: LoggingConfig = LoggingConfig(),
+) extends LazyLogging {
+
+  // merge in backwards compatible config options
+  def getLoggingConfig: LoggingConfig = (logMessagePayloads, logging.api.messagePayloads) match {
+    case (Some(fst), _) =>
+      if (!logging.api.messagePayloads.forall(_ == fst))
+        logger.error(
+          "Broken config validation: logging.api.message-payloads differs from logMessagePayloads"
+        )
+      logging.focus(_.api.messagePayloads).replace(Some(fst))
+    case _ => logging
+  }
+
+}

 /** Configuration for console command timeouts
  *
@@ -304,7 +322,7 @@ trait CantonConfig {
       DomainNodeParameters(
         monitoring.tracing,
         monitoring.delayLoggingThreshold,
-        monitoring.logMessagePayloads,
+        monitoring.getLoggingConfig,
         monitoring.logQueryCost,
         parameters.enableAdditionalConsistencyChecks,
         features.enablePreviewCommands,
@@ -329,7 +347,7 @@ trait CantonConfig {
       ParticipantNodeParameters(
         monitoring.tracing,
         monitoring.delayLoggingThreshold,
-        monitoring.logMessagePayloads,
+        monitoring.getLoggingConfig,
         monitoring.logQueryCost,
         parameters.enableAdditionalConsistencyChecks,
         features.enablePreviewCommands,
@@ -793,6 +811,10 @@ object CantonConfig {
     lazy implicit val metricsConfigReader: ConfigReader[MetricsConfig] = deriveReader[MetricsConfig]
     lazy implicit val queryCostMonitoringConfigReader: ConfigReader[QueryCostMonitoringConfig] =
       deriveReader[QueryCostMonitoringConfig]
+    lazy implicit val apiLoggingConfigReader: ConfigReader[ApiLoggingConfig] =
+      deriveReader[ApiLoggingConfig]
+    lazy implicit val loggingConfigReader: ConfigReader[LoggingConfig] =
+      deriveReader[LoggingConfig]
     lazy implicit val monitoringConfigReader: ConfigReader[MonitoringConfig] =
       deriveReader[MonitoringConfig]
     lazy implicit val consoleCommandTimeoutReader: ConfigReader[ConsoleCommandTimeout] =
@@ -1124,6 +1146,10 @@ object CantonConfig {
     lazy implicit val metricsConfigWriter: ConfigWriter[MetricsConfig] = deriveWriter[MetricsConfig]
     lazy implicit val
queryCostMonitoringConfig: ConfigWriter[QueryCostMonitoringConfig] = deriveWriter[QueryCostMonitoringConfig] + lazy implicit val apiLoggingConfigWriter: ConfigWriter[ApiLoggingConfig] = + deriveWriter[ApiLoggingConfig] + lazy implicit val loggingConfigWriter: ConfigWriter[LoggingConfig] = + deriveWriter[LoggingConfig] lazy implicit val monitoringConfigWriter: ConfigWriter[MonitoringConfig] = deriveWriter[MonitoringConfig] lazy implicit val consoleCommandTimeoutWriter: ConfigWriter[ConsoleCommandTimeout] = diff --git a/community/app/src/main/scala/com/digitalasset/canton/config/CommunityConfigValidations.scala b/community/app/src/main/scala/com/digitalasset/canton/config/CommunityConfigValidations.scala index f8becd030..09afeb5e6 100644 --- a/community/app/src/main/scala/com/digitalasset/canton/config/CommunityConfigValidations.scala +++ b/community/app/src/main/scala/com/digitalasset/canton/config/CommunityConfigValidations.scala @@ -21,7 +21,12 @@ object CommunityConfigValidations extends ConfigValidations[CantonCommunityConfi type Validation = CantonCommunityConfig => ValidatedNel[String, Unit] override protected val validations: List[Validation] = - List[Validation](noDuplicateStorage, atLeastOneNode) + List[Validation](noDuplicateStorage, atLeastOneNode) ++ genericValidations[ + CantonCommunityConfig + ] + + private[config] def genericValidations[C <: CantonConfig]: List[C => ValidatedNel[String, Unit]] = + List(backwardsCompatibleLoggingConfig) /** Group node configs by db access to find matching db storage configs. * Overcomplicated types used are to work around that at this point nodes could have conflicting names so we can't just @@ -111,4 +116,22 @@ object CommunityConfigValidations extends ConfigValidations[CantonCommunityConfi } + /** Check that logging configs are backwards compatible but consistent */ + private def backwardsCompatibleLoggingConfig( + config: CantonConfig + ): ValidatedNel[String, Unit] = { + (config.monitoring.logMessagePayloads, config.monitoring.logging.api.messagePayloads) match { + case (Some(fst), Some(snd)) => + Validated.condNel( + fst == snd, + (), + backwardsCompatibleLoggingConfigErr, + ) + case _ => Valid + } + } + + private[config] val backwardsCompatibleLoggingConfigErr = + "Inconsistent configuration of canton.monitoring.log-message-payloads and canton.monitoring.logging.api.message-payloads. Please use the latter in your configuration" + } diff --git a/community/app/src/main/scala/com/digitalasset/canton/console/commands/ParticipantAdministration.scala b/community/app/src/main/scala/com/digitalasset/canton/console/commands/ParticipantAdministration.scala index 6b4a421aa..c64280cfd 100644 --- a/community/app/src/main/scala/com/digitalasset/canton/console/commands/ParticipantAdministration.scala +++ b/community/app/src/main/scala/com/digitalasset/canton/console/commands/ParticipantAdministration.scala @@ -973,19 +973,17 @@ trait ParticipantAdministration extends FeatureFlagFilter { @Help.Summary( "Test whether a participant is connected to and permissioned on a domain where we have a healthy subscription." 
) - def active(domain: DomainAlias): Boolean = { + def active(domain: DomainAlias): Boolean = list_connected().find(_.domainAlias == domain) match { case Some(item) if item.healthy => topology.participant_domain_states.active(item.domainId, id) case _ => false } - } @Help.Summary( - "Test whether a participant is connected to and permissioned on a domain reference", - FeatureFlag.Testing, + "Test whether a participant is connected to and permissioned on a domain reference" ) - def active(reference: DomainAdministration): Boolean = check(FeatureFlag.Testing) { + def active(reference: DomainAdministration): Boolean = { val domainId = reference.id list_connected().find(_.domainId == domainId) match { case None => false @@ -994,12 +992,10 @@ trait ParticipantAdministration extends FeatureFlagFilter { } } @Help.Summary( - "Test whether a participant is connected to a domain reference", - FeatureFlag.Testing, + "Test whether a participant is connected to a domain reference" ) - def is_connected(reference: DomainAdministration): Boolean = check(FeatureFlag.Testing) { + def is_connected(reference: DomainAdministration): Boolean = list_connected().exists(_.domainId == reference.id) - } private def confirm_agreement(domainAlias: DomainAlias): Unit = { diff --git a/community/app/src/main/scala/com/digitalasset/canton/console/commands/PartiesAdministration.scala b/community/app/src/main/scala/com/digitalasset/canton/console/commands/PartiesAdministration.scala index ba12054e9..5a8ab70d7 100644 --- a/community/app/src/main/scala/com/digitalasset/canton/console/commands/PartiesAdministration.scala +++ b/community/app/src/main/scala/com/digitalasset/canton/console/commands/PartiesAdministration.scala @@ -49,10 +49,19 @@ class PartiesAdministrationGroup(runner: AdminCommandRunner, consoleEnvironment: "List active parties, their active participants, and the participants' permissions on domains." ) @Help.Description( - """This command allows you to deeply inspect the topology state used for synchronisation. - |The response is built from the timestamped topology transactions of each domain. - |The filterDomain parameter is used to filter the results by domain id; - |the result only contains entries whose domain id starts with `filterDomain`.""" + """Inspect the parties known by this participant as used for synchronisation. + |The response is built from the timestamped topology transactions of each domain, excluding the + |authorized store of the given node. For each known party, the list of active + |participants and their permission on the domain for that party is given. + | + filterParty: Filter by parties starting with the given string. + filterParticipant: Filter for parties that are hosted by a participant with an id starting with the given string + filterDomain: Filter by domains whose id starts with the given string. + asOf: Optional timestamp to inspect the topology state at a given point in time. + limit: Limit on the number of parties fetched (defaults to 100). + + Example: participant1.parties.list(filterParty="alice") + """ ) def list( filterParty: String = "", @@ -81,16 +90,31 @@ class ParticipantPartiesAdministrationGroup( consoleEnvironment: ConsoleEnvironment, ) extends PartiesAdministrationGroup(runner, consoleEnvironment) { - @Help.Summary("List parties managed by this participant") - @Help.Description("""The filterDomain parameter is used to filter the results by domain id; - |the result only contains entries whose domain id starts with `filterDomain`. 
- |Inactive participants hosting the party are not shown in the result.""") + @Help.Summary("List parties hosted by this participant") + @Help.Description("""Inspect the parties hosted by this participant as used for synchronisation. + |The response is built from the timestamped topology transactions of each domain, excluding the + |authorized store of the given node. The search will include all hosted parties and is equivalent + |to running the `list` method using the participant id of the invoking participant. + | + filterParty: Filter by parties starting with the given string. + filterDomain: Filter by domains whose id starts with the given string. + asOf: Optional timestamp to inspect the topology state at a given point in time. + limit: How many items to return. Defaults to 100. + + Example: participant1.parties.hosted(filterParty="alice")""") def hosted( filterParty: String = "", filterDomain: String = "", asOf: Option[Instant] = None, + limit: Int = 100, ): Seq[ListPartiesResult] = { - list(filterParty, filterParticipant = participantId.filterString, filterDomain, asOf) + list( + filterParty, + filterParticipant = participantId.filterString, + filterDomain = filterDomain, + asOf = asOf, + limit = limit, + ) } @Help.Summary("Enable/add party to participant") diff --git a/community/app/src/main/scala/com/digitalasset/canton/environment/Environment.scala b/community/app/src/main/scala/com/digitalasset/canton/environment/Environment.scala index 83d4aadb2..551b01b1c 100644 --- a/community/app/src/main/scala/com/digitalasset/canton/environment/Environment.scala +++ b/community/app/src/main/scala/com/digitalasset/canton/environment/Environment.scala @@ -318,7 +318,7 @@ trait Environment extends NamedLogging with AutoCloseable with NoTracing { protected def createParticipant( name: String, participantConfig: config.ParticipantConfigType, - ): ParticipantNodeBootstrap = + ): ParticipantNodeBootstrap = { participantNodeFactory .create( name, @@ -332,6 +332,7 @@ trait Environment extends NamedLogging with AutoCloseable with NoTracing { loggerFactory, ) .valueOr(err => throw new RuntimeException(s"Failed to create participant bootstrap: $err")) + } @VisibleForTesting protected def createDomain( diff --git a/community/app/src/pack/deployment/docker/README.md b/community/app/src/pack/deployment/docker/README.md deleted file mode 100644 index 3d0457918..000000000 --- a/community/app/src/pack/deployment/docker/README.md +++ /dev/null @@ -1,121 +0,0 @@ - -# Docker Compose Connect Setup - -This docker compose example allows you to run a dockerized version of daml -connect building blocks for a participant node including a local domain. So far, -it contains -- connect node with ledger api on 4011 -- a local domain (not exposed outside of the docker environment) -- navigator at http://localhost:4000 -- json api at http://localhost:4001 -- trigger service at http://localhost:4002 -- a postgres database at port 4032 - -## Configuring - -The deployment can be customized using the directory `data`. You can amend the Canton configuration using the -configuration mixin in `data/canton/config.canton` or the configuration file `data/canton/participant.conf`. However, -do not change the ports, as some node services depend on it. - -There are a few environment variables that control the versions used: -``` - CANTON_VERSION=latest - SDK_VERSION=1.18.1 - CANTON_TYPE=community -``` -You can set them before starting up to define which docker images will be used in your deployment. 
- -### Parties and Domain Connections - -The simplest way to define parties and domain connections is to configure them using environment variables. -You can allocate new parties by defining an environment variable (party hints separated using `;`) -``` - CANTON_ALLOCATE_PARTIES="alice;bob" -``` -and you can set domain connections (also separated using `;`) -``` - CANTON_CONNECT_DOMAINS="mydomain#http://localhost:10018" -``` - -Please note that the domain connections will get the priority equivalent to their -position in the environment string. Therefore, the most important domain should be mentioned last. -You need to separate the alias from the URL using '#'. - -Docker-compose will let you define environment variables using an `.env` file in the working directory. -However, be careful to not use quotes "" in such a file, as the quotes will be escaped and added to the -string. - -Parties and domain connections can also be configured in `data/canton/bootstrap.canton`. - -### Exposed Ports - -You can define the exposed ports using the environment variable `BASE_PORT`. The default value is 40. -As a result, the ports exposed on the host machine will be at `BASE_PORT + "x"`: -- Ledger Api at `BASE_PORT + 11, default 4011` -- Admin Api at `BASE_PORT + 12, default 4012` -- JSON Api at `BASE_PORT + 01, default 4001` -- Trigger Service at `BASE_PORT + 02, default 4002` -- Navigator at `BASE_PORT + 00, default 4000` -- Postgres at `BASE_PORT + 32, default 4032` - -This way, you can run several deployments on the same host. - -### Dars - -Dars will automatically be uploaded if placed in the directory `data/dars`. If you need to upload -a DAR on a running system, use the Canton console to connect to the participant and run -``` - myparticipant.dars.upload("") -``` - -### Static Content - -You can drop your static content to `data/static-content` and access it on the JSON API using `localhost:4001/static`. - -## Starting - -Enter the example directory where you find the `docker-compose.yml` and run the compose -commands there: `docker-compose up` - -Please note that the Docker user must be able to write to the `data` directory (and its subdirectories). -If you just need to fix the permissions for a demo, you can use ``chmod -R 777 data`` - -### Triggers - -In order to start triggers, you can use the small helper utilities: - -``` - ./utils/trigger_upload_dar.sh -``` - -and - -``` - ./utils/trigger_start.sh dars/CantonExamples.dar alice "testtrigger:digger" -``` - -### JSON Api - -There are a few jwt tokens generated on the fly for all local parties which can be used to access the JSON api. - -``` -curl -X GET -H "Content-Type: application/json" -H "Authorization: Bearer $(cat shared/alice.jwt)" localhost:4001/v1/query -``` - -## Inspecting - -You can access the Canton console using the `bin/node-console.sh` script. - -## Resetting - -The postgres data is stored on the Docker pgdata volume. You need to wipe this Docker volume to reset your deployment. 
- -A quick and easy way to reset the entire deployment is to prune the volumes and containers: - -`docker container prune -f && docker volume prune -f` - -You can also remove everything, including any downloaded image: - -`docker system prune -a` - - diff --git a/community/app/src/pack/deployment/docker/bin/node-console.sh b/community/app/src/pack/deployment/docker/bin/node-console.sh deleted file mode 100755 index 701460197..000000000 --- a/community/app/src/pack/deployment/docker/bin/node-console.sh +++ /dev/null @@ -1,5 +0,0 @@ -#!/bin/bash -# Copyright (c) 2022 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. -# SPDX-License-Identifier: Apache-2.0 - -docker-compose exec connect.node ./bin/canton -c examples/03-advanced-configuration/remote/participant1.conf,examples/03-advanced-configuration/remote/domain1.conf diff --git a/community/app/src/pack/deployment/docker/data/canton/bootstrap.canton b/community/app/src/pack/deployment/docker/data/canton/bootstrap.canton deleted file mode 100644 index 49e767c55..000000000 --- a/community/app/src/pack/deployment/docker/data/canton/bootstrap.canton +++ /dev/null @@ -1,86 +0,0 @@ -// define your parties here (hint, display name) -val localParties : Seq[(String, String)] = Seq() - -// define your domain connections here -val domainConnections : Seq[(String, String, Int, Boolean)] = Seq() - -// ------------------------------------------------------ -// don't change unless you know what you are doing -// ------------------------------------------------------ -val logger = org.slf4j.LoggerFactory.getLogger("console") -val myparticipant = participants.local.head -console.set_command_timeout(3.minutes) - -logger.info("hello world!") -val myParties = (localParties ++ sys.env.getOrElse("CANTON_ALLOCATE_PARTIES", "").split(";").toList.filter(_.nonEmpty).map(x => (x,x))).map { - case (party, hint) => - val matching = myparticipant.parties.hosted().map(_.party.toLf).find(_.startsWith(party + "::")) - matching match { - case None => logger.info(s"Adding party $party") - val tt = myparticipant.parties.enable(party, Some(hint)) - (party, tt.toLf) - case Some(existing) => (party, existing) - } -}.toList - -val registered = myparticipant.domains.list_registered().map(_._1.domain.unwrap) -val domainsFromEnv = sys.env.getOrElse("CANTON_CONNECT_DOMAINS", "").split(";").map(_.split("#")).map(x => (x(0), x(1))).zipWithIndex.map(y => (y._1._1, y._1._2, y._2, true)).toList -(domainConnections ++ domainsFromEnv).foreach { - case (alias, url, priority, active) => - if(!registered.contains(alias) && active) { - myparticipant.domains.connect(alias, url, priority = priority) - } -} - -val baseDir = os.Path("data", base=os.pwd) -val dars = os.walk(baseDir).filter(x => x.ext == "dar").sorted -dars.foreach { dar => - logger.info(s"Uploading dar ${dar}") - myparticipant.dars.upload(dar.toString) -} - -// if there is a domain connection, check that it is running -if(myparticipant.domains.list_registered().nonEmpty) { - utils.retry_until_true { - myparticipant.domains.list_connected().nonEmpty - } - myparticipant.health.ping(myparticipant.id) -} - -// create jwt access token -import com.daml.ledger.api.auth.{CustomDamlJWTPayload, AuthServiceJWTCodec} -import com.daml.jwt.JwtSigner -import com.daml.jwt.domain.{DecodedJwt, Jwt} -import com.digitalasset.canton.util.TextFileUtil -def buildUnsafeJwtToken(participantId: String, applicationId: String, secret: String, admin: Boolean, readAs: List[String], actAs: List[String]): String = { - val payload = 
CustomDamlJWTPayload( - Some(participantId), - Some(participantId), - Some(applicationId), - Some(java.time.Instant.now().plusSeconds(31536000)), - admin = admin, - readAs = readAs, - actAs = actAs, - ) - // stolen from com.daml.ledger.api.auth.Main - val jwtPayload = AuthServiceJWTCodec.compactPrint(payload) - val jwtHeader = s"""{"alg": "HS256", "typ": "JWT"}""" - val signed: Jwt = JwtSigner.HMAC256 - .sign(DecodedJwt(jwtHeader, jwtPayload), secret) - .valueOr(err => throw new RuntimeException(err.message)) - signed.value -} - -myParties.foreach { - case (hint, pid) => - TextFileUtil.writeStringToFile( - new java.io.File(s"data/shared/${hint}.jwt"), - buildUnsafeJwtToken(myparticipant.id.uid.id.unwrap, "testing", "secret", true, List(pid), List(pid)) - ) -} - -TextFileUtil.writeStringToFile( - new java.io.File(s"data/shared/parties.txt"), - myParties.map(x => x._1 + " " + x._2).mkString("\n") + "\n" -) - diff --git a/community/app/src/pack/deployment/docker/data/canton/config.canton b/community/app/src/pack/deployment/docker/data/canton/config.canton deleted file mode 100644 index 1b069fefb..000000000 --- a/community/app/src/pack/deployment/docker/data/canton/config.canton +++ /dev/null @@ -1,6 +0,0 @@ -# empty config file for overrides - -# adjust the ledger api configuration -canton.participants.participant1.ledger-api.address=0.0.0.0 -# enable tracing -canton.monitoring.tracing.propagation = enabled diff --git a/community/app/src/pack/deployment/docker/data/canton/domain.conf b/community/app/src/pack/deployment/docker/data/canton/domain.conf deleted file mode 100644 index 057993b3b..000000000 --- a/community/app/src/pack/deployment/docker/data/canton/domain.conf +++ /dev/null @@ -1,16 +0,0 @@ -canton { - domains { - mydomain { - storage = ${_shared.storage} - storage.config.properties.databaseName = "mydomain" - public-api { - port = 10018 - address = 127.0.0.1 // default value if omitted - } - admin-api { - port = 10019 - address = 127.0.0.1 // default value if omitted - } - } - } -} diff --git a/community/app/src/pack/deployment/docker/data/canton/participant.conf b/community/app/src/pack/deployment/docker/data/canton/participant.conf deleted file mode 100644 index c76a5b7c5..000000000 --- a/community/app/src/pack/deployment/docker/data/canton/participant.conf +++ /dev/null @@ -1,16 +0,0 @@ -canton { - participants { - participant1 { - storage = ${_shared.storage} - storage.config.properties.databaseName = "participant1" - admin-api { - port = 10012 - address = 0.0.0.0 - } - ledger-api { - port = 10011 - address = 0.0.0.0 - } - } - } -} \ No newline at end of file diff --git a/community/app/src/pack/deployment/docker/data/docker-entrypoint-initdb.d/create-additional-dbs.sh b/community/app/src/pack/deployment/docker/data/docker-entrypoint-initdb.d/create-additional-dbs.sh deleted file mode 100755 index e316525ce..000000000 --- a/community/app/src/pack/deployment/docker/data/docker-entrypoint-initdb.d/create-additional-dbs.sh +++ /dev/null @@ -1,14 +0,0 @@ -#!/bin/bash -# Copyright (c) 2022 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
-# SPDX-License-Identifier: Apache-2.0 - -set -e - -psql -v ON_ERROR_STOP=1 --username "$POSTGRES_USER" --dbname "$POSTGRES_DB" <<-EOSQL - CREATE DATABASE jsonapi; - CREATE DATABASE triggers; - CREATE DATABASE mydomain; - GRANT ALL PRIVILEGES ON DATABASE jsonapi TO canton; - GRANT ALL PRIVILEGES ON DATABASE triggers TO canton; - GRANT ALL PRIVILEGES ON DATABASE mydomain TO canton; -EOSQL diff --git a/community/app/src/pack/deployment/docker/data/logs/.gitignore b/community/app/src/pack/deployment/docker/data/logs/.gitignore deleted file mode 100644 index e69de29bb..000000000 diff --git a/community/app/src/pack/deployment/docker/data/shared/.gitignore b/community/app/src/pack/deployment/docker/data/shared/.gitignore deleted file mode 100644 index e69de29bb..000000000 diff --git a/community/app/src/pack/deployment/docker/data/static-content/index.html b/community/app/src/pack/deployment/docker/data/static-content/index.html deleted file mode 100644 index 92a8c093f..000000000 --- a/community/app/src/pack/deployment/docker/data/static-content/index.html +++ /dev/null @@ -1,10 +0,0 @@ - - - - -

Hello World

- - diff --git a/community/app/src/pack/deployment/docker/data/utils/cleanup.sh b/community/app/src/pack/deployment/docker/data/utils/cleanup.sh deleted file mode 100755 index 3d971dca8..000000000 --- a/community/app/src/pack/deployment/docker/data/utils/cleanup.sh +++ /dev/null @@ -1,29 +0,0 @@ -#!/bin/bash -# Copyright (c) 2022 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. -# SPDX-License-Identifier: Apache-2.0 - -if [ ! -e "shared" ]; then - echo "ERROR. There is no shared directory. Please ensure that all the files this docker deployment needs are accessible" - pwd - ls -l - exit 1 -fi - -if [ ! -w "shared" ] || [ ! -w "logs" ]; then - echo "ERROR shared or logs directory are not writable. please change the permissions such that the docker process can write into these" - exit 1 -fi - -# if the connect node is running, don't do anything (assuming we are restarting some processes) -nc -z -v connect.node 10011 > /dev/null 2>&1 -ret=$? - -# remove access tokens from shared file (at startup) -if [ $ret -ne 0 ]; then - echo "connect node is not running. resetting shared directory" - rm -f shared/* - chmod 777 shared -else - echo "connect node is running, therefore i won't reset the shared directory" -fi - diff --git a/community/app/src/pack/deployment/docker/data/utils/navigator_dummy_config_remove_after_daml_1_9_0.conf b/community/app/src/pack/deployment/docker/data/utils/navigator_dummy_config_remove_after_daml_1_9_0.conf deleted file mode 100644 index e69de29bb..000000000 diff --git a/community/app/src/pack/deployment/docker/data/utils/postgresql.conf b/community/app/src/pack/deployment/docker/data/utils/postgresql.conf deleted file mode 100644 index 8d062313c..000000000 --- a/community/app/src/pack/deployment/docker/data/utils/postgresql.conf +++ /dev/null @@ -1,6 +0,0 @@ - -listen_addresses = '*' -logging_collector = on -log_directory = '/data/logs/postgresql/' -log_file_mode = 0644 -log_filename = 'postgresql-%Y-%m-%d-%H.log' \ No newline at end of file diff --git a/community/app/src/pack/deployment/docker/data/utils/sdk-logback.xml b/community/app/src/pack/deployment/docker/data/utils/sdk-logback.xml deleted file mode 100644 index 1b3ae4f4f..000000000 --- a/community/app/src/pack/deployment/docker/data/utils/sdk-logback.xml +++ /dev/null @@ -1,35 +0,0 @@ - - - - - %d{HH:mm:ss.SSS} [%thread] %-5level %logger{36} - %msg%n - - - ${STDOUT_LOG_LEVEL:-INFO} - - - - ${LOG_FILE:-logs/daml.log} - true - - - ${LOG_FILE:-logs/daml.log}.%d{yyyy-MM-dd-HH}.gz - - 12 - - - %date [%thread] %-5level %logger{35} - %msg%n - - - - - - - - - - - - - diff --git a/community/app/src/pack/deployment/docker/data/utils/trigger_start.sh b/community/app/src/pack/deployment/docker/data/utils/trigger_start.sh deleted file mode 100755 index 71e77689a..000000000 --- a/community/app/src/pack/deployment/docker/data/utils/trigger_start.sh +++ /dev/null @@ -1,44 +0,0 @@ -#!/bin/bash -# Copyright (c) 2022 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. -# SPDX-License-Identifier: Apache-2.0 - -# this script starts a certain trigger -if [ -z $3 ]; then - echo "usage: $(basename $0) " - exit 1 -fi - -if [ ! -e shared/parties.txt ]; then - echo "missing shared/parties.txt, is the system running?" - exit 1 -fi - -function find_package_id { - local dar=$1 - daml damlc inspect $dar | grep "^package" | tail -n 1 | awk '{print $2}' -} - -function find_party { - local party=$1 - grep -E "^${party}" shared/parties.txt | tail -n 1 | awk '{print $2}' -} - -if [ ! 
-e $1 ]; then - echo "no such dar $1" - exit 1 -fi - -package_id=$(find_package_id $1) -party=$(find_party $2) - -if [ -z $party ]; then - echo "no such party: $2" - exit 1 -fi - - -curl \ - -X POST localhost:4002/v1/triggers \ - -H "Content-type: application/json" \ - -H "Accept: application/json" \ - -d "{\"triggerName\":\"${package_id}:$3\", \"party\": \"${party}\"}" diff --git a/community/app/src/pack/deployment/docker/data/utils/trigger_upload_dar.sh b/community/app/src/pack/deployment/docker/data/utils/trigger_upload_dar.sh deleted file mode 100755 index 964202502..000000000 --- a/community/app/src/pack/deployment/docker/data/utils/trigger_upload_dar.sh +++ /dev/null @@ -1,16 +0,0 @@ -#!/bin/bash -# Copyright (c) 2022 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. -# SPDX-License-Identifier: Apache-2.0 - -if [ -z $1 ]; then - echo "usage: $(basename $0) " - echo "upload dar to trigger service" - exit 1 -fi - -if [ ! -e $1 ]; then - echo "file $1 does not exist!" - exit 1 -fi - -curl -F "dar=@$1" localhost:4002/v1/packages diff --git a/community/app/src/pack/deployment/docker/data/utils/wait_for_file.sh b/community/app/src/pack/deployment/docker/data/utils/wait_for_file.sh deleted file mode 100755 index f56706c97..000000000 --- a/community/app/src/pack/deployment/docker/data/utils/wait_for_file.sh +++ /dev/null @@ -1,20 +0,0 @@ -#!/bin/bash -# Copyright (c) 2022 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. -# SPDX-License-Identifier: Apache-2.0 - -RETRIES=180 - -if [ -z "$1" ]; then - echo "usage: wait_for_file.sh file" - exit 1 -fi - -ret=0 -while [ ! -e $1 ]; do - let RETRIES=$RETRIES-1 - if [ $RETRIES -eq 0 ]; then - echo "file $1 did not appear after many retries. giving up." - exit 1 - fi - sleep 1 -done diff --git a/community/app/src/pack/deployment/docker/data/utils/wait_until_alive.sh b/community/app/src/pack/deployment/docker/data/utils/wait_until_alive.sh deleted file mode 100755 index eca17ddba..000000000 --- a/community/app/src/pack/deployment/docker/data/utils/wait_until_alive.sh +++ /dev/null @@ -1,19 +0,0 @@ -#!/bin/bash -# Copyright (c) 2022 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. -# SPDX-License-Identifier: Apache-2.0 - -if [ -z "$2" ]; then - echo "usage: wait_until_alive.sh host port" - exit 1 -fi - -ret=1 -while [ $ret -ne 0 ]; do - nc -z -v $1 $2 > /dev/null 2>&1 - ret=$? 
- if [ $ret -ne 0 ]; then - echo "process on $1 $2 is not yet up, trying again" - sleep 2 - fi -done -echo "process on $1 $2 appeared" diff --git a/community/app/src/pack/deployment/docker/docker-compose.yml b/community/app/src/pack/deployment/docker/docker-compose.yml deleted file mode 100644 index b11338697..000000000 --- a/community/app/src/pack/deployment/docker/docker-compose.yml +++ /dev/null @@ -1,121 +0,0 @@ -# -# Copyright 2022 Digital Asset (Switzerland) GmbH and/or its affiliates -# -# SPDX-License-Identifier: Apache-2.0 -# -version: '3' - -# ------------------------------------------------------------ -# demo participant deployment -# -# this deployment sets up -# - postgres database -# - canton participant -# - trigger service -# - json api -# ------------------------------------------------------------ - -volumes: - pgdata: - -services: - - postgres.database: - image: "postgres:11" - ports: - - "${BASE_PORT:-40}32:5432" - environment: - - POSTGRES_USER=canton - - POSTGRES_PASSWORD=supersafe - - POSTGRES_DB=participant1 - command: [ - "-c" , "config_file=/data/utils/postgresql.conf" - ] - volumes: - - pgdata:/var/lib/postgresql/data - - ./data/docker-entrypoint-initdb.d:/docker-entrypoint-initdb.d - - ./data:/data:cached - - connect.cleanup: - image: digitalasset/canton-${CANTON_TYPE:-open-source}:${CANTON_VERSION:-latest} - entrypoint: [] - command: sh ./utils/cleanup.sh - volumes: - - ./data/utils:/canton/utils - - ./data/shared:/canton/shared - - ./data/logs:/canton/logs - - connect.node: - image: digitalasset/canton-${CANTON_TYPE:-open-source}:${CANTON_VERSION:-latest} - ports: - - "${BASE_PORT:-40}11:10011" - - "${BASE_PORT:-40}12:10012" - tty: true - environment: - - POSTGRES_HOST=postgres.database - - POSTGRES_USER=canton - - POSTGRES_PASSWORD=supersafe - - CANTON_AUTO_APPROVE_AGREEMENTS=yes - - CANTON_ALLOCATE_PARTIES=${CANTON_ALLOCATE_PARTIES:-alice;bob} - - CANTON_CONNECT_DOMAINS=${CANTON_CONNECT_DOMAINS:-mydomain#http://localhost:10018} - - JAVA_OPTS="-Xmx3G" - command: ["daemon", "--log-profile=container", "--log-file-name=./data/logs/canton.log", - "-c" ,"data/canton/config.canton", - "-c" ,"examples/03-advanced-configuration/storage/postgres.conf", - "-c" , "${CANTON_CONFIG:-data/canton/participant.conf,data/canton/domain.conf}", - "--bootstrap=data/canton/bootstrap.canton"] - volumes: - - ./data:/canton/data - links: - - postgres.database - depends_on: - - postgres.database - - connect.cleanup - - connect.navigator: - image: digitalasset/daml-sdk:${SDK_VERSION:-2.0.0-snapshot.20220127.9042.0.4038d0a7} - environment: - - _JAVA_OPTIONS="-Dlogback.configurationFile=./data/utils/sdk-logback.xml" - - LOG_FILE=./data/logs/navigator.log - entrypoint: [] - command: sh -c "./data/utils/wait_for_file.sh data/shared/parties.txt && daml ledger navigator --host connect.node --port 10011 --port ${BASE_PORT:-40}00 -c ./data/utils/navigator_dummy_config_remove_after_daml_1_9_0.conf" - ports: - - "${BASE_PORT:-40}00:${BASE_PORT:-40}00" - volumes: - - ./data:/home/daml/data - depends_on: - - connect.node - links: - - connect.node - - connect.json: - image: digitalasset/daml-sdk:${SDK_VERSION:-2.0.0-snapshot.20220127.9042.0.4038d0a7} - ports: - - "${BASE_PORT:-40}01:4001" - environment: - - _JAVA_OPTIONS="-Dlogback.configurationFile=./data/utils/sdk-logback.xml" - - LOG_FILE=./data/logs/json.log - entrypoint: [] - command: sh -c "./data/utils/wait_for_file.sh data/shared/parties.txt && daml json-api --ledger-host connect.node --ledger-port 10011 --address 0.0.0.0 --port-file 
./data/shared/json.port --http-port 4001 --allow-insecure-tokens --static-content \"prefix=static,directory=/home/daml/data/static-content\" --query-store-jdbc-config \"driver=org.postgresql.Driver,url=jdbc:postgresql://postgres.database:5432/jsonapi?&ssl=false,user=canton,password=supersafe,start-mode=create-and-start\"" - volumes: - - ./data:/home/daml/data - depends_on: - - connect.node - links: - - connect.node - - connect.trigger: - image: digitalasset/daml-sdk:${SDK_VERSION:-2.0.0-snapshot.20220127.9042.0.4038d0a7} - environment: - - _JAVA_OPTIONS="-Dlogback.configurationFile=./data/utils/sdk-logback.xml" - - LOG_FILE=./data/logs/triggers.log - ports: - - "${BASE_PORT:-40}02:4002" - entrypoint: [] - command: sh -c "./data/utils/wait_for_file.sh data/shared/parties.txt && daml trigger-service --ledger-host connect.node --ledger-port 10011 --address 0.0.0.0 --http-port 4002 --jdbc \"driver=org.postgresql.Driver,url=jdbc:postgresql://postgres.database:5432/triggers?&ssl=false,user=canton,password=supersafe\"" - volumes: - - ./data:/home/daml/data - depends_on: - - connect.node - links: - - connect.node diff --git a/community/app/src/pack/examples/01-simple-topology/simple-topology.conf b/community/app/src/pack/examples/01-simple-topology/simple-topology.conf index dc6de0858..4015e8eb5 100644 --- a/community/app/src/pack/examples/01-simple-topology/simple-topology.conf +++ b/community/app/src/pack/examples/01-simple-topology/simple-topology.conf @@ -18,6 +18,4 @@ canton { admin-api.port = 5019 } } - // enable ledger_api commands for our getting started guide - features.enable-testing-commands = yes } diff --git a/community/app/src/pack/examples/05-composability/composability.conf b/community/app/src/pack/examples/05-composability/composability.conf index a2f57c84e..66709f468 100644 --- a/community/app/src/pack/examples/05-composability/composability.conf +++ b/community/app/src/pack/examples/05-composability/composability.conf @@ -6,7 +6,7 @@ canton { } monitoring { tracing.propagation = enabled - log-message-payloads = true + logging.api.message-payloads = true } domains { iou { diff --git a/community/app/src/pack/examples/06-messaging/contact/daml.yaml b/community/app/src/pack/examples/06-messaging/contact/daml.yaml index 2502faf9e..72bafd7d3 100644 --- a/community/app/src/pack/examples/06-messaging/contact/daml.yaml +++ b/community/app/src/pack/examples/06-messaging/contact/daml.yaml @@ -1,4 +1,4 @@ -sdk-version: 2.1.0-snapshot.20220324.9615.0.467b8fbb +sdk-version: 2.1.0-snapshot.20220328.9630.0.66c37bad sandbox-options: - --wall-clock-time name: contact diff --git a/community/app/src/pack/examples/06-messaging/message/daml.yaml b/community/app/src/pack/examples/06-messaging/message/daml.yaml index 7373ebd48..74a8cb5bf 100644 --- a/community/app/src/pack/examples/06-messaging/message/daml.yaml +++ b/community/app/src/pack/examples/06-messaging/message/daml.yaml @@ -1,4 +1,4 @@ -sdk-version: 2.1.0-snapshot.20220324.9615.0.467b8fbb +sdk-version: 2.1.0-snapshot.20220328.9630.0.66c37bad sandbox-options: - --wall-clock-time name: message diff --git a/community/app/src/test/resources/documentation-snippets/logging-event-details.conf b/community/app/src/test/resources/documentation-snippets/logging-event-details.conf new file mode 100644 index 000000000..e7de12891 --- /dev/null +++ b/community/app/src/test/resources/documentation-snippets/logging-event-details.conf @@ -0,0 +1,10 @@ +canton.monitoring.logging { + event-details = true + api { + message-payloads = true + max-method-length = 
1000 + max-message-lines = 10000 + max-string-length = 10000 + max-metadata-size = 10000 + } +} diff --git a/community/app/src/test/resources/documentation-snippets/storage-queue-size.conf b/community/app/src/test/resources/documentation-snippets/storage-queue-size.conf new file mode 100644 index 000000000..44c5d752f --- /dev/null +++ b/community/app/src/test/resources/documentation-snippets/storage-queue-size.conf @@ -0,0 +1 @@ +canton.participants.participant1.storage.config.queueSize = 10000 \ No newline at end of file diff --git a/community/app/src/test/scala/com/digitalasset/canton/environment/NodesTest.scala b/community/app/src/test/scala/com/digitalasset/canton/environment/NodesTest.scala index 5cba57705..b5b7cac99 100644 --- a/community/app/src/test/scala/com/digitalasset/canton/environment/NodesTest.scala +++ b/community/app/src/test/scala/com/digitalasset/canton/environment/NodesTest.scala @@ -40,12 +40,12 @@ class NodesTest extends AnyWordSpec with BaseTest with HasExecutionContext { override def enablePreviewFeatures: Boolean = ??? override def enableAdditionalConsistencyChecks: Boolean = ??? override def processingTimeouts: ProcessingTimeout = DefaultProcessingTimeouts.testing - override def logMessagePayloads: Boolean = ??? override def logQueryCost: Option[QueryCostMonitoringConfig] = ??? override def tracing: TracingConfig = ??? override def sequencerClient: SequencerClientConfig = ??? override def cachingConfigs: CachingConfigs = ??? override def nonStandardConfig: Boolean = ??? + override def loggingConfig: LoggingConfig = ??? } class TestNodeBootstrap extends CantonNodeBootstrap[TestNode] { override def name: InstanceName = ??? diff --git a/community/app/src/test/scripts/test-docker-compose.sh b/community/app/src/test/scripts/test-docker-compose.sh deleted file mode 100755 index 9933fc6e8..000000000 --- a/community/app/src/test/scripts/test-docker-compose.sh +++ /dev/null @@ -1,138 +0,0 @@ -#!/bin/bash -# Copyright (c) 2022 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. -# SPDX-License-Identifier: Apache-2.0 - -set -e - -cd $(dirname $0) -cd ../../../src/pack/deployment/docker - -export CANTON_VERSION=${CANTON_VERSION:-"dev"} -export BASE_PORT="80" - -# figure out what our current SDK version is -EXPECTED_VERSION=$(cat ../../../../../../project/project/DamlVersions.scala | grep "val version: String =" | awk '{print $5}' | tr -d '"') -if [ -z $EXPECTED_VERSION ]; then - echo "unable to determine expected version" - exit 1 -fi - -echo "Expected versions Daml: $EXPECTED_VERSION Canton: $CANTON_VERSION" - -# fixing permissions for docker on CI -cd data -mkdir -p shared -mkdir -p logs -chmod 777 shared logs -rm -f shared/* -cd .. - -# wipe any preexisting state from docker; primarily useful when running locally - not in CI -docker container prune -f && docker volume prune -f - -docker-compose -version - -# pull non canton images. canton images will be generated locally in CI. -docker-compose ps --services | grep -Ev 'connect.(node|cleanup)' | xargs docker-compose pull - -# if you need to deploy what is happening here, remove the `-d`. 
the test will fail but you will see the errors -# alternatively, have a look at the log artifacts -docker-compose up -d - -cd data - -echo "waiting for system to come up" -./utils/wait_for_file.sh shared/json.port -echo "json api appeared" - -# -------------------------------------------- -# test json api and full deployment by creating a contract -# -------------------------------------------- -function find_party { - local party=$1 - grep -E "^${party}" shared/parties.txt | tail -n 1 | awk '{print $2}' -} - -MYSTRING="My greatest test so far." - -party=$(find_party "alice") -cat > cmd.txt < tmp.txt - grep -q "${MYSTRING}" tmp.txt - ret=$? - if [ $ret -ne 0 ]; then - echo "contract not found yet" - let retries=$retries-1 - if [ $retries -eq 0 ]; then - echo "giving up!" - exit 1 - fi - fi - done - echo "contract has been found ..." - -} - -test_contract_presence 30 - -rm tmp.txt cmd.txt - -# ------------------------------------------------ -# check if navigator and trigger service are running -# ------------------------------------------------ -./utils/wait_until_alive.sh localhost ${BASE_PORT}00 -./utils/wait_until_alive.sh localhost ${BASE_PORT}02 -./utils/wait_until_alive.sh localhost ${BASE_PORT}11 -./utils/wait_until_alive.sh localhost ${BASE_PORT}32 - -cd ../ - -# -------------------------------------------------- -# check that we are running the right SDK version -# -------------------------------------------------- -USED_VERSION=$(docker-compose exec connect.navigator daml version | grep default | awk '{print $1}') -if [ "$USED_VERSION" != "$EXPECTED_VERSION" ]; then - echo -e "\033[0;31m ⚠️⚠️⚠️ WARNING: Until #8353 is addressed, docker-compose is still referring to SDK version $USED_VERSION, while the project is based on $EXPECTED_VERSION ⚠️⚠️⚠️ \033[0m" - # TODO(#8353): daml-repo ("split") releases contain neither the daml-sdk nor the daml-sdk docker images used by this test - # exit 1 -fi - - -docker-compose down - -# ------------------------------------------------ -# check if we can restart the system -# ------------------------------------------------ -docker-compose up -d - -cd data - -./utils/wait_for_file.sh shared/json.port - -./utils/wait_until_alive.sh localhost ${BASE_PORT}00 -./utils/wait_until_alive.sh localhost ${BASE_PORT}02 - -echo "checking if the contract is still there" -test_contract_presence 0 - -docker-compose down diff --git a/community/common/src/main/daml/CantonExamples/daml.yaml b/community/common/src/main/daml/CantonExamples/daml.yaml index eb2ef331f..df428e2df 100644 --- a/community/common/src/main/daml/CantonExamples/daml.yaml +++ b/community/common/src/main/daml/CantonExamples/daml.yaml @@ -1,4 +1,4 @@ -sdk-version: 2.1.0-snapshot.20220324.9615.0.467b8fbb +sdk-version: 2.1.0-snapshot.20220328.9630.0.66c37bad name: CantonExamples parties: - Alice diff --git a/community/common/src/main/scala/com/digitalasset/canton/config/ApiLoggingConfig.scala b/community/common/src/main/scala/com/digitalasset/canton/config/ApiLoggingConfig.scala new file mode 100644 index 000000000..fa21d6c86 --- /dev/null +++ b/community/common/src/main/scala/com/digitalasset/canton/config/ApiLoggingConfig.scala @@ -0,0 +1,43 @@ +// Copyright (c) 2022 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
+// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.config + +import com.digitalasset.canton.logging.pretty.CantonPrettyPrinter + +/** Control logging of the ApiRequestLogger + * + * Every GRPC service invocation is logged through the ApiRequestLogger. This allows + * to monitor all incoming traffic to a node (ledger API, sequencer API, admin API). + * + * @param messagePayloads Indicates whether to log message payloads. (To be disabled in production!) + * Also applies to metadata. None is equivalent to false. + * @param maxMethodLength indicates how much to abbreviate the name of the called method. + * E.g. "com.digitalasset.canton.MyMethod" may get abbreviated to "c.d.c.MyMethod". + * The last token will never get abbreviated. + * @param maxMessageLines maximum number of lines to log for a message + * @param maxStringLength maximum number of characters to log for a string within a message + * @param maxMetadataSize maximum size of metadata + */ +final case class ApiLoggingConfig( + // TODO(i9014) change to boolean (breaking change) + messagePayloads: Option[Boolean] = None, + maxMethodLength: Int = ApiLoggingConfig.defaultMaxMethodLength, + maxMessageLines: Int = ApiLoggingConfig.defaultMaxMessageLines, + maxStringLength: Int = ApiLoggingConfig.defaultMaxStringLength, + maxMetadataSize: Int = ApiLoggingConfig.defaultMaxMetadataSize, +) { + + def logMessagePayloads: Boolean = messagePayloads.getOrElse(false) + + /** Pretty printer for logging event details */ + lazy val printer = new CantonPrettyPrinter(maxStringLength, maxMessageLines) + +} + +object ApiLoggingConfig { + val defaultMaxMethodLength: Int = 30 + val defaultMaxMessageLines: Int = 10 + val defaultMaxStringLength: Int = 20 + val defaultMaxMetadataSize: Int = 200 +} diff --git a/community/common/src/main/scala/com/digitalasset/canton/config/BatchAggregatorConfig.scala b/community/common/src/main/scala/com/digitalasset/canton/config/BatchAggregatorConfig.scala index 4d8c12beb..d17528d3d 100644 --- a/community/common/src/main/scala/com/digitalasset/canton/config/BatchAggregatorConfig.scala +++ b/community/common/src/main/scala/com/digitalasset/canton/config/BatchAggregatorConfig.scala @@ -22,7 +22,7 @@ object BatchAggregatorConfig { def apply( maximumInFlight: PositiveNumeric[Int] = BatchAggregatorConfig.defaultMaximumInFlight, maximumBatchSize: PositiveNumeric[Int] = BatchAggregatorConfig.defaultMaximumBatchSize, - ) = + ): Batching = Batching( maximumInFlight = maximumInFlight, maximumBatchSize = maximumBatchSize, diff --git a/community/common/src/main/scala/com/digitalasset/canton/config/LocalNodeConfig.scala b/community/common/src/main/scala/com/digitalasset/canton/config/LocalNodeConfig.scala index 240a0bffd..b0f715f79 100644 --- a/community/common/src/main/scala/com/digitalasset/canton/config/LocalNodeConfig.scala +++ b/community/common/src/main/scala/com/digitalasset/canton/config/LocalNodeConfig.scala @@ -36,7 +36,7 @@ trait LocalNodeParameters { def tracing: TracingConfig def delayLoggingThreshold: NonNegativeFiniteDuration def logQueryCost: Option[QueryCostMonitoringConfig] - def logMessagePayloads: Boolean + def loggingConfig: LoggingConfig def enableAdditionalConsistencyChecks: Boolean def enablePreviewFeatures: Boolean def processingTimeouts: ProcessingTimeout diff --git a/community/common/src/main/scala/com/digitalasset/canton/config/LoggingConfig.scala b/community/common/src/main/scala/com/digitalasset/canton/config/LoggingConfig.scala new file mode 100644 index 000000000..ff8d2718d --- 
/dev/null +++ b/community/common/src/main/scala/com/digitalasset/canton/config/LoggingConfig.scala @@ -0,0 +1,16 @@ +// Copyright (c) 2022 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. +// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.config + +/** Detailed logging configurations + * + * This section allows to configure additional data such as transaction details to be logged to the standard logback system + * + * @param api Configuration settings for the ApiRequestLogger + * @param eventDetails If set to true, we will log substantial details of internal messages being processed. To be disabled in production! + */ +final case class LoggingConfig( + api: ApiLoggingConfig = ApiLoggingConfig(), + eventDetails: Boolean = false, +) {} diff --git a/community/common/src/main/scala/com/digitalasset/canton/config/ServerConfig.scala b/community/common/src/main/scala/com/digitalasset/canton/config/ServerConfig.scala index 477088728..b7d946332 100644 --- a/community/common/src/main/scala/com/digitalasset/canton/config/ServerConfig.scala +++ b/community/common/src/main/scala/com/digitalasset/canton/config/ServerConfig.scala @@ -73,7 +73,7 @@ trait ServerConfig extends Product with Serializable { /** Use the configuration to instantiate the interceptors for this server */ def instantiateServerInterceptors( tracingConfig: TracingConfig, - logMessagePayloads: Boolean, + apiLoggingConfig: ApiLoggingConfig, metrics: MetricHandle.Factory, loggerFactory: NamedLoggerFactory, ): CantonServerInterceptors @@ -83,10 +83,10 @@ trait ServerConfig extends Product with Serializable { trait CommunityServerConfig extends ServerConfig { override def instantiateServerInterceptors( tracingConfig: TracingConfig, - logMessagePayloads: Boolean, + apiLoggingConfig: ApiLoggingConfig, metrics: MetricHandle.Factory, loggerFactory: NamedLoggerFactory, - ) = new CantonCommunityServerInterceptors(tracingConfig, logMessagePayloads, loggerFactory) + ) = new CantonCommunityServerInterceptors(tracingConfig, apiLoggingConfig, loggerFactory) } object ServerConfig { diff --git a/community/common/src/main/scala/com/digitalasset/canton/data/CantonTimestamp.scala b/community/common/src/main/scala/com/digitalasset/canton/data/CantonTimestamp.scala index aef67eb91..5c7f8433b 100644 --- a/community/common/src/main/scala/com/digitalasset/canton/data/CantonTimestamp.scala +++ b/community/common/src/main/scala/com/digitalasset/canton/data/CantonTimestamp.scala @@ -57,11 +57,11 @@ case class CantonTimestamp(underlying: LfTimestamp) def +(duration: NonNegativeFiniteDuration): CantonTimestamp = plus(duration.unwrap) def -(duration: NonNegativeFiniteDuration): CantonTimestamp = minus(duration.unwrap) - def <=(other: CantonTimestampSecond): Boolean = this <= other.toTs - def <(other: CantonTimestampSecond): Boolean = this < other.toTs + def <=(other: CantonTimestampSecond): Boolean = this <= other.forgetSecond + def <(other: CantonTimestampSecond): Boolean = this < other.forgetSecond - def >=(other: CantonTimestampSecond): Boolean = this >= other.toTs - def >(other: CantonTimestampSecond): Boolean = this > other.toTs + def >=(other: CantonTimestampSecond): Boolean = this >= other.forgetSecond + def >(other: CantonTimestampSecond): Boolean = this > other.forgetSecond } object CantonTimestamp { diff --git a/community/common/src/main/scala/com/digitalasset/canton/data/CantonTimestampSecond.scala b/community/common/src/main/scala/com/digitalasset/canton/data/CantonTimestampSecond.scala index 
a26a27afb..a53f193a4 100644 --- a/community/common/src/main/scala/com/digitalasset/canton/data/CantonTimestampSecond.scala +++ b/community/common/src/main/scala/com/digitalasset/canton/data/CantonTimestampSecond.scala @@ -26,7 +26,7 @@ sealed abstract case class CantonTimestampSecond(underlying: LfTimestamp) require(microsOverSecond() == 0, s"Timestamp $underlying must be rounded to the second") - def toTs: CantonTimestamp = CantonTimestamp(underlying) + def forgetSecond: CantonTimestamp = CantonTimestamp(underlying) def plusSeconds(seconds: Long): CantonTimestampSecond = new CantonTimestampSecond(underlying.add(Duration.ofSeconds(seconds))) {} @@ -48,11 +48,11 @@ sealed abstract case class CantonTimestampSecond(underlying: LfTimestamp) underlying.add(Duration.ZERO.minus(duration.duration)) ) {} - def >(other: CantonTimestamp): Boolean = toTs > other - def >=(other: CantonTimestamp): Boolean = toTs >= other + def >(other: CantonTimestamp): Boolean = forgetSecond > other + def >=(other: CantonTimestamp): Boolean = forgetSecond >= other - def <(other: CantonTimestamp): Boolean = toTs < other - def <=(other: CantonTimestamp): Boolean = toTs <= other + def <(other: CantonTimestamp): Boolean = forgetSecond < other + def <=(other: CantonTimestamp): Boolean = forgetSecond <= other } diff --git a/community/common/src/main/scala/com/digitalasset/canton/environment/CantonNodeBootstrap.scala b/community/common/src/main/scala/com/digitalasset/canton/environment/CantonNodeBootstrap.scala index c0e0697c5..62d0052a7 100644 --- a/community/common/src/main/scala/com/digitalasset/canton/environment/CantonNodeBootstrap.scala +++ b/community/common/src/main/scala/com/digitalasset/canton/environment/CantonNodeBootstrap.scala @@ -6,6 +6,7 @@ package com.digitalasset.canton.environment import akka.actor.ActorSystem import cats.data.{EitherT, OptionT} import cats.syntax.option._ +import com.digitalasset.canton import com.digitalasset.canton.concurrent.ExecutionContextIdlenessExecutorService import com.digitalasset.canton.config.RequireTypes.InstanceName import com.digitalasset.canton.config.{LocalNodeConfig, LocalNodeParameters, ProcessingTimeout} @@ -24,9 +25,22 @@ import com.digitalasset.canton.resource.StorageFactory import com.digitalasset.canton.store.IndexedStringStore import com.digitalasset.canton.time.Clock import com.digitalasset.canton.topology._ -import com.digitalasset.canton.topology.admin.grpc.GrpcInitializationService -import com.digitalasset.canton.topology.admin.v0.InitializationServiceGrpc -import com.digitalasset.canton.topology.store.{InitializationStore, TopologyStoreFactory} +import com.digitalasset.canton.topology.admin.grpc.{ + GrpcInitializationService, + GrpcTopologyAggregationService, + GrpcTopologyManagerReadService, + GrpcTopologyManagerWriteService, +} +import com.digitalasset.canton.topology.admin.v0.{ + InitializationServiceGrpc, + TopologyManagerWriteServiceGrpc, +} +import com.digitalasset.canton.topology.client.IdentityProvidingServiceClient +import com.digitalasset.canton.topology.store.{ + InitializationStore, + TopologyStore, + TopologyStoreFactory, +} import com.digitalasset.canton.topology.transaction._ import com.digitalasset.canton.tracing.TraceContext.withNewTraceContext import com.digitalasset.canton.tracing.{NoTracing, TraceContext, TracerProvider} @@ -37,8 +51,8 @@ import io.grpc.protobuf.services.ProtoReflectionService import io.opentelemetry.api.trace.Tracer import java.util.concurrent.atomic.{AtomicBoolean, AtomicReference} -import scala.concurrent.{Future, 
blocking} import scala.concurrent.duration._ +import scala.concurrent.{Future, blocking} /** When a canton node is created it first has to obtain an identity before most of its services can be started. * This process will begin when `start` is called and will try to perform as much as permitted by configuration automatically. @@ -152,6 +166,7 @@ abstract class CantonNodeBootstrapBase[ val certificateGenerator = new X509CertificateGenerator(crypto, loggerFactory) protected val topologyStoreFactory = TopologyStoreFactory(storage, timeouts, loggerFactory) + protected val ips = new IdentityProvidingServiceClient() protected def isActive: Boolean @@ -170,7 +185,7 @@ abstract class CantonNodeBootstrapBase[ nodeMetrics, executionContext, loggerFactory, - parameterConfig.logMessagePayloads, + parameterConfig.loggingConfig.api, parameterConfig.tracing, ) @@ -191,12 +206,54 @@ abstract class CantonNodeBootstrapBase[ ) ) .addService(ProtoReflectionService.newInstance(), false) + .addService( + canton.topology.admin.v0.TopologyAggregationServiceGrpc + .bindService( + new GrpcTopologyAggregationService( + topologyStoreFactory.allNonDiscriminated, + ips, + loggerFactory, + ), + executionContext, + ) + ) + .addService( + canton.topology.admin.v0.TopologyManagerReadServiceGrpc + .bindService( + new GrpcTopologyManagerReadService( + topologyStoreFactory.allNonDiscriminated, + ips, + loggerFactory, + ), + executionContext, + ) + ) .build .start() (Lifecycle.toCloseableServer(server, logger, "AdminServer"), registry) } - protected def startWithStoredId(id: NodeId): EitherT[Future, String, Unit] = { + protected def startTopologyManagementWriteService[E <: CantonError]( + topologyManager: TopologyManager[E], + authorizedStore: TopologyStore, + ): Unit = { + adminServerRegistry + .addService( + TopologyManagerWriteServiceGrpc + .bindService( + new GrpcTopologyManagerWriteService( + topologyManager, + authorizedStore, + crypto.cryptoPublicStore, + loggerFactory, + ), + executionContext, + ) + ) + .discard + } + + protected def startWithStoredNodeId(id: NodeId): EitherT[Future, String, Unit] = { if (nodeId.compareAndSet(None, Some(id))) { logger.info(s"Resuming as existing instance with uid=${id}") @@ -254,7 +311,7 @@ abstract class CantonNodeBootstrapBase[ ) skipInitialization } - )(startWithStoredId) + )(startWithStoredNodeId) } yield { // if we're still not initialized and support a replica doing on our behalf, start a watcher to handle that happening if (getId.isEmpty && supportsReplicaInitialization) waitForReplicaInitialization() @@ -295,7 +352,11 @@ abstract class CantonNodeBootstrapBase[ override def onClosed(): Unit = blocking { synchronized { if (isRunningVar.getAndSet(false)) { - val stores = List(initializationStore, indexedStringStore, topologyStoreFactory) + val stores = List( + initializationStore, + indexedStringStore, + topologyStoreFactory, + ) val instances = List( Lifecycle.toCloseableOption(initializationWatcherRef.get()), adminServerRegistry, @@ -374,7 +435,7 @@ abstract class CantonNodeBootstrapBase[ logger.debug("A stored id has been found but the id has already been set so ignoring") } else { logger.info("Starting node as we have found a stored id") - startWithStoredId(id).value.foreach { + startWithStoredNodeId(id).value.foreach { case Left(error) => // if we are already successfully initialized likely this was just called twice due to a race between // the waiting and an initialize call diff --git a/community/common/src/main/scala/com/digitalasset/canton/error/CantonError.scala 
b/community/common/src/main/scala/com/digitalasset/canton/error/CantonError.scala index bdbc1ebbd..18b2cc7a7 100644 --- a/community/common/src/main/scala/com/digitalasset/canton/error/CantonError.scala +++ b/community/common/src/main/scala/com/digitalasset/canton/error/CantonError.scala @@ -7,7 +7,7 @@ import cats.data.NonEmptyList import com.daml.error._ import com.digitalasset.canton.logging.ErrorLoggingContext import com.google.rpc.error_details.ErrorInfo -import io.grpc.StatusRuntimeException +import io.grpc.{Status, StatusRuntimeException} import scala.util.Try import scala.util.matching.Regex @@ -72,6 +72,21 @@ trait BaseCantonError extends BaseError { /** The error code, usually passed in as implicit where the error class is defined */ def code: ErrorCode + + def rpcStatus( + overrideCode: Option[Status.Code] = None + )(implicit loggingContext: ErrorLoggingContext): com.google.rpc.status.Status = { + import scala.jdk.CollectionConverters._ + val status0: com.google.rpc.Status = code.asGrpcStatus(this) + val details: Seq[com.google.protobuf.Any] = status0.getDetailsList.asScala.toSeq + val detailsScalapb = details.map(com.google.protobuf.any.Any.fromJavaProto) + + com.google.rpc.status.Status( + overrideCode.map(_.value()).getOrElse(status0.getCode), + status0.getMessage, + detailsScalapb, + ) + } } object CantonErrorResource { diff --git a/community/common/src/main/scala/com/digitalasset/canton/error/TransactionError.scala b/community/common/src/main/scala/com/digitalasset/canton/error/TransactionError.scala index 56fb607d7..343af4a8f 100644 --- a/community/common/src/main/scala/com/digitalasset/canton/error/TransactionError.scala +++ b/community/common/src/main/scala/com/digitalasset/canton/error/TransactionError.scala @@ -14,8 +14,6 @@ import com.google.rpc.code.Code import com.google.rpc.status.{Status => RpcStatus} import io.grpc.Status -import scala.jdk.CollectionConverters._ - abstract class ErrorCodeWithEnum[T](id: String, category: ErrorCategory, val protoCode: T)(implicit parent: ErrorClass ) extends ErrorCode(id, category) { @@ -37,20 +35,6 @@ trait TransactionError extends BaseCantonError { def definiteAnswer: Boolean = false final override def definiteAnswerO: Option[Boolean] = Some(definiteAnswer) - - def rpcStatus( - overrideCode: Option[Status.Code] = None - )(implicit loggingContext: ErrorLoggingContext): RpcStatus = { - val status0: com.google.rpc.Status = code.asGrpcStatus(this) - val details: Seq[com.google.protobuf.Any] = status0.getDetailsList.asScala.toSeq - val detailsScalapb = details.map(com.google.protobuf.any.Any.fromJavaProto) - - com.google.rpc.status.Status( - overrideCode.map(_.value()).getOrElse(status0.getCode), - status0.getMessage, - detailsScalapb, - ) - } } /** Transaction errors are derived from BaseCantonError and need to be logged explicitly */ diff --git a/community/common/src/main/scala/com/digitalasset/canton/logging/pretty/CantonPrettyPrinter.scala b/community/common/src/main/scala/com/digitalasset/canton/logging/pretty/CantonPrettyPrinter.scala new file mode 100644 index 000000000..3e1f26c4b --- /dev/null +++ b/community/common/src/main/scala/com/digitalasset/canton/logging/pretty/CantonPrettyPrinter.scala @@ -0,0 +1,55 @@ +// Copyright (c) 2022 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
+// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.logging.pretty + +import com.digitalasset.canton.logging.pretty.Pretty.{ + DefaultEscapeUnicode, + DefaultIndent, + DefaultShowFieldNames, + DefaultWidth, +} +import com.google.protobuf.ByteString +import pprint.{PPrinter, Tree} + +/** Adhoc pretty printer to nicely print the full structure of a class that does not have an explicit pretty definition */ +class CantonPrettyPrinter(maxStringLength: Int, maxMessageLines: Int) { + + @SuppressWarnings(Array("org.wartremover.warts.Null")) + def printAdHoc(message: Any): String = + message match { + case null => "" + case product: Product => + pprinter(product).toString + case _: Any => + import com.digitalasset.canton.logging.pretty.Pretty._ + message.toString.limit(maxStringLength).toString + } + + private lazy val pprinter: PPrinter = PPrinter.BlackWhite.copy( + defaultWidth = DefaultWidth, + defaultHeight = maxMessageLines, + defaultIndent = DefaultIndent, + defaultEscapeUnicode = DefaultEscapeUnicode, + defaultShowFieldNames = DefaultShowFieldNames, + additionalHandlers = { + case _: ByteString => Tree.Literal("ByteString") + case s: String => + import com.digitalasset.canton.logging.pretty.Pretty._ + s.limit(maxStringLength).toTree + case Some(p) => + pprinter.treeify( + p, + escapeUnicode = DefaultEscapeUnicode, + showFieldNames = DefaultShowFieldNames, + ) + case Seq(single) => + pprinter.treeify( + single, + escapeUnicode = DefaultEscapeUnicode, + showFieldNames = DefaultShowFieldNames, + ) + }, + ) + +} diff --git a/community/common/src/main/scala/com/digitalasset/canton/networking/grpc/ApiRequestLogger.scala b/community/common/src/main/scala/com/digitalasset/canton/networking/grpc/ApiRequestLogger.scala index e409e6205..3e25a1a0c 100644 --- a/community/common/src/main/scala/com/digitalasset/canton/networking/grpc/ApiRequestLogger.scala +++ b/community/common/src/main/scala/com/digitalasset/canton/networking/grpc/ApiRequestLogger.scala @@ -3,52 +3,36 @@ package com.digitalasset.canton.networking.grpc -import com.digitalasset.canton.logging.pretty.Pretty.{ - DefaultEscapeUnicode, - DefaultIndent, - DefaultShowFieldNames, - DefaultWidth, -} +import com.digitalasset.canton.config.ApiLoggingConfig import com.digitalasset.canton.logging.{NamedLoggerFactory, NamedLogging} import com.digitalasset.canton.serialization.ProtoConverter import com.digitalasset.canton.tracing.{TraceContext, TraceContextGrpc} import com.digitalasset.canton.util.ShowUtil._ import com.google.common.annotations.VisibleForTesting -import com.google.protobuf.ByteString import io.grpc.ForwardingServerCall.SimpleForwardingServerCall import io.grpc.ForwardingServerCallListener.SimpleForwardingServerCallListener import io.grpc.Status.Code._ import io.grpc._ -import pprint.{PPrinter, Tree} import java.util.concurrent.atomic.AtomicBoolean import scala.util.Try /** Server side interceptor that logs incoming and outgoing traffic. * - * @param logMessagePayloads Indicates whether to log message payloads. (To be disabled in production!) - * Also applies to metadata. - * @param maxMethodLength indicates how much to abbreviate the name of the called method. - * E.g. "com.digitalasset.canton.MyMethod" may get abbreviated to "c.d.c.MyMethod". - * The last token will never get abbreviated. 
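// Editor's note: a hypothetical usage sketch for the CantonPrettyPrinter added above (not part of
// the patch). It shows the intended behaviour inferred from the class itself: Product values are
// rendered structurally with strings cut to maxStringLength and output capped at maxMessageLines,
// while non-Product values fall back to a truncated toString. The Payload class and sample values
// are illustrative only.
import com.digitalasset.canton.logging.pretty.CantonPrettyPrinter

object CantonPrettyPrinterExample extends App {
  final case class Payload(name: String, attempts: Int, note: Option[String])

  val printer = new CantonPrettyPrinter(maxStringLength = 20, maxMessageLines = 10)

  // Product values are pretty-printed structurally, long strings truncated to maxStringLength
  println(printer.printAdHoc(Payload("some-rather-long-payload-name", 3, Some("x" * 100))))

  // Non-Product values fall back to a truncated toString
  println(printer.printAdHoc("x" * 100))
}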
- * @param maxMessageLines maximum number of lines to log for a message - * @param maxStringLength maximum number of characters to log for a string within a message - * @param maxMetadataSize maximum size of metadata + * @param config Configuration to tailor the output */ @SuppressWarnings(Array("org.wartremover.warts.Null")) class ApiRequestLogger( override protected val loggerFactory: NamedLoggerFactory, - logMessagePayloads: Boolean, - maxMethodLength: Int = 30, - maxMessageLines: Int = 10, - maxStringLength: Int = 20, - maxMetadataSize: Int = 200, + config: ApiLoggingConfig, ) extends ServerInterceptor with NamedLogging { @VisibleForTesting private[networking] val cancelled: AtomicBoolean = new AtomicBoolean(false) + private lazy val printer = config.printer + override def interceptCall[ReqT, RespT]( call: ServerCall[ReqT, RespT], headers: Metadata, @@ -61,7 +45,7 @@ class ApiRequestLogger( val method = call.getMethodDescriptor.getFullMethodName def createLogMessage(message: String): String = - show"Request ${method.readableLoggerName(maxMethodLength)} by ${sender.unquoted}: ${message.unquoted}" + show"Request ${method.readableLoggerName(config.maxMethodLength)} by ${sender.unquoted}: ${message.unquoted}" logger.trace(createLogMessage(s"received headers ${stringOfMetadata(headers)}"))( requestTraceContext @@ -205,59 +189,23 @@ class ApiRequestLogger( } } - private lazy val pprinter: PPrinter = PPrinter.BlackWhite.copy( - defaultWidth = DefaultWidth, - defaultHeight = maxMessageLines, - defaultIndent = DefaultIndent, - defaultEscapeUnicode = DefaultEscapeUnicode, - defaultShowFieldNames = DefaultShowFieldNames, - additionalHandlers = { - case _: ByteString => Tree.Literal("ByteString") - case s: String => - import com.digitalasset.canton.logging.pretty.Pretty._ - s.limit(maxStringLength).toTree - case Some(p) => - pprinter.treeify( - p, - escapeUnicode = DefaultEscapeUnicode, - showFieldNames = DefaultShowFieldNames, - ) - case Seq(single) => - pprinter.treeify( - single, - escapeUnicode = DefaultEscapeUnicode, - showFieldNames = DefaultShowFieldNames, - ) - }, - ) - @SuppressWarnings(Array("org.wartremover.warts.Product")) private def cutMessage(message: Any): String = - if (logMessagePayloads) { - message match { - case null => "" - case product: Product => - pprinter(product).toString - case _: Any => - import com.digitalasset.canton.logging.pretty.Pretty._ - message.toString.limit(maxStringLength).toString - } - } else { - "" - } + if (config.logMessagePayloads) printer.printAdHoc(message) + else "" private def stringOfTrailers(trailers: Metadata): String = - if (!logMessagePayloads || trailers == null || trailers.keys().isEmpty) { + if (!config.logMessagePayloads || trailers == null || trailers.keys().isEmpty) { "" } else { s"\n Trailers: ${stringOfMetadata(trailers)}" } private def stringOfMetadata(metadata: Metadata): String = - if (!logMessagePayloads || metadata == null) { + if (!config.logMessagePayloads || metadata == null) { "" } else { - metadata.toString.limit(maxMetadataSize).toString + metadata.toString.limit(config.maxMetadataSize).toString } private def enhance(status: Status): Status = { diff --git a/community/common/src/main/scala/com/digitalasset/canton/networking/grpc/CantonCommunityServerInterceptors.scala b/community/common/src/main/scala/com/digitalasset/canton/networking/grpc/CantonCommunityServerInterceptors.scala index 415655799..1834682bd 100644 --- 
a/community/common/src/main/scala/com/digitalasset/canton/networking/grpc/CantonCommunityServerInterceptors.scala +++ b/community/common/src/main/scala/com/digitalasset/canton/networking/grpc/CantonCommunityServerInterceptors.scala @@ -3,6 +3,7 @@ package com.digitalasset.canton.networking.grpc +import com.digitalasset.canton.config.ApiLoggingConfig import com.digitalasset.canton.logging.NamedLoggerFactory import com.digitalasset.canton.tracing.{TraceContextGrpc, TracingConfig} import io.grpc.ServerInterceptors.intercept @@ -19,7 +20,7 @@ trait CantonServerInterceptors { class CantonCommunityServerInterceptors( tracingConfig: TracingConfig, - logMessagePayloads: Boolean, + apiLoggingConfig: ApiLoggingConfig, loggerFactory: NamedLoggerFactory, ) extends CantonServerInterceptors { private def interceptForLogging( @@ -27,7 +28,7 @@ class CantonCommunityServerInterceptors( withLogging: Boolean, ): ServerServiceDefinition = if (withLogging) { - intercept(service, new ApiRequestLogger(loggerFactory, logMessagePayloads)) + intercept(service, new ApiRequestLogger(loggerFactory, apiLoggingConfig)) } else { service } diff --git a/community/common/src/main/scala/com/digitalasset/canton/networking/grpc/CantonServerBuilder.scala b/community/common/src/main/scala/com/digitalasset/canton/networking/grpc/CantonServerBuilder.scala index 934bd5dec..57fd6c0ce 100644 --- a/community/common/src/main/scala/com/digitalasset/canton/networking/grpc/CantonServerBuilder.scala +++ b/community/common/src/main/scala/com/digitalasset/canton/networking/grpc/CantonServerBuilder.scala @@ -128,7 +128,7 @@ object CantonServerBuilder { nodeMetrics: MetricHandle.Factory, executor: Executor, loggerFactory: NamedLoggerFactory, - logMessagePayloads: Boolean, + apiLoggingConfig: ApiLoggingConfig, tracing: TracingConfig, ): CantonServerBuilder = { val builder = @@ -147,7 +147,7 @@ object CantonServerBuilder { new BaseBuilder( reifyBuilder(configureKeepAlive(config.keepAliveServer, builderWithSsl)), loggerFactory, - config.instantiateServerInterceptors(tracing, logMessagePayloads, nodeMetrics, loggerFactory), + config.instantiateServerInterceptors(tracing, apiLoggingConfig, nodeMetrics, loggerFactory), ) } diff --git a/community/common/src/main/scala/com/digitalasset/canton/package.scala b/community/common/src/main/scala/com/digitalasset/canton/package.scala index 184f4da8c..18d97d7e5 100644 --- a/community/common/src/main/scala/com/digitalasset/canton/package.scala +++ b/community/common/src/main/scala/com/digitalasset/canton/package.scala @@ -68,8 +68,8 @@ package object canton { type LfCreateCommand = LfCommand.CreateByTemplate val LfCreateCommand: LfCommand.CreateByTemplate.type = LfCommand.CreateByTemplate - type LfExerciseCommand = LfCommand.ExerciseTemplate - val LfExerciseCommand: LfCommand.ExerciseTemplate.type = LfCommand.ExerciseTemplate + type LfExerciseCommand = LfCommand.LenientExercise + val LfExerciseCommand: LfCommand.LenientExercise.type = LfCommand.LenientExercise type LfExerciseByKeyCommand = LfCommand.ExerciseTemplateByKey val LfExerciseByKeyCommand: LfCommand.ExerciseTemplateByKey.type = LfCommand.ExerciseTemplateByKey diff --git a/community/common/src/main/scala/com/digitalasset/canton/resource/DbMigrations.scala b/community/common/src/main/scala/com/digitalasset/canton/resource/DbMigrations.scala index 34aaf7fda..72975aa99 100644 --- a/community/common/src/main/scala/com/digitalasset/canton/resource/DbMigrations.scala +++ b/community/common/src/main/scala/com/digitalasset/canton/resource/DbMigrations.scala @@ 
-8,16 +8,14 @@ import com.digitalasset.canton.config.{DbConfig, ProcessingTimeout} import com.digitalasset.canton.lifecycle.CloseContext import com.digitalasset.canton.logging.pretty.{Pretty, PrettyPrinting} import com.digitalasset.canton.logging.{NamedLoggerFactory, NamedLogging} -import com.digitalasset.canton.resource.DbStorage.{Profile, RetryConfig} +import com.digitalasset.canton.resource.DbStorage.RetryConfig import com.digitalasset.canton.tracing.TraceContext +import com.digitalasset.canton.util.ResourceUtil import com.digitalasset.canton.util.retry.RetryEither -import com.digitalasset.canton.util.{LoggerUtil, ResourceUtil} import io.functionmeta.functionFullName import org.flywaydb.core.Flyway import org.flywaydb.core.api.FlywayException -import org.slf4j.event.Level import slick.jdbc.JdbcBackend.Database -import slick.jdbc.canton.ActionBasedSQLInterpolation.Implicits.actionBasedSQLInterpolationCanton import slick.jdbc.hikaricp.HikariCPJdbcDataSource import slick.jdbc.{DataSourceJdbcDataSource, JdbcBackend, JdbcDataSource} @@ -211,161 +209,8 @@ trait DbMigrations { this: NamedLogging => def checkDbVersion( timeouts: ProcessingTimeout, standardConfig: Boolean, - )(implicit tc: TraceContext): Either[DbMigrations.Error, Unit] = { - - withDb { db => - logger.debug(s"Performing version checks") - val profile = DbStorage.profile(dbConfig) - val either: Either[DbMigrations.Error, Unit] = profile match { - - case Profile.Postgres(jdbc) => - val expectedPostgresVersions = Seq(10, 11) - val expectedPostgresVersionsStr = Seq(10, 11).mkString(" or ") - @SuppressWarnings(Array("org.wartremover.warts.TraversableOps")) - val maxPostgresVersion = expectedPostgresVersions.max - - // See https://www.postgresql.org/docs/9.1/sql-show.html - val query = sql"show server_version".as[String] - // Block on the query result, because `withDb` does not support running functions that return a - // future (at the time of writing). - val vector = timeouts.network.await(functionFullName)(db.run(query)) - val stringO = vector.headOption - val either = for { - versionString <- stringO.toRight(left = s"Could not read Postgres version") - // An example `versionString` is 12.9 (Debian 12.9-1.pgdg110+1) - majorVersion <- versionString - .split('.') - .headOption - .toRight(left = - s"Could not parse Postgres version string $versionString. Are you using the recommended Postgres version 11 ?" - ) - .flatMap(str => - Try(str.toInt).toEither.leftMap(exn => - s"Exception in parsing Postgres version string $versionString: $exn" - ) - ) - _unit <- { - if (expectedPostgresVersions.contains(majorVersion)) Right(()) - else if (majorVersion > maxPostgresVersion) { - val level = if (standardConfig) Level.WARN else Level.INFO - LoggerUtil.logAtLevel( - level, - s"Expected Postgres version $expectedPostgresVersionsStr but got higher version $versionString", - ) - Right(()) - } else - Left( - s"Expected Postgres version $expectedPostgresVersionsStr but got lower version $versionString" - ) - } - } yield () - either.leftMap(DbMigrations.DatabaseVersionError) - - case Profile.Oracle(jdbc) => - def checkOracleVersion(): Either[String, Unit] = { - - val expectedOracleVersion = 19 - val expectedOracleVersionPrefix = - " 19." 
// Leading whitespace is intentional, see the example bannerString - - // See https://docs.oracle.com/en/database/oracle/oracle-database/18/refrn/V-VERSION.html - val oracleVersionQuery = sql"select banner from v$$version".as[String].headOption - val stringO = timeouts.network.await(functionFullName)(db.run(oracleVersionQuery)) - stringO match { - case Some(bannerString) => - // An example `bannerString` is "Oracle Database 18c Express Edition Release 18.0.0.0.0 - Production" - if (bannerString.contains(expectedOracleVersionPrefix)) { - logger.debug( - s"Check for oracle version $expectedOracleVersion passed: using $bannerString" - ) - Right(()) - } else { - Left(s"Expected Oracle version $expectedOracleVersion but got $bannerString") - } - case None => - Left(s"Database version check failed: could not read Oracle version") - } - } - - // Checks that the NLS parameter `param` is set to one of the `expected` strings - // - The DB setting must be set - // - The session setting may be empty - def checkNlsParameter( - param: String, - expected: Seq[String], - ): Either[String, Unit] = { - def prettyExpected: String = - if (expected.size == 1) expected(0) - else s"one of ${expected.mkString(", ")}" - - logger.debug(s"Checking NLS parameter $param") - - @SuppressWarnings(Array("com.digitalasset.canton.SlickString")) - val queryDbSetting = - sql"SELECT value from nls_database_parameters where parameter=$param" - .as[String] - .headOption - val dbSettingO = - timeouts.network.await(functionFullName + s"-database-$param")(db.run(queryDbSetting)) - - @SuppressWarnings(Array("com.digitalasset.canton.SlickString")) - val querySessionSetting = - sql"SELECT value from nls_session_parameters where parameter=$param" - .as[String] - .headOption - val sessionSettingO = timeouts.network.await(functionFullName + s"-session-$param")( - db.run(querySessionSetting) - ) - - for { - // Require to find the setting for the database, but leave it optional for the session - dbSetting <- dbSettingO.toRight( - s"Oracle NLS database parameter $param is not set, but should be $prettyExpected" - ) - _ <- Either.cond( - expected.contains(dbSetting.toUpperCase), - logger.debug(s"NLS database parameter $param is set to $dbSetting"), - s"Oracle NLS database parameter $param is $dbSetting, but should be $prettyExpected", - ) - - _ <- sessionSettingO.fold( - Either.right[String, Unit](logger.debug(s"NLS session parameter $param is unset")) - ) { sessionSetting => - Either.cond( - expected.contains(sessionSetting.toUpperCase), - logger.debug(s"NLS session parameter $param is set to $sessionSetting"), - s"Oracle NLS session parameter $param is $sessionSetting, but should be $prettyExpected", - ) - } - } yield () - } - - // Check the NLS settings of the database so that Oracle uses the expected encodings and collations for - // string fields in tables. 
- def checkOracleNlsSetting(): Either[String, Unit] = - for { - _ <- checkNlsParameter("NLS_CHARACTERSET", Seq("AL32UTF8")) - _ <- checkNlsParameter("NLS_NCHAR_CHARACTERSET", Seq("AL32UTF8", "AL16UTF16")) - _ <- checkNlsParameter("NLS_SORT", Seq("BINARY")) - _ <- checkNlsParameter("NLS_COMP", Seq("BINARY")) - } yield () - - for { - _ <- checkOracleVersion().leftMap(DbMigrations.DatabaseVersionError) - _ <- checkOracleNlsSetting().leftMap(DbMigrations.DatabaseConfigError) - } yield () - case Profile.H2(_) => - // We don't perform version checks for H2 - Right(()) - } - if (standardConfig) either - else - either.leftFlatMap { error => - logger.info(error.toString) - Right(()) - } - } - } + )(implicit tc: TraceContext): Either[DbMigrations.Error, Unit] = + withDb(DbVersionCheck.dbVersionCheck(timeouts, standardConfig, dbConfig)) private def checkPendingMigrationInternal( flyway: Flyway diff --git a/community/common/src/main/scala/com/digitalasset/canton/resource/DbVersionCheck.scala b/community/common/src/main/scala/com/digitalasset/canton/resource/DbVersionCheck.scala new file mode 100644 index 000000000..ab8671185 --- /dev/null +++ b/community/common/src/main/scala/com/digitalasset/canton/resource/DbVersionCheck.scala @@ -0,0 +1,187 @@ +// Copyright (c) 2022 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. +// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.resource + +import cats.syntax.either._ +import com.digitalasset.canton.config.{DbConfig, ProcessingTimeout} +import com.digitalasset.canton.logging.ErrorLoggingContext +import com.digitalasset.canton.resource.DbStorage.Profile +import com.digitalasset.canton.util.LoggerUtil +import io.functionmeta.functionFullName +import org.slf4j.event.Level +import slick.jdbc.JdbcBackend.Database +import slick.jdbc.canton.ActionBasedSQLInterpolation.Implicits.actionBasedSQLInterpolationCanton + +import scala.util.Try + +object DbVersionCheck { + + def dbVersionCheck( + timeouts: ProcessingTimeout, + standardConfig: Boolean, + dbConfig: DbConfig, + )(implicit + elc: ErrorLoggingContext + ): Database => Either[DbMigrations.Error, Unit] = { + val logger = elc.logger + implicit val tc = elc.traceContext + + { db => + logger.debug(s"Performing version checks") + val profile = DbStorage.profile(dbConfig) + val either: Either[DbMigrations.Error, Unit] = profile match { + + case Profile.Postgres(jdbc) => + val expectedPostgresVersions = Seq(10, 11, 12, 13, 14) + val expectedPostgresVersionsStr = + s"${(expectedPostgresVersions.dropRight(1)).mkString(", ")}, or ${expectedPostgresVersions + .takeRight(1) + .mkString("")}" + @SuppressWarnings(Array("org.wartremover.warts.TraversableOps")) + val maxPostgresVersion = expectedPostgresVersions.max + + // See https://www.postgresql.org/docs/9.1/sql-show.html + val query = sql"show server_version".as[String] + // Block on the query result, because `withDb` does not support running functions that return a + // future (at the time of writing). + val vector = timeouts.network.await(functionFullName)(db.run(query)) + val stringO = vector.headOption + val either = for { + versionString <- stringO.toRight(left = s"Could not read Postgres version") + // An example `versionString` is 12.9 (Debian 12.9-1.pgdg110+1) + majorVersion <- versionString + .split('.') + .headOption + .toRight(left = + s"Could not parse Postgres version string $versionString. Are you using the recommended Postgres version 11 ?" 
+ ) + .flatMap(str => + Try(str.toInt).toEither.leftMap(exn => + s"Exception in parsing Postgres version string $versionString: $exn" + ) + ) + _unit <- { + if (expectedPostgresVersions.contains(majorVersion)) Right(()) + else if (majorVersion > maxPostgresVersion) { + val level = if (standardConfig) Level.WARN else Level.INFO + LoggerUtil.logAtLevel( + level, + s"Expected Postgres version $expectedPostgresVersionsStr but got higher version $versionString", + ) + Right(()) + } else + Left( + s"Expected Postgres version $expectedPostgresVersionsStr but got lower version $versionString" + ) + } + } yield () + either.leftMap(DbMigrations.DatabaseVersionError) + + case Profile.Oracle(jdbc) => + def checkOracleVersion(): Either[String, Unit] = { + + val expectedOracleVersion = 19 + val expectedOracleVersionPrefix = + " 19." // Leading whitespace is intentional, see the example bannerString + + // See https://docs.oracle.com/en/database/oracle/oracle-database/18/refrn/V-VERSION.html + val oracleVersionQuery = sql"select banner from v$$version".as[String].headOption + val stringO = timeouts.network.await(functionFullName)(db.run(oracleVersionQuery)) + stringO match { + case Some(bannerString) => + // An example `bannerString` is "Oracle Database 18c Express Edition Release 18.0.0.0.0 - Production" + if (bannerString.contains(expectedOracleVersionPrefix)) { + logger.debug( + s"Check for oracle version $expectedOracleVersion passed: using $bannerString" + ) + Right(()) + } else { + Left(s"Expected Oracle version $expectedOracleVersion but got $bannerString") + } + case None => + Left(s"Database version check failed: could not read Oracle version") + } + } + + // Checks that the NLS parameter `param` is set to one of the `expected` strings + // - The DB setting must be set + // - The session setting may be empty + def checkNlsParameter( + param: String, + expected: Seq[String], + ): Either[String, Unit] = { + def prettyExpected: String = + if (expected.size == 1) expected(0) + else s"one of ${expected.mkString(", ")}" + + logger.debug(s"Checking NLS parameter $param") + + @SuppressWarnings(Array("com.digitalasset.canton.SlickString")) + val queryDbSetting = + sql"SELECT value from nls_database_parameters where parameter=$param" + .as[String] + .headOption + val dbSettingO = + timeouts.network.await(functionFullName + s"-database-$param")(db.run(queryDbSetting)) + + @SuppressWarnings(Array("com.digitalasset.canton.SlickString")) + val querySessionSetting = + sql"SELECT value from nls_session_parameters where parameter=$param" + .as[String] + .headOption + val sessionSettingO = timeouts.network.await(functionFullName + s"-session-$param")( + db.run(querySessionSetting) + ) + + for { + // Require to find the setting for the database, but leave it optional for the session + dbSetting <- dbSettingO.toRight( + s"Oracle NLS database parameter $param is not set, but should be $prettyExpected" + ) + _ <- Either.cond( + expected.contains(dbSetting.toUpperCase), + logger.debug(s"NLS database parameter $param is set to $dbSetting"), + s"Oracle NLS database parameter $param is $dbSetting, but should be $prettyExpected", + ) + + _ <- sessionSettingO.fold( + Either.right[String, Unit](logger.debug(s"NLS session parameter $param is unset")) + ) { sessionSetting => + Either.cond( + expected.contains(sessionSetting.toUpperCase), + logger.debug(s"NLS session parameter $param is set to $sessionSetting"), + s"Oracle NLS session parameter $param is $sessionSetting, but should be $prettyExpected", + ) + } + } yield () + } 
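// Editor's note: a self-contained sketch (not part of the patch) of the major-version parsing used
// by the Postgres branch of dbVersionCheck above: take the text before the first '.', parse it as an
// Int, and surface failures as Left values. cats.syntax.either._ supplies leftMap, as in the
// surrounding file; the sample inputs are illustrative.
import scala.util.Try
import cats.syntax.either._

def majorVersion(versionString: String): Either[String, Int] =
  versionString
    .split('.')
    .headOption
    .toRight(s"Could not read a major version from '$versionString'")
    .flatMap(str =>
      Try(str.toInt).toEither.leftMap(exn => s"Could not parse '$str' as an Int: $exn")
    )

// majorVersion("12.9 (Debian 12.9-1.pgdg110+1)") == Right(12)
// majorVersion("not-a-version") is a Left with the parse error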
+ + // Check the NLS settings of the database so that Oracle uses the expected encodings and collations for + // string fields in tables. + def checkOracleNlsSetting(): Either[String, Unit] = + for { + _ <- checkNlsParameter("NLS_CHARACTERSET", Seq("AL32UTF8")) + _ <- checkNlsParameter("NLS_NCHAR_CHARACTERSET", Seq("AL32UTF8", "AL16UTF16")) + _ <- checkNlsParameter("NLS_SORT", Seq("BINARY")) + _ <- checkNlsParameter("NLS_COMP", Seq("BINARY")) + } yield () + + for { + _ <- checkOracleVersion().leftMap(DbMigrations.DatabaseVersionError) + _ <- checkOracleNlsSetting().leftMap(DbMigrations.DatabaseConfigError) + } yield () + case Profile.H2(_) => + // We don't perform version checks for H2 + Right(()) + } + if (standardConfig) either + else + either.leftFlatMap { error => + logger.info(error.toString) + Right(()) + } + } + } + +} diff --git a/community/common/src/main/scala/com/digitalasset/canton/resource/Storage.scala b/community/common/src/main/scala/com/digitalasset/canton/resource/Storage.scala index 6e1142ed5..8672ad908 100644 --- a/community/common/src/main/scala/com/digitalasset/canton/resource/Storage.scala +++ b/community/common/src/main/scala/com/digitalasset/canton/resource/Storage.scala @@ -191,6 +191,11 @@ trait DbStorage extends Storage with FlagCloseable { self: NamedLogging => case _ => s"limit $numberOfItems" + (if (skipItems != 0L) s" offset $skipItems" else "") } + /** Automatically performs #$ interpolation for a call to `limit` */ + def limitSql(numberOfItems: Int, skipItems: Long = 0L): SQLActionBuilder = { + sql" #${limit(numberOfItems, skipItems)} " + } + def metrics: DbStorageMetrics lazy val api: profile.DbStorageAPI.type = profile.DbStorageAPI diff --git a/community/common/src/main/scala/com/digitalasset/canton/sequencing/client/SequencerClient.scala b/community/common/src/main/scala/com/digitalasset/canton/sequencing/client/SequencerClient.scala index 6cd5aac1f..61ff4481c 100644 --- a/community/common/src/main/scala/com/digitalasset/canton/sequencing/client/SequencerClient.scala +++ b/community/common/src/main/scala/com/digitalasset/canton/sequencing/client/SequencerClient.scala @@ -14,6 +14,7 @@ import com.digitalasset.canton.concurrent.FutureSupervisor import com.digitalasset.canton.config.RequireTypes.NonNegativeInt import com.digitalasset.canton.config.{ KeepAliveClientConfig, + LoggingConfig, ProcessingTimeout, TestingConfigInternal, } @@ -21,6 +22,7 @@ import com.digitalasset.canton.crypto.{Crypto, CryptoPureApi, SyncCryptoClient} import com.digitalasset.canton.data.CantonTimestamp import com.digitalasset.canton.lifecycle.Lifecycle.toCloseableOption import com.digitalasset.canton.lifecycle._ +import com.digitalasset.canton.logging.pretty.CantonPrettyPrinter import com.digitalasset.canton.logging.{ErrorLoggingContext, NamedLoggerFactory, NamedLogging} import com.digitalasset.canton.metrics.SequencerClientMetrics import com.digitalasset.canton.networking.grpc.ClientChannelBuilder @@ -156,6 +158,7 @@ class SequencerClient( metrics: SequencerClientMetrics, recorderO: Option[SequencerClientRecorder], replayEnabled: Boolean, + loggingConfig: LoggingConfig, val loggerFactory: NamedLoggerFactory, initialCounter: Option[SequencerCounter] = None, )(implicit executionContext: ExecutionContext, tracer: Tracer) @@ -190,6 +193,9 @@ class SequencerClient( private val receivedEvents: BlockingQueue[OrdinarySerializedEvent] = new ArrayBlockingQueue[OrdinarySerializedEvent](config.eventInboxSize.unwrap) + private lazy val printer = + new 
CantonPrettyPrinter(loggingConfig.api.maxStringLength, loggingConfig.api.maxMessageLines) + /** returns true if the sequencer subscription is healthy */ def subscriptionIsHealthy: Boolean = currentSubscription.get().exists(x => !x.isDegraded) @@ -296,6 +302,11 @@ class SequencerClient( maxSequencingTime, timestampOfSigningKey, ) + if (loggingConfig.eventDetails) { + logger.debug( + s"About to send async batch ${printer.printAdHoc(batch)} as request ${printer.printAdHoc(request)}" + ) + } span.setAttribute("member", member.show) span.setAttribute("message_id", messageId.unwrap) @@ -991,6 +1002,7 @@ object SequencerClient { replayConfigForMember: Member => Option[ReplayConfig], metrics: SequencerClientMetrics, futureSupervisor: FutureSupervisor, + loggingConfig: LoggingConfig, loggerFactory: NamedLoggerFactory, supportedProtocolVersions: Seq[ProtocolVersion], minimumProtocolVersion: Option[ProtocolVersion], @@ -1068,6 +1080,7 @@ object SequencerClient { metrics, recorderO, replayConfigForMember(member).isDefined, + loggingConfig, loggerFactory, ) } diff --git a/community/common/src/main/scala/com/digitalasset/canton/topology/Identifier.scala b/community/common/src/main/scala/com/digitalasset/canton/topology/Identifier.scala index f25fbd50e..8dbd3feff 100644 --- a/community/common/src/main/scala/com/digitalasset/canton/topology/Identifier.scala +++ b/community/common/src/main/scala/com/digitalasset/canton/topology/Identifier.scala @@ -173,4 +173,15 @@ object UniqueIdentifier { fromProtoPrimitive_(uid).valueOr(err => throw new DbDeserializationException(s"Failed to parse a unique ID $uid: $err") ) + + /** Split an uid filter into the two subparts */ + def splitFilter(filter: String, append: String = ""): (String, String) = { + val items = filter.split(SafeSimpleString.delimiter) + val prefix = items(0) + if (items.lengthCompare(1) > 0) { + val suffix = items(1) + (prefix ++ append, suffix ++ append) + } else (prefix ++ append, append) + } + } diff --git a/community/common/src/main/scala/com/digitalasset/canton/topology/admin/grpc/GrpcTopologyAggregationService.scala b/community/common/src/main/scala/com/digitalasset/canton/topology/admin/grpc/GrpcTopologyAggregationService.scala index f10078f42..789404364 100644 --- a/community/common/src/main/scala/com/digitalasset/canton/topology/admin/grpc/GrpcTopologyAggregationService.scala +++ b/community/common/src/main/scala/com/digitalasset/canton/topology/admin/grpc/GrpcTopologyAggregationService.scala @@ -5,6 +5,8 @@ package com.digitalasset.canton.topology.admin.grpc import cats.data.EitherT import cats.syntax.traverse._ + +import com.digitalasset.canton.util.MonadUtil import com.digitalasset.canton.data.CantonTimestamp import com.digitalasset.canton.error.CantonError import com.google.protobuf.timestamp.{Timestamp => ProtoTimestamp} @@ -74,42 +76,53 @@ class GrpcTopologyAggregationService( ) } - private def reorganise( - fetched: List[(DomainId, Map[PartyId, Map[ParticipantId, ParticipantAttributes]])] - ): Map[PartyId, Map[ParticipantId, Map[DomainId, ParticipantPermission]]] = { - val tmp = fetched - .flatMap { case (domainId, res) => - res.flatMap { case (partyId, participants) => - participants.map { case (participantId, relationship) => - (domainId, partyId, (participantId, relationship.permission)) - } + private def findMatchingParties( + clients: List[(DomainId, StoreBasedTopologySnapshot)], + filterParty: String, + filterParticipant: String, + limit: Int, + ): Future[Set[PartyId]] = MonadUtil + .foldLeftM((Set.empty[PartyId], false), 
clients) { case ((res, isDone), (_, client)) => + if (isDone) Future.successful((res, true)) + else + client.inspectKnownParties(filterParty, filterParticipant, limit).map { found => + val tmp = found ++ res + if (tmp.size >= limit) (tmp.take(limit), true) else (tmp, false) } - } - groupBySnd(tmp).map { case (k, v) => - val tmp = groupBySnd(v.map { case (a, (b, c)) => - (a, b, c) - }).map { case (k2, v2) => - (k2, v2.toMap) - } - (k, tmp) } - } + .map(_._1) + + private def findParticipants( + clients: List[(DomainId, StoreBasedTopologySnapshot)], + partyId: PartyId, + ): Future[Map[ParticipantId, Map[DomainId, ParticipantPermission]]] = + clients + .flatTraverse { case (domainId, client) => + client + .activeParticipantsOf(partyId.toLf) + .map(_.map { case (participantId, attributes) => + (domainId, participantId, attributes.permission) + }.toList) + } + .map(_.groupBy { case (_, participantId, _) => participantId }.map { case (k, v) => + (k, v.map { case (domain, _, permission) => (domain, permission) }.toMap) + }) override def listParties(request: v0.ListPartiesRequest): Future[v0.ListPartiesResponse] = TraceContext.fromGrpcContext { implicit traceContext => + val v0.ListPartiesRequest(asOfP, limit, filterDomain, filterParty, filterParticipant) = + request val res: EitherT[Future, CantonError, v0.ListPartiesResponse] = for { - matched <- snapshots(request.filterDomain, request.asOf) - res <- EitherT.right(matched.traverse { case (storeId, client) => - client - .inspectKnownParties(request.filterParty, request.filterParticipant, request.limit) - .map { res => - (storeId, res) - } + matched <- snapshots(filterDomain, asOfP) + parties <- EitherT.right( + findMatchingParties(matched, filterParty, filterParticipant, limit) + ) + results <- EitherT.right(parties.toList.traverse { partyId => + findParticipants(matched, partyId).map(res => (partyId, res)) }) } yield { - val mapped = reorganise(res) v0.ListPartiesResponse( - results = mapped.map { case (partyId, participants) => + results = results.map { case (partyId, participants) => v0.ListPartiesResponse.Result( party = partyId.toProtoPrimitive, participants = participants.map { case (participantId, domains) => @@ -124,7 +137,7 @@ class GrpcTopologyAggregationService( ) }.toSeq, ) - }.toSeq + } ) } EitherTUtil.toFuture(mapErrNew(res)) diff --git a/community/common/src/main/scala/com/digitalasset/canton/topology/client/CachingDomainTopologyClient.scala b/community/common/src/main/scala/com/digitalasset/canton/topology/client/CachingDomainTopologyClient.scala index bc34438b5..140c22199 100644 --- a/community/common/src/main/scala/com/digitalasset/canton/topology/client/CachingDomainTopologyClient.scala +++ b/community/common/src/main/scala/com/digitalasset/canton/topology/client/CachingDomainTopologyClient.scala @@ -268,7 +268,7 @@ private class ForwardingTopologySnapshotClient( filterParty: String, filterParticipant: String, limit: Int, - ): Future[Map[PartyId, Map[ParticipantId, ParticipantAttributes]]] = + ): Future[Set[PartyId]] = parent.inspectKnownParties(filterParty, filterParticipant, limit) override def findUnvettedPackagesOrDependencies( @@ -405,7 +405,7 @@ class CachingTopologySnapshot( filterParty: String, filterParticipant: String, limit: Int, - ): Future[Map[PartyId, Map[ParticipantId, ParticipantAttributes]]] = + ): Future[Set[PartyId]] = parent.inspectKnownParties(filterParty, filterParticipant, limit) /** returns the list of currently known mediators */ diff --git 
a/community/common/src/main/scala/com/digitalasset/canton/topology/client/IdentityProvidingServiceClient.scala b/community/common/src/main/scala/com/digitalasset/canton/topology/client/IdentityProvidingServiceClient.scala index 784ad74aa..f7b27e7c1 100644 --- a/community/common/src/main/scala/com/digitalasset/canton/topology/client/IdentityProvidingServiceClient.scala +++ b/community/common/src/main/scala/com/digitalasset/canton/topology/client/IdentityProvidingServiceClient.scala @@ -262,7 +262,7 @@ trait PartyTopologySnapshotClient { filterParty: String, filterParticipant: String, limit: Int, - ): Future[Map[PartyId, Map[ParticipantId, ParticipantAttributes]]] + ): Future[Set[PartyId]] } diff --git a/community/common/src/main/scala/com/digitalasset/canton/topology/client/StoreBasedDomainTopologyClient.scala b/community/common/src/main/scala/com/digitalasset/canton/topology/client/StoreBasedDomainTopologyClient.scala index 544ca6bab..11795bf5d 100644 --- a/community/common/src/main/scala/com/digitalasset/canton/topology/client/StoreBasedDomainTopologyClient.scala +++ b/community/common/src/main/scala/com/digitalasset/canton/topology/client/StoreBasedDomainTopologyClient.scala @@ -6,7 +6,6 @@ package com.digitalasset.canton.topology.client import cats.data.EitherT import cats.syntax.functorFilter._ import cats.syntax.list._ -import cats.syntax.traverse._ import cats.syntax.functor._ import com.daml.lf.data.Ref.PackageId import com.digitalasset.canton.config.ProcessingTimeout @@ -693,35 +692,8 @@ class StoreBasedTopologySnapshot( filterParty: String, filterParticipant: String, limit: Int, - ): Future[Map[PartyId, Map[ParticipantId, ParticipantAttributes]]] = - findTransactions( - asOfInclusive = false, - includeSecondary = false, - types = Seq( - DomainTopologyTransactionType.PartyToParticipant, - DomainTopologyTransactionType.ParticipantState, - ), - filterUid = None, - filterNamespace = None, - ) - .flatMap { col => - val parties = col.toIdentityState - .collect { - case TopologyStateUpdateElement(_, ParticipantState(_, _, participant, _, _)) => - participant.adminParty - case TopologyStateUpdateElement(_, PartyToParticipant(_, party, _, _)) => party - } - .filter { party => - party.filterString.startsWith(filterParty) - } - .take(limit) - parties - .traverse { party => - activeParticipantsOf(party.toLf) - .map(res => (party, res.filter(_._1.filterString.startsWith(filterParticipant)))) - } // FIXME(i7397): This will filter out disabled participants, breaking the limit clause. 
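// Editor's note: a simplified, stand-alone sketch (not part of the patch) of the early-stopping
// accumulation that findMatchingParties in GrpcTopologyAggregationService above performs with
// MonadUtil.foldLeftM: results are merged source by source and remaining sources are skipped once
// the limit is reached. Plain Futures are used here instead of Canton's MonadUtil; names are
// illustrative.
import scala.concurrent.{ExecutionContext, Future}

def collectUpTo[A](sources: List[() => Future[Set[A]]], limit: Int)(implicit
    ec: ExecutionContext
): Future[Set[A]] =
  sources
    .foldLeft(Future.successful((Set.empty[A], false))) { case (accF, nextSource) =>
      accF.flatMap {
        // already have enough results: do not even query the remaining sources
        case (acc, true) => Future.successful((acc, true))
        case (acc, false) =>
          nextSource().map { found =>
            val merged = acc ++ found
            if (merged.size >= limit) (merged.take(limit), true) else (merged, false)
          }
      }
    }
    .map { case (result, _) => result }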
- .map(_.toMap) - } + ): Future[Set[PartyId]] = + store.inspectKnownParties(timestamp, filterParty, filterParticipant, limit) override private[client] def loadUnvettedPackagesOrDependencies( participant: ParticipantId, diff --git a/community/common/src/main/scala/com/digitalasset/canton/topology/processing/TopologyTransactionProcessor.scala b/community/common/src/main/scala/com/digitalasset/canton/topology/processing/TopologyTransactionProcessor.scala index 993276fb9..366e046d7 100644 --- a/community/common/src/main/scala/com/digitalasset/canton/topology/processing/TopologyTransactionProcessor.scala +++ b/community/common/src/main/scala/com/digitalasset/canton/topology/processing/TopologyTransactionProcessor.scala @@ -244,7 +244,9 @@ class TopologyTransactionProcessor( validated <- validatedF _ <- incrementalF _ <- cascadingF // does synchronize storeF - filtered = validated._2.filter(_.rejectionReason.isEmpty).map(_.transaction) + filtered = validated._2.collect { + case transaction if transaction.rejectionReason.isEmpty => transaction.transaction + } _ <- listeners.toList.traverse( _.observed( sequencingTimestamp, @@ -437,9 +439,23 @@ class TopologyTransactionProcessor( filterNamespace = Some(namespaces), ) ) - targetFiltered = target.signedTransactions.filter(tx => - cascadingFilter(tx) && authValidator.isCurrentlyAuthorized(tx) - ) + + targetFiltered = target.signedTransactions.filter { tx => + lazy val isDomainGovernance = tx.transaction.element match { + case _: TopologyStateUpdateElement => false + case _: DomainGovernanceElement => true + } + + /* + We check that the transaction is properly authorized or is a domain governance. + This allows not to drop domain governance transactions with cascading updates. + In the scenario where a key authorizes a domain parameters change and is later + revoked, the domain parameters stay valid. 
+ */ + val isAuthorized = authValidator.isCurrentlyAuthorized(tx) || isDomainGovernance + + cascadingFilter(tx) && isAuthorized + } current <- performUnlessClosingF( store diff --git a/community/common/src/main/scala/com/digitalasset/canton/topology/store/TopologyStore.scala b/community/common/src/main/scala/com/digitalasset/canton/topology/store/TopologyStore.scala index a550e082e..2f4ce9760 100644 --- a/community/common/src/main/scala/com/digitalasset/canton/topology/store/TopologyStore.scala +++ b/community/common/src/main/scala/com/digitalasset/canton/topology/store/TopologyStore.scala @@ -377,6 +377,13 @@ abstract class TopologyStore(implicit ec: ExecutionContext) extends AutoCloseabl traceContext: TraceContext ): Future[StoredTopologyTransactions[TopologyChangeOp]] + def inspectKnownParties( + timestamp: CantonTimestamp, + filterParty: String, + filterParticipant: String, + limit: Int, + )(implicit traceContext: TraceContext): Future[Set[PartyId]] + /** find active topology transactions * * active / state means that for the key authorizing the transaction, there is a connected path to reach the root certificate diff --git a/community/common/src/main/scala/com/digitalasset/canton/topology/store/db/DbTopologyStore.scala b/community/common/src/main/scala/com/digitalasset/canton/topology/store/db/DbTopologyStore.scala index 0a80d3c5c..b5a3468be 100644 --- a/community/common/src/main/scala/com/digitalasset/canton/topology/store/db/DbTopologyStore.scala +++ b/community/common/src/main/scala/com/digitalasset/canton/topology/store/db/DbTopologyStore.scala @@ -15,7 +15,7 @@ import com.digitalasset.canton.config.RequireTypes.{ String255, String300, } -import com.digitalasset.canton.crypto.PublicKey +import com.digitalasset.canton.crypto.{Fingerprint, PublicKey} import com.digitalasset.canton.data.CantonTimestamp import com.digitalasset.canton.lifecycle.Lifecycle import com.digitalasset.canton.logging.NamedLoggerFactory @@ -469,6 +469,46 @@ class DbTopologyStore( ): Future[StoredTopologyTransactions[TopologyChangeOp]] = queryForTransactions(transactionStoreIdName, sql"") + @SuppressWarnings(Array("com.digitalasset.canton.SlickString")) + override def inspectKnownParties( + timestamp: CantonTimestamp, + filterParty: String, + filterParticipant: String, + limit: Int, + )(implicit + traceContext: TraceContext + ): Future[Set[PartyId]] = { + val p2pm = DomainTopologyTransactionType.PartyToParticipant + val pdsm = DomainTopologyTransactionType.ParticipantState + val (filterPartyIdentifier, filterPartyNamespace) = + UniqueIdentifier.splitFilter(filterParty, "%") + val (filterParticipantIdentifier, filterParticipantNamespace) = + UniqueIdentifier.splitFilter(filterParticipant, "%") + val limitS = storage.limit(limit) + val query = + sql""" + SELECT identifier, namespace FROM topology_transactions WHERE store_id = $stateStoreIdFilterName + AND valid_from < $timestamp AND (valid_until IS NULL OR $timestamp <= valid_until) + AND ( + (transaction_type = $p2pm AND identifier LIKE $filterPartyIdentifier AND namespace LIKE $filterPartyNamespace + AND secondary_identifier LIKE $filterParticipantIdentifier AND secondary_namespace LIKE $filterParticipantNamespace) + OR (transaction_type = $pdsm AND identifier LIKE $filterPartyIdentifier AND namespace LIKE $filterPartyNamespace + AND identifier LIKE $filterParticipantIdentifier AND namespace LIKE $filterParticipantNamespace) + ) AND ignore_reason IS NULL GROUP BY (identifier, namespace) #${limitS}""" + readTime.metric.event { + storage + .query( + query.as[ + 
(String, String) + ], + functionFullName, + ) + .map(_.map { case (id, ns) => + PartyId(UniqueIdentifier(Identifier.tryCreate(id), Namespace(Fingerprint.tryCreate(ns)))) + }.toSet) + } + } + /** query optimized for inspection */ override def inspect( stateStore: Boolean, @@ -514,12 +554,10 @@ class DbTopologyStore( else if (namespaceOnly) { query2 ++ sql" AND namespace LIKE ${idFilter + "%"}" } else { - val splitted = idFilter.split(SafeSimpleString.delimiter) - val prefix = splitted(0) - val tmp = query2 ++ sql" AND identifier like ${prefix + "%"} " - if (splitted.lengthCompare(1) > 0) { - val suffix = splitted(1) - tmp ++ sql" AND namespace like ${suffix + "%"} " + val (prefix, suffix) = UniqueIdentifier.splitFilter(idFilter, "%") + val tmp = query2 ++ sql" AND identifier like $prefix " + if (suffix.sizeCompare(1) > 0) { + tmp ++ sql" AND namespace like $suffix " } else tmp } diff --git a/community/common/src/main/scala/com/digitalasset/canton/topology/store/memory/InMemoryTopologyStore.scala b/community/common/src/main/scala/com/digitalasset/canton/topology/store/memory/InMemoryTopologyStore.scala index 8a7fe3020..7dd12b3c6 100644 --- a/community/common/src/main/scala/com/digitalasset/canton/topology/store/memory/InMemoryTopologyStore.scala +++ b/community/common/src/main/scala/com/digitalasset/canton/topology/store/memory/InMemoryTopologyStore.scala @@ -399,6 +399,38 @@ class InMemoryTopologyStore(val loggerFactory: NamedLoggerFactory)(implicit ec: Future.unit } + override def inspectKnownParties( + timestamp: CantonTimestamp, + filterParty: String, + filterParticipant: String, + limit: Int, + )(implicit traceContext: TraceContext): Future[Set[PartyId]] = { + def filter(entry: TopologyStoreEntry[Positive]): Boolean = { + // active + entry.from < timestamp && entry.until.forall(until => timestamp <= until) && + // not rejected + entry.rejected.isEmpty && + // matches either a party to participant mapping (with appropriate filters) + ((entry.transaction.uniquePath.dbType == DomainTopologyTransactionType.PartyToParticipant && + entry.transaction.uniquePath.maybeUid.exists(_.toProtoPrimitive.startsWith(filterParty)) && + entry.secondaryUid.exists(_.toProtoPrimitive.startsWith(filterParticipant))) || + // or matches a participant with appropriate filters + (entry.transaction.uniquePath.dbType == DomainTopologyTransactionType.ParticipantState && + entry.transaction.uniquePath.maybeUid + .exists(_.toProtoPrimitive.startsWith(filterParty)) && + entry.transaction.uniquePath.maybeUid + .exists(_.toProtoPrimitive.startsWith(filterParticipant)))) + } + val topologyStateStoreSeq = blocking(synchronized(topologyStateStore.toSeq)) + Future.successful( + topologyStateStoreSeq + .foldLeft(Set.empty[PartyId]) { + case (acc, elem) if acc.size >= limit || !filter(elem) => acc + case (acc, elem) => elem.transaction.uniquePath.maybeUid.fold(acc)(x => acc + PartyId(x)) + } + ) + } + /** query optimized for inspection */ override def inspect( stateStore: Boolean, @@ -525,4 +557,5 @@ class InMemoryTopologyStore(val loggerFactory: NamedLoggerFactory)(implicit ec: }) override def close(): Unit = () + } diff --git a/community/common/src/main/scala/com/digitalasset/canton/topology/transaction/TopologyMapping.scala b/community/common/src/main/scala/com/digitalasset/canton/topology/transaction/TopologyMapping.scala index 50575dc16..1db42f9ab 100644 --- a/community/common/src/main/scala/com/digitalasset/canton/topology/transaction/TopologyMapping.scala +++ 
b/community/common/src/main/scala/com/digitalasset/canton/topology/transaction/TopologyMapping.scala @@ -41,7 +41,9 @@ sealed trait TopologyStateUpdateMapping extends TopologyMapping sealed trait DomainGovernanceMapping extends TopologyMapping { def domainId: DomainId - override def uniquePath(_elementId: TopologyElementId): UniquePath = + override def uniquePath( + _elementId: TopologyElementId + ): UniquePathSignedDomainGovernanceTransaction = UniquePathSignedDomainGovernanceTransaction(domainId.unwrap, dbType) } diff --git a/community/common/src/main/scala/com/digitalasset/canton/topology/transaction/TopologyTransaction.scala b/community/common/src/main/scala/com/digitalasset/canton/topology/transaction/TopologyTransaction.scala index 49863aa05..6029abe56 100644 --- a/community/common/src/main/scala/com/digitalasset/canton/topology/transaction/TopologyTransaction.scala +++ b/community/common/src/main/scala/com/digitalasset/canton/topology/transaction/TopologyTransaction.scala @@ -180,7 +180,7 @@ final case class DomainGovernanceElement(mapping: DomainGovernanceMapping) prettyOfClass(param("id", _.id), param("mapping", _.mapping)) lazy val id: TopologyElementId = TopologyElementId(mapping.domainId.toLengthLimitedString) - lazy val uniquePath: UniquePath = + lazy val uniquePath: UniquePathSignedDomainGovernanceTransaction = mapping.uniquePath(id) // TODO(Rafael): id is not used for the path ; improve API? } diff --git a/community/common/src/main/scala/com/digitalasset/canton/util/retry/Policy.scala b/community/common/src/main/scala/com/digitalasset/canton/util/retry/Policy.scala index 1217b103c..2e909f37c 100644 --- a/community/common/src/main/scala/com/digitalasset/canton/util/retry/Policy.scala +++ b/community/common/src/main/scala/com/digitalasset/canton/util/retry/Policy.scala @@ -72,8 +72,8 @@ object Policy { flagCloseable, maxRetries = Int.MaxValue, retryInterval, - operationName, - actionable, + operationName = operationName, + actionable = Some(actionable), ).unlessShutdown(FutureUnlessShutdown.outcomeF(task), AllExnRetryable)( Success.always, executionContext, @@ -85,6 +85,7 @@ abstract class RetryWithDelay( logger: TracedLogger, operationName: String, longDescription: String, + actionable: Option[String], // How to mitigate the error initialDelay: FiniteDuration, totalMaxRetries: Int, flagCloseable: FlagCloseable, @@ -93,6 +94,8 @@ abstract class RetryWithDelay( private val complainAfterRetries: Int = 10 + private val actionableMessage: String = actionable.map(" " + _).getOrElse("") + protected def nextDelay(nextCount: Int, delay: FiniteDuration): FiniteDuration /** A [[com.digitalasset.canton.util.retry.Success]] criteria is supplied @@ -232,7 +235,7 @@ abstract class RetryWithDelay( invocationP.trySuccess(retryP.future) LoggerUtil.logAtLevel( level, - s"Now retrying operation '$operationName'. $longDescription", + s"Now retrying operation '$operationName'. $longDescription$actionableMessage", ) // Run the task again on the normal execution context as the task might take a long time. // `performUnlessClosingF` guards against closing the execution context. @@ -306,11 +309,14 @@ abstract class RetryWithDelay( run(runTask(), 0, NoErrorKind, 0, initialDelay) } - private def messageOfOutcome(outcome: Try[Any], consequence: String): String = outcome match { + private def messageOfOutcome( + outcome: Try[Any], + consequence: String, + ): String = outcome match { case util.Success(result) => s"The operation '$operationName' was not successful. $consequence Result: $result. 
$longDescription" case Failure(_) => - s"The operation '$operationName' has failed with an exception. $consequence $longDescription" + s"The operation '$operationName' has failed with an exception. $consequence $longDescription$actionableMessage" } @SuppressWarnings(Array("org.wartremover.warts.Null")) @@ -357,6 +363,7 @@ case class Directly( logger, operationName, longDescription, + None, Duration.Zero, maxRetries, flagCloseable, @@ -374,11 +381,13 @@ case class Pause( delay: FiniteDuration, operationName: String, longDescription: String = "", + actionable: Option[String] = None, retryLogLevel: Option[Level] = None, ) extends RetryWithDelay( logger, operationName, longDescription, + actionable, delay, maxRetries, flagCloseable, @@ -427,12 +436,14 @@ case class Backoff( maxDelay: Duration, operationName: String, longDescription: String = "", + actionable: Option[String] = None, retryLogLevel: Option[Level] = None, )(implicit jitter: Jitter = Jitter.full(maxDelay)) extends RetryWithDelay( logger, operationName, longDescription, + actionable, initialDelay, maxRetries, flagCloseable, diff --git a/community/common/src/test/scala/com/digitalasset/canton/networking/grpc/ApiRequestLoggerTest.scala b/community/common/src/test/scala/com/digitalasset/canton/networking/grpc/ApiRequestLoggerTest.scala index 4f6474fc5..9dcbe95d3 100644 --- a/community/common/src/test/scala/com/digitalasset/canton/networking/grpc/ApiRequestLoggerTest.scala +++ b/community/common/src/test/scala/com/digitalasset/canton/networking/grpc/ApiRequestLoggerTest.scala @@ -3,6 +3,7 @@ package com.digitalasset.canton.networking.grpc +import com.digitalasset.canton.config.ApiLoggingConfig import com.digitalasset.canton.domain.api.v0.HelloServiceGrpc.HelloService import com.digitalasset.canton.domain.api.v0.{Hello, HelloServiceGrpc} import com.digitalasset.canton.logging.{NamedEventCapturingLogger, TracedLogger} @@ -171,9 +172,11 @@ class ApiRequestLoggerTest extends AnyWordSpec with BaseTest with HasExecutionCo val apiRequestLogger: ApiRequestLogger = new ApiRequestLogger( capturingLogger, - logMessagePayloads, - maxStringLength = maxStringLenth, - maxMetadataSize = maxMetadataSize, + config = ApiLoggingConfig( + messagePayloads = Some(logMessagePayloads), + maxStringLength = maxStringLenth, + maxMetadataSize = maxMetadataSize, + ), ) val server: Server = InProcessServerBuilder diff --git a/community/common/src/test/scala/com/digitalasset/canton/sequencing/client/SequencerClientTest.scala b/community/common/src/test/scala/com/digitalasset/canton/sequencing/client/SequencerClientTest.scala index 95cf3a02a..f00c0082d 100644 --- a/community/common/src/test/scala/com/digitalasset/canton/sequencing/client/SequencerClientTest.scala +++ b/community/common/src/test/scala/com/digitalasset/canton/sequencing/client/SequencerClientTest.scala @@ -11,6 +11,7 @@ import com.digitalasset.canton._ import com.digitalasset.canton.config.RequireTypes.NonNegativeInt import com.digitalasset.canton.config.{ DefaultProcessingTimeouts, + LoggingConfig, ProcessingTimeout, TestingConfigInternal, } @@ -726,6 +727,7 @@ class SequencerClientTest extends AsyncWordSpec with BaseTest with HasExecutorSe CommonMockMetrics.sequencerClient, None, false, + LoggingConfig(), loggerFactory, )(executionContext, tracer) val signedEvents = storedEvents.map(SequencerTestUtils.sign) diff --git a/community/common/src/test/scala/com/digitalasset/canton/topology/TestingIdentityFactory.scala 
b/community/common/src/test/scala/com/digitalasset/canton/topology/TestingIdentityFactory.scala index 5e9ddb196..026645e15 100644 --- a/community/common/src/test/scala/com/digitalasset/canton/topology/TestingIdentityFactory.scala +++ b/community/common/src/test/scala/com/digitalasset/canton/topology/TestingIdentityFactory.scala @@ -484,6 +484,22 @@ class TestingOwnerWithKeys( TrustLevel.Ordinary, ) ) + val p2p1 = mkAdd( + PartyToParticipant( + RequestSide.Both, + PartyId(UniqueIdentifier(Identifier.tryCreate("one"), Namespace(key1.id))), + participant1, + ParticipantPermission.Submission, + ) + ) + val p2p2 = mkAdd( + PartyToParticipant( + RequestSide.Both, + PartyId(UniqueIdentifier(Identifier.tryCreate("two"), Namespace(key1.id))), + participant1, + ParticipantPermission.Submission, + ) + ) private val defaultDomainParameters = TestDomainParameters.defaultDynamic diff --git a/community/common/src/test/scala/com/digitalasset/canton/topology/client/IdentityProvidingServiceClientTest.scala b/community/common/src/test/scala/com/digitalasset/canton/topology/client/IdentityProvidingServiceClientTest.scala index 621c6b7b3..b02f4a1b2 100644 --- a/community/common/src/test/scala/com/digitalasset/canton/topology/client/IdentityProvidingServiceClientTest.scala +++ b/community/common/src/test/scala/com/digitalasset/canton/topology/client/IdentityProvidingServiceClientTest.scala @@ -53,7 +53,7 @@ class PartyTopologySnapshotClientTest extends AsyncWordSpec with BaseTest { filterParty: String, filterParticipant: String, limit: Int, - ): Future[Map[PartyId, Map[ParticipantId, ParticipantAttributes]]] = + ): Future[Set[PartyId]] = ??? override def activeParticipantsOfParties( diff --git a/community/common/src/test/scala/com/digitalasset/canton/topology/processing/TopologyTransactionProcessorTest.scala b/community/common/src/test/scala/com/digitalasset/canton/topology/processing/TopologyTransactionProcessorTest.scala index 3506e3664..72b067952 100644 --- a/community/common/src/test/scala/com/digitalasset/canton/topology/processing/TopologyTransactionProcessorTest.scala +++ b/community/common/src/test/scala/com/digitalasset/canton/topology/processing/TopologyTransactionProcessorTest.scala @@ -178,7 +178,15 @@ class TopologyTransactionProcessorTest st2 <- fetch(store, ts(1).immediateSuccessor) } yield { validate(st1, block1) - validate(st2, List(ns1k1_k1)) // dmp1_k2 is revoked because the ns delegation is revoked + + /* + dmp1_k2 is not revoked + Domain governance transaction are not removed by cascading updates. The + idea behind is that the change of domain parameters is authorized and then + the new parameters stay valid even if the authorizing key is revoked. That + also ensures that we always have some domain parameters set. 
+ */ + validate(st2, List(ns1k1_k1, dmp1_k2)) } } diff --git a/community/common/src/test/scala/com/digitalasset/canton/topology/store/TopologyStoreTest.scala b/community/common/src/test/scala/com/digitalasset/canton/topology/store/TopologyStoreTest.scala index 199ae5626..e009f4471 100644 --- a/community/common/src/test/scala/com/digitalasset/canton/topology/store/TopologyStoreTest.scala +++ b/community/common/src/test/scala/com/digitalasset/canton/topology/store/TopologyStoreTest.scala @@ -995,6 +995,21 @@ trait TopologyStoreTest } yield res } + "inspection for parties" in { + val store = mk() + val ts = CantonTimestamp.Epoch + for { + _ <- store.updateState(ts, deactivate = Seq(), positive = List(ps1, p2p1, p2p2)) + res <- store.inspectKnownParties(ts.immediateSuccessor, "one", "", 100) + res2 <- store.inspectKnownParties(ts.immediateSuccessor, "", "", 1) + empty1 <- store.inspectKnownParties(ts.immediateSuccessor, "three", "", 100) + } yield { + empty1 shouldBe empty + res.toSeq should have length (1) + res2.toSeq should have length (1) + } + } + } "using watermarks" when { diff --git a/community/demo/src/main/daml/ai-analysis/daml.yaml b/community/demo/src/main/daml/ai-analysis/daml.yaml index d506f7ebf..c8c5f6940 100644 --- a/community/demo/src/main/daml/ai-analysis/daml.yaml +++ b/community/demo/src/main/daml/ai-analysis/daml.yaml @@ -1,4 +1,4 @@ -sdk-version: 2.1.0-snapshot.20220324.9615.0.467b8fbb +sdk-version: 2.1.0-snapshot.20220328.9630.0.66c37bad name: ai-analysis parties: - Alice diff --git a/community/demo/src/main/daml/bank/daml.yaml b/community/demo/src/main/daml/bank/daml.yaml index 2805952ba..9c89213f0 100644 --- a/community/demo/src/main/daml/bank/daml.yaml +++ b/community/demo/src/main/daml/bank/daml.yaml @@ -1,4 +1,4 @@ -sdk-version: 2.1.0-snapshot.20220324.9615.0.467b8fbb +sdk-version: 2.1.0-snapshot.20220328.9630.0.66c37bad name: bank parties: - Alice diff --git a/community/demo/src/main/daml/doctor/daml.yaml b/community/demo/src/main/daml/doctor/daml.yaml index 024d0c406..b35808209 100644 --- a/community/demo/src/main/daml/doctor/daml.yaml +++ b/community/demo/src/main/daml/doctor/daml.yaml @@ -1,4 +1,4 @@ -sdk-version: 2.1.0-snapshot.20220324.9615.0.467b8fbb +sdk-version: 2.1.0-snapshot.20220328.9630.0.66c37bad name: doctor parties: - Alice diff --git a/community/demo/src/main/daml/health-insurance/daml.yaml b/community/demo/src/main/daml/health-insurance/daml.yaml index 23b871f31..cd2c6a76c 100644 --- a/community/demo/src/main/daml/health-insurance/daml.yaml +++ b/community/demo/src/main/daml/health-insurance/daml.yaml @@ -1,4 +1,4 @@ -sdk-version: 2.1.0-snapshot.20220324.9615.0.467b8fbb +sdk-version: 2.1.0-snapshot.20220328.9630.0.66c37bad name: health-insurance parties: - Alice diff --git a/community/demo/src/main/daml/medical-records/daml.yaml b/community/demo/src/main/daml/medical-records/daml.yaml index 8798f5f42..cb5e6287b 100644 --- a/community/demo/src/main/daml/medical-records/daml.yaml +++ b/community/demo/src/main/daml/medical-records/daml.yaml @@ -1,4 +1,4 @@ -sdk-version: 2.1.0-snapshot.20220324.9615.0.467b8fbb +sdk-version: 2.1.0-snapshot.20220328.9630.0.66c37bad name: medical-records parties: - Alice diff --git a/community/domain/src/main/scala/com/digitalasset/canton/domain/DomainNodeBootstrap.scala b/community/domain/src/main/scala/com/digitalasset/canton/domain/DomainNodeBootstrap.scala index 9eeeef681..a30ee6751 100644 --- a/community/domain/src/main/scala/com/digitalasset/canton/domain/DomainNodeBootstrap.scala +++ 
b/community/domain/src/main/scala/com/digitalasset/canton/domain/DomainNodeBootstrap.scala @@ -9,7 +9,6 @@ import cats.data.EitherT import cats.syntax.either._ import cats.syntax.traverse._ import com.daml.error.ErrorGroup -import com.digitalasset.canton import com.digitalasset.canton.concurrent.{ ExecutionContextIdlenessExecutorService, FutureSupervisor, @@ -56,17 +55,12 @@ import com.digitalasset.canton.store.SequencerCounterTrackerStore import com.digitalasset.canton.store.db.SequencerClientDiscriminator import com.digitalasset.canton.time.{Clock, HasUptime} import com.digitalasset.canton.topology.TopologyManagerError.DomainErrorGroup -import com.digitalasset.canton.topology.{DomainId, _} -import com.digitalasset.canton.topology.admin.grpc.{ - GrpcTopologyAggregationService, - GrpcTopologyManagerReadService, - GrpcTopologyManagerWriteService, -} import com.digitalasset.canton.topology.client._ import com.digitalasset.canton.topology.processing.TopologyTransactionProcessor import com.digitalasset.canton.topology.store.StoredTopologyTransactions import com.digitalasset.canton.topology.store.TopologyStoreId.{AuthorizedStore, DomainStore} import com.digitalasset.canton.topology.transaction._ +import com.digitalasset.canton.topology._ import com.digitalasset.canton.tracing.TraceContext.withNewTraceContext import com.digitalasset.canton.tracing.{NoTracing, TraceContext} import com.digitalasset.canton.util.Thereafter.syntax.ThereafterOps @@ -115,8 +109,6 @@ class DomainNodeBootstrap( @SuppressWarnings(Array("org.wartremover.warts.Var")) var topologyManager: Option[DomainTopologyManager] = None - private val ips = new IdentityProvidingServiceClient() - override protected def autoInitializeIdentity(): EitherT[Future, String, Unit] = withNewTraceContext { implicit traceContext => for { @@ -241,43 +233,7 @@ class DomainNodeBootstrap( loggerFactory, ) topologyManager = Some(manager) - - adminServerRegistry - .addService( - canton.topology.admin.v0.TopologyManagerReadServiceGrpc - .bindService( - new GrpcTopologyManagerReadService( - topologyStoreFactory.allNonDiscriminated, - ips, - loggerFactory, - ), - executionContext, - ) - ) - .addService( - canton.topology.admin.v0.TopologyManagerWriteServiceGrpc - .bindService( - new GrpcTopologyManagerWriteService( - manager, - manager.store, - crypto.cryptoPublicStore, - loggerFactory, - ), - executionContext, - ) - ) - .addService( - canton.topology.admin.v0.TopologyAggregationServiceGrpc - .bindService( - new GrpcTopologyAggregationService( - topologyStoreFactory.allNonDiscriminated, - ips, - loggerFactory, - ), - executionContext, - ) - ) - + startTopologyManagementWriteService(manager, manager.store) manager } diff --git a/community/domain/src/main/scala/com/digitalasset/canton/domain/config/DomainConfig.scala b/community/domain/src/main/scala/com/digitalasset/canton/domain/config/DomainConfig.scala index 208d0db30..77519fac5 100644 --- a/community/domain/src/main/scala/com/digitalasset/canton/domain/config/DomainConfig.scala +++ b/community/domain/src/main/scala/com/digitalasset/canton/domain/config/DomainConfig.scala @@ -70,7 +70,7 @@ case class CommunityPublicServerConfig( case class DomainNodeParameters( tracing: TracingConfig, delayLoggingThreshold: NonNegativeFiniteDuration, - logMessagePayloads: Boolean, + loggingConfig: LoggingConfig, logQueryCost: Option[QueryCostMonitoringConfig], enableAdditionalConsistencyChecks: Boolean, enablePreviewFeatures: Boolean, diff --git 
a/community/domain/src/main/scala/com/digitalasset/canton/domain/initialization/DomainNodeSequencerClientFactory.scala b/community/domain/src/main/scala/com/digitalasset/canton/domain/initialization/DomainNodeSequencerClientFactory.scala index 860f7f8d5..22e9a6ee1 100644 --- a/community/domain/src/main/scala/com/digitalasset/canton/domain/initialization/DomainNodeSequencerClientFactory.scala +++ b/community/domain/src/main/scala/com/digitalasset/canton/domain/initialization/DomainNodeSequencerClientFactory.scala @@ -100,6 +100,7 @@ class DomainNodeSequencerClientFactory( Domain.replaySequencerConfig.get().lift(member).map(Domain.defaultReplayPath(member)), clientMetrics, futureSupervisor, + cantonParameterConfig.loggingConfig, clientLoggerFactory, supportedProtocolVersions = ProtocolVersion.supportedProtocolsDomain, minimumProtocolVersion = None, diff --git a/community/domain/src/main/scala/com/digitalasset/canton/domain/initialization/PublicGrpcServerInitialization.scala b/community/domain/src/main/scala/com/digitalasset/canton/domain/initialization/PublicGrpcServerInitialization.scala index 232840954..1d1d5018c 100644 --- a/community/domain/src/main/scala/com/digitalasset/canton/domain/initialization/PublicGrpcServerInitialization.scala +++ b/community/domain/src/main/scala/com/digitalasset/canton/domain/initialization/PublicGrpcServerInitialization.scala @@ -46,7 +46,7 @@ object PublicGrpcServerInitialization { metrics, executionContext, loggerFactory, - cantonParameterConfig.logMessagePayloads, + cantonParameterConfig.loggingConfig.api, cantonParameterConfig.tracing, ) // Overriding the dummy setting from PublicServerConfig. diff --git a/community/domain/src/main/scala/com/digitalasset/canton/domain/sequencing/SequencerRuntime.scala b/community/domain/src/main/scala/com/digitalasset/canton/domain/sequencing/SequencerRuntime.scala index 6a3b5c773..d864f89c7 100644 --- a/community/domain/src/main/scala/com/digitalasset/canton/domain/sequencing/SequencerRuntime.scala +++ b/community/domain/src/main/scala/com/digitalasset/canton/domain/sequencing/SequencerRuntime.scala @@ -224,6 +224,7 @@ class SequencerRuntime( metrics.sequencerClient, None, replayEnabled = false, + localNodeParameters.loggingConfig, loggerFactory, snapshot.flatMap(_.heads.get(sequencerId).map(_ + 1)), ) diff --git a/community/domain/src/test/scala/com/digitalasset/canton/domain/sequencing/service/GrpcSequencerIntegrationTest.scala b/community/domain/src/test/scala/com/digitalasset/canton/domain/sequencing/service/GrpcSequencerIntegrationTest.scala index bd1f28f0a..ce1579bb8 100644 --- a/community/domain/src/test/scala/com/digitalasset/canton/domain/sequencing/service/GrpcSequencerIntegrationTest.scala +++ b/community/domain/src/test/scala/com/digitalasset/canton/domain/sequencing/service/GrpcSequencerIntegrationTest.scala @@ -10,6 +10,7 @@ import com.digitalasset.canton.concurrent.FutureSupervisor import com.digitalasset.canton.config.RequireTypes.NonNegativeInt import com.digitalasset.canton.config.{ DefaultProcessingTimeouts, + LoggingConfig, ProcessingTimeout, TestingConfigInternal, } @@ -166,6 +167,7 @@ case class Env(loggerFactory: NamedLoggerFactory)(implicit _ => None, CommonMockMetrics.sequencerClient, FutureSupervisor.Noop, + LoggingConfig(), loggerFactory, ProtocolVersion.supportedProtocolsParticipant, Some(ProtocolVersion.latestForTest), diff --git a/community/participant/src/main/daml/daml.yaml b/community/participant/src/main/daml/daml.yaml index c87d30473..1dffcf133 100644 --- 
a/community/participant/src/main/daml/daml.yaml +++ b/community/participant/src/main/daml/daml.yaml @@ -1,4 +1,4 @@ -sdk-version: 2.1.0-snapshot.20220324.9615.0.467b8fbb +sdk-version: 2.1.0-snapshot.20220328.9630.0.66c37bad name: AdminWorkflows source: AdminWorkflows.daml version: 2.1.0 diff --git a/community/participant/src/main/scala/com/digitalasset/canton/participant/ParticipantNode.scala b/community/participant/src/main/scala/com/digitalasset/canton/participant/ParticipantNode.scala index 0c0bda8e0..7e57b1819 100644 --- a/community/participant/src/main/scala/com/digitalasset/canton/participant/ParticipantNode.scala +++ b/community/participant/src/main/scala/com/digitalasset/canton/participant/ParticipantNode.scala @@ -12,6 +12,7 @@ import com.daml.grpc.adapter.ExecutionSequencerFactory import com.daml.lf.CantonOnly import com.daml.lf.data.Ref.PackageId import com.daml.lf.engine.Engine +import com.digitalasset.canton.LedgerParticipantId import com.digitalasset.canton.concurrent.{ ExecutionContextIdlenessExecutorService, FutureSupervisor, @@ -67,16 +68,6 @@ import com.digitalasset.canton.resource._ import com.digitalasset.canton.sequencing.client.{RecordingConfig, ReplayConfig} import com.digitalasset.canton.time._ import com.digitalasset.canton.topology._ -import com.digitalasset.canton.topology.admin.grpc.{ - GrpcTopologyAggregationService, - GrpcTopologyManagerReadService, - GrpcTopologyManagerWriteService, -} -import com.digitalasset.canton.topology.admin.v0.{ - TopologyAggregationServiceGrpc, - TopologyManagerReadServiceGrpc, - TopologyManagerWriteServiceGrpc, -} import com.digitalasset.canton.topology.client.{ DomainTopologyClient, IdentityProvidingServiceClient, @@ -86,7 +77,6 @@ import com.digitalasset.canton.topology.transaction.{NamespaceDelegation, OwnerT import com.digitalasset.canton.tracing.TraceContext.withNewTraceContext import com.digitalasset.canton.tracing.{NoTracing, TraceContext} import com.digitalasset.canton.util.{EitherTUtil, ErrorUtil} -import com.digitalasset.canton.LedgerParticipantId import io.grpc.ServerServiceDefinition import java.util.concurrent.ScheduledExecutorService @@ -144,8 +134,6 @@ class ParticipantNodeBootstrap( None ) - private val ips = new IdentityProvidingServiceClient() - private val topologyManager = new ParticipantTopologyManager( clock, @@ -154,31 +142,8 @@ class ParticipantNodeBootstrap( cantonParameterConfig.processingTimeouts, loggerFactory, ) - // add participant node topology manager - adminServerRegistry.addService( - TopologyManagerWriteServiceGrpc - .bindService( - new GrpcTopologyManagerWriteService( - topologyManager, - topologyManager.store, - crypto.cryptoPublicStore, - loggerFactory, - ), - executionContext, - ) - ) - adminServerRegistry.addService( - TopologyManagerReadServiceGrpc - .bindService( - new GrpcTopologyManagerReadService( - topologyStoreFactory.allNonDiscriminated, - ips, - loggerFactory, - ), - executionContext, - ) - ) + startTopologyManagementWriteService(topologyManager, topologyManager.store) private def createAndStartLedgerApiServer( ledgerId: String, @@ -588,17 +553,6 @@ class ParticipantNodeBootstrap( DomainConnectivityServiceGrpc .bindService(new GrpcDomainConnectivityService(stateService), executionContext) ) - adminServerRegistry.addService( - TopologyAggregationServiceGrpc - .bindService( - new GrpcTopologyAggregationService( - topologyStoreFactory.allNonDiscriminated, - ips, - loggerFactory, - ), - executionContext, - ) - ) adminServerRegistry.addService( TransferServiceGrpc.bindService( new 
GrpcTransferService(sync.transferService), diff --git a/community/participant/src/main/scala/com/digitalasset/canton/participant/config/LocalParticipantConfig.scala b/community/participant/src/main/scala/com/digitalasset/canton/participant/config/LocalParticipantConfig.scala index d6832e108..53d28e656 100644 --- a/community/participant/src/main/scala/com/digitalasset/canton/participant/config/LocalParticipantConfig.scala +++ b/community/participant/src/main/scala/com/digitalasset/canton/participant/config/LocalParticipantConfig.scala @@ -76,7 +76,8 @@ object PartyNotificationConfig { /** Publish party changes when they have become effective on a domain. * This ensures that ledger API apps can immediately make use of party changes when they receive the notification. * If a party is changed on a participant while the participant is not connected to any domain, - * then the notification is delayed up to the point that the participant connects to a domain. + * then the party change will fail if triggered via the ledger API + * and will be delayed until the participant connects to a domain if triggered via Canton's admin endpoint. */ case object ViaDomain extends PartyNotificationConfig } @@ -84,7 +85,7 @@ object PartyNotificationConfig { case class ParticipantNodeParameters( override val tracing: TracingConfig, override val delayLoggingThreshold: NonNegativeFiniteDuration, - override val logMessagePayloads: Boolean, + override val loggingConfig: LoggingConfig, override val logQueryCost: Option[QueryCostMonitoringConfig], override val enableAdditionalConsistencyChecks: Boolean, override val enablePreviewFeatures: Boolean, diff --git a/community/participant/src/main/scala/com/digitalasset/canton/participant/domain/DomainRegistryHelpers.scala b/community/participant/src/main/scala/com/digitalasset/canton/participant/domain/DomainRegistryHelpers.scala index 7a9415c28..392eb2853 100644 --- a/community/participant/src/main/scala/com/digitalasset/canton/participant/domain/DomainRegistryHelpers.scala +++ b/community/participant/src/main/scala/com/digitalasset/canton/participant/domain/DomainRegistryHelpers.scala @@ -231,6 +231,7 @@ trait DomainRegistryHelpers extends FlagCloseable with NamedLogging { ), metrics(config.domain).sequencerClient, futureSupervisor, + participantNodeParameters.loggingConfig, domainLoggerFactory, ProtocolVersion.supportedProtocolsParticipant, minimumProtocolVersion, diff --git a/community/participant/src/main/scala/com/digitalasset/canton/participant/ledger/api/CantonLedgerApiServerWrapper.scala b/community/participant/src/main/scala/com/digitalasset/canton/participant/ledger/api/CantonLedgerApiServerWrapper.scala index ff610601c..781ac2f64 100644 --- a/community/participant/src/main/scala/com/digitalasset/canton/participant/ledger/api/CantonLedgerApiServerWrapper.scala +++ b/community/participant/src/main/scala/com/digitalasset/canton/participant/ledger/api/CantonLedgerApiServerWrapper.scala @@ -335,7 +335,7 @@ object CantonLedgerApiServerWrapper extends NoTracing { otherInterceptors = List( new ApiRequestLogger( config.loggerFactory, - config.cantonParameterConfig.logMessagePayloads, + config.cantonParameterConfig.loggingConfig.api, ), GrpcTracing .newBuilder(config.tracerProvider.openTelemetry) diff --git a/community/participant/src/main/scala/com/digitalasset/canton/participant/protocol/submission/ConfirmationRequestFactory.scala b/community/participant/src/main/scala/com/digitalasset/canton/participant/protocol/submission/ConfirmationRequestFactory.scala index
194dd4ed1..e07bbaba3 100644 --- a/community/participant/src/main/scala/com/digitalasset/canton/participant/protocol/submission/ConfirmationRequestFactory.scala +++ b/community/participant/src/main/scala/com/digitalasset/canton/participant/protocol/submission/ConfirmationRequestFactory.scala @@ -8,10 +8,11 @@ import cats.syntax.either._ import cats.syntax.traverse._ import com.daml.ledger.participant.state.v2.SubmitterInfo import com.digitalasset.canton._ +import com.digitalasset.canton.config.LoggingConfig import com.digitalasset.canton.crypto._ import com.digitalasset.canton.data.ViewType.TransactionViewType import com.digitalasset.canton.data._ -import com.digitalasset.canton.logging.NamedLoggerFactory +import com.digitalasset.canton.logging.{NamedLoggerFactory, NamedLogging} import com.digitalasset.canton.logging.pretty.{Pretty, PrettyPrinting} import com.digitalasset.canton.participant.admin.PackageService import com.digitalasset.canton.participant.protocol.submission.ConfirmationRequestFactory._ @@ -37,13 +38,16 @@ import scala.concurrent.{ExecutionContext, Future} * * @param transactionTreeFactory used to create the payload * @param seedGenerator used to derive the transaction seed + * @param loggingConfig if its eventDetails flag is set to true, we'll log the generated transaction view tree */ class ConfirmationRequestFactory( submitterNode: ParticipantId, domain: DomainId, + loggingConfig: LoggingConfig, + val loggerFactory: NamedLoggerFactory, )(val transactionTreeFactory: TransactionTreeFactory, seedGenerator: SeedGenerator)(implicit executionContext: ExecutionContext -) { +) extends NamedLogging { /** Creates a confirmation request from a wellformed transaction. * @@ -108,10 +112,17 @@ class ConfirmationRequestFactory( keySeed, version, ) - } yield ConfirmationRequest( - InformeeMessage(transactionTree.fullInformeeTree), - transactionViewEnvelopes, - ) + } yield { + if (loggingConfig.eventDetails) { + logger.debug( + s"Transaction tree is ${loggingConfig.api.printer.printAdHoc(transactionTree)}" + ) + } + ConfirmationRequest( + InformeeMessage(transactionTree.fullInformeeTree), + transactionViewEnvelopes, + ) + } } private def assertSubmittersNodeAuthorization( @@ -179,6 +190,7 @@ object ConfirmationRequestFactory { cryptoOps: HashOps with HmacOps, seedGenerator: SeedGenerator, packageService: PackageService, + loggingConfig: LoggingConfig, loggerFactory: NamedLoggerFactory, )(implicit executionContext: ExecutionContext): ConfirmationRequestFactory = { @@ -191,7 +203,7 @@ object ConfirmationRequestFactory { loggerFactory, ) - new ConfirmationRequestFactory(submitterNode, domainId)( + new ConfirmationRequestFactory(submitterNode, domainId, loggingConfig, loggerFactory)( transactionTreeFactory, seedGenerator, ) diff --git a/community/participant/src/main/scala/com/digitalasset/canton/participant/protocol/transfer/TransferCoordination.scala b/community/participant/src/main/scala/com/digitalasset/canton/participant/protocol/transfer/TransferCoordination.scala index 1be3fb416..f843073fc 100644 --- a/community/participant/src/main/scala/com/digitalasset/canton/participant/protocol/transfer/TransferCoordination.scala +++ b/community/participant/src/main/scala/com/digitalasset/canton/participant/protocol/transfer/TransferCoordination.scala @@ -115,6 +115,26 @@ class TransferCoordination( .leftMap[TransferProcessorError](_ => NoTimeProofFromDomain(domain)) } yield timeProof + def getTimeProofAndSnapshot(targetDomain: DomainId)(implicit + traceContext: TraceContext + ): EitherT[ + FutureUnlessShutdown,
TransferProcessorError, + (TimeProof, DomainSnapshotSyncCryptoApi), + ] = + for { + timeProof <- recentTimeProof(targetDomain) + timestamp = timeProof.timestamp + + // Since events are stored before they are processed, we wait just to be sure. + waitFuture <- EitherT.fromEither[FutureUnlessShutdown]( + awaitTimestamp(targetDomain, timestamp, waitForEffectiveTime = true) + ) + _ <- EitherT.right(FutureUnlessShutdown.outcomeF(waitFuture.getOrElse(Future.unit))) + targetCrypto <- cryptoSnapshot(targetDomain, timestamp) + .mapK(FutureUnlessShutdown.outcomeK) + } yield (timeProof, targetCrypto) + /** Stores the given transfer data on the target domain. */ def addTransferOutRequest( transferData: TransferData diff --git a/community/participant/src/main/scala/com/digitalasset/canton/participant/protocol/transfer/TransferOutProcessingSteps.scala b/community/participant/src/main/scala/com/digitalasset/canton/participant/protocol/transfer/TransferOutProcessingSteps.scala index caa035847..ee5b807ed 100644 --- a/community/participant/src/main/scala/com/digitalasset/canton/participant/protocol/transfer/TransferOutProcessingSteps.scala +++ b/community/participant/src/main/scala/com/digitalasset/canton/participant/protocol/transfer/TransferOutProcessingSteps.scala @@ -16,23 +16,15 @@ import com.digitalasset.canton.data.{CantonTimestamp, FullTransferOutTree, ViewT import com.digitalasset.canton.error.BaseCantonError import com.digitalasset.canton.lifecycle.FutureUnlessShutdown import com.digitalasset.canton.lifecycle.FutureUnlessShutdown.syntax._ -import com.digitalasset.canton.topology.transaction.ParticipantPermission.{ - Confirmation, - Disabled, - Observation, - Submission, +import com.digitalasset.canton.logging.{ + ErrorLoggingContext, + NamedLoggerFactory, + NamedLogging, + TracedLogger, } -import com.digitalasset.canton.topology._ -import com.digitalasset.canton.topology.client.TopologySnapshot -import com.digitalasset.canton.logging.{NamedLoggerFactory, NamedLogging, TracedLogger} import com.digitalasset.canton.participant.RequestCounter import com.digitalasset.canton.participant.protocol.ProcessingSteps.PendingRequestData import com.digitalasset.canton.participant.protocol.ProtocolProcessor.PendingRequestDataOrReplayData -import com.digitalasset.canton.participant.protocol.{ - ProtocolProcessor, - SingleDomainCausalTracker, - TransferOutUpdate, -} import com.digitalasset.canton.participant.protocol.conflictdetection.{ ActivenessCheck, ActivenessResult, @@ -46,6 +38,11 @@ import com.digitalasset.canton.participant.protocol.submission.{ import com.digitalasset.canton.participant.protocol.transfer.TransferInProcessingSteps.NoTransferData import com.digitalasset.canton.participant.protocol.transfer.TransferOutProcessingSteps._ import com.digitalasset.canton.participant.protocol.transfer.TransferProcessingSteps._ +import com.digitalasset.canton.participant.protocol.{ + ProtocolProcessor, + SingleDomainCausalTracker, + TransferOutUpdate, +} import com.digitalasset.canton.participant.store.TransferStore.TransferCompleted import com.digitalasset.canton.participant.store._ import com.digitalasset.canton.participant.util.DAMLe @@ -56,6 +53,14 @@ import com.digitalasset.canton.protocol.messages._ import com.digitalasset.canton.sequencing.protocol._ import com.digitalasset.canton.serialization.DeserializationError import com.digitalasset.canton.time.TimeProof +import com.digitalasset.canton.topology._ +import com.digitalasset.canton.topology.client.TopologySnapshot +import 
com.digitalasset.canton.topology.transaction.ParticipantPermission.{ + Confirmation, + Disabled, + Observation, + Submission, +} import com.digitalasset.canton.topology.transaction.{ParticipantAttributes, ParticipantPermission} import com.digitalasset.canton.tracing.TraceContext import com.digitalasset.canton.util.EitherTUtil.{condUnitET, ifThenET} @@ -63,6 +68,7 @@ import com.digitalasset.canton.util.EitherUtil.condUnitE import com.digitalasset.canton.util.{EitherTUtil, MonadUtil} import com.digitalasset.canton.version.ProtocolVersion import com.digitalasset.canton.{LfPartyId, SequencerCounter, checked} +import org.slf4j.event.Level import scala.collection.{concurrent, mutable} import scala.concurrent.{ExecutionContext, Future} @@ -129,7 +135,7 @@ class TransferOutProcessingSteps( FutureUnlessShutdown.outcomeK ) - timeProofAndSnapshot <- getTimeProofAndSnapshot(targetDomain) + timeProofAndSnapshot <- transferCoordination.getTimeProofAndSnapshot(targetDomain) (timeProof, targetCrypto) = timeProofAndSnapshot _ = logger.debug(withDetails(s"Picked time proof ${timeProof.timestamp}")) @@ -230,27 +236,6 @@ class TransferOutProcessingSteps( .toRight[TransferProcessorError](TransferOutProcessingSteps.UnknownContract(contractId)) .map(storedContract => storedContract.contract.metadata.stakeholders) - private[this] def getTimeProofAndSnapshot(targetDomain: DomainId)(implicit - traceContext: TraceContext - ): EitherT[ - FutureUnlessShutdown, - TransferProcessorError, - (TimeProof, DomainSnapshotSyncCryptoApi), - ] = - for { - timeProof <- transferCoordination.recentTimeProof(targetDomain) - timestamp = timeProof.timestamp - - // Since events are stored before they are processed, we wait just to be sure. - waitFuture <- EitherT.fromEither[FutureUnlessShutdown]( - transferCoordination.awaitTimestamp(targetDomain, timestamp, waitForEffectiveTime = true) - ) - _ <- EitherT.right(FutureUnlessShutdown.outcomeF(waitFuture.getOrElse(Future.unit))) - targetCrypto <- transferCoordination - .cryptoSnapshot(targetDomain, timestamp) - .mapK(FutureUnlessShutdown.outcomeK) - } yield (timeProof, targetCrypto) - override protected def decryptTree(originSnapshot: DomainSnapshotSyncCryptoApi)( envelope: OpenEnvelope[EncryptedViewMessage[TransferOutViewType]] ): EitherT[Future, EncryptedViewMessageDecryptionError[TransferOutViewType], WithRecipients[ @@ -543,115 +528,18 @@ class TransferOutProcessingSteps( private[this] def triggerTransferInWhenExclusivityTimeoutExceeded( pendingRequestData: PendingRequestData )(implicit traceContext: TraceContext): EitherT[Future, TransferProcessorError, Unit] = { + val targetDomain = pendingRequestData.targetDomain val t0 = pendingRequestData.targetTimeProof.timestamp - def hostedStakeholders(snapshot: TopologySnapshot): Future[Set[LfPartyId]] = { - pendingRequestData.stakeholders.toList - .traverseFilter { partyId => - snapshot - .hostedOn(partyId, participantId) - .map(x => x.filter(_.permission == ParticipantPermission.Submission).map(_ => partyId)) - } - .map(_.toSet) - } - - def performAutoInOnce: EitherT[Future, TransferProcessorError, com.google.rpc.status.Status] = { - for { - targetIps <- getTimeProofAndSnapshot(targetDomain) - .map(_._2) - .onShutdown(Left(DomainNotReady(targetDomain, "Shutdown of time tracker"))) - possibleSubmittingParties <- EitherT.right(hostedStakeholders(targetIps.ipsSnapshot)) - inParty <- EitherT.fromOption[Future]( - possibleSubmittingParties.headOption, - AutomaticTransferInError("No possible submitting party for automatic transfer-in"), - ) - 
submissionResult <- transferCoordination - .transferIn(targetDomain, inParty, pendingRequestData.transferId)(TraceContext.empty) - TransferInProcessingSteps.SubmissionResult(completionF) = submissionResult - status <- EitherT.liftF(completionF) - } yield status - } - - def performAutoInRepeatedly: EitherT[Future, TransferProcessorError, Unit] = { - case class StopRetry(result: Either[TransferProcessorError, com.google.rpc.status.Status]) - val retryCount = 5 - - def tryAgain( - previous: com.google.rpc.status.Status - ): EitherT[Future, StopRetry, com.google.rpc.status.Status] = { - if (BaseCantonError.isStatusErrorCode(MediatorReject.Timeout, previous)) - performAutoInOnce.leftMap(error => StopRetry(Left(error))) - else EitherT.leftT[Future, com.google.rpc.status.Status](StopRetry(Right(previous))) - } - - val initial = performAutoInOnce.leftMap(error => StopRetry(Left(error))) - val result = MonadUtil.repeatFlatmap(initial, tryAgain, retryCount) - - result.transform { - case Left(StopRetry(Left(error))) => Left(error) - case Left(StopRetry(Right(verdict))) => Right(()) - case Right(verdict) => Right(()) - } - } - - def triggerAutoIn( - targetSnapshot: TopologySnapshot, - targetDomainParameters: DynamicDomainParameters, - )(implicit traceContext: TraceContext): Unit = { - val timeoutTimestamp = targetDomainParameters.transferExclusivityLimitFor(t0) - - val autoIn = for { - targetHostedStakeholders <- EitherT.right(hostedStakeholders(targetSnapshot)) - _unit <- - if (targetHostedStakeholders.nonEmpty) { - logger.info( - s"Registering automatic submission of transfer-in with ID ${pendingRequestData.transferId} at time $timeoutTimestamp, where base timestamp is $t0" - ) - for { - timeoutFuture <- EitherT.fromEither[Future]( - transferCoordination.awaitTimestamp( - targetDomain, - timeoutTimestamp, - waitForEffectiveTime = false, - ) - ) - _ <- EitherT.liftF[Future, TransferProcessorError, Unit](timeoutFuture.getOrElse { - logger.debug(s"Automatic transfer-in triggered immediately") - Future.unit - }) - _unit <- EitherTUtil.leftSubflatMap(performAutoInRepeatedly) { - // Filter out submission errors occurring because the transfer is already completed - case NoTransferData(id, TransferCompleted(transferId, timeOfCompletion)) => - Right(()) - // Filter out the case that the participant has disconnected from the target domain in the meantime. 
- case UnknownDomain(domain, _reason) if domain == pendingRequestData.targetDomain => - Right(()) - case DomainNotReady(domain, _reason) if domain == pendingRequestData.targetDomain => - Right(()) - // Filter out the case that the target domain is closing right now - case other => Left(other) - } - } yield () - } else EitherT.pure[Future, TransferProcessorError](()) - } yield () - - EitherTUtil.doNotAwait(autoIn, "Automatic transfer-in failed") - } - - for { - targetIps <- transferCoordination.cryptoSnapshot(targetDomain, t0) - targetSnapshot = targetIps.ipsSnapshot - targetDomainParameters <- EitherTUtil - .fromFuture( - targetSnapshot.findDynamicDomainParametersOrDefault(), - _ => UnknownDomain(targetDomain, "When fetching domain parameters"), - ) - .leftWiden[TransferProcessorError] - } yield - if (targetDomainParameters.automaticTransferInEnabled) - triggerAutoIn(targetSnapshot, targetDomainParameters) - else () + TransferOutProcessingSteps.autoTransferIn( + pendingRequestData.transferId, + targetDomain, + transferCoordination, + pendingRequestData.stakeholders, + participantId, + t0, + ) } private[this] def deleteTransfer(targetDomain: DomainId, transferOutRequestId: RequestId)(implicit @@ -1096,6 +984,131 @@ object TransferOutProcessingSteps { } + def autoTransferIn( + id: TransferId, + targetDomain: DomainId, + transferCoordination: TransferCoordination, + stks: Set[LfPartyId], + participantId: ParticipantId, + t0: CantonTimestamp, + )(implicit + ec: ExecutionContext, + elc: ErrorLoggingContext, + ): EitherT[Future, TransferProcessorError, Unit] = { + val logger = elc.logger + implicit val tc = elc.traceContext + + def hostedStakeholders(snapshot: TopologySnapshot): Future[Set[LfPartyId]] = { + stks.toList + .traverseFilter { partyId => + snapshot + .hostedOn(partyId, participantId) + .map(x => x.filter(_.permission == ParticipantPermission.Submission).map(_ => partyId)) + } + .map(_.toSet) + } + + def performAutoInOnce: EitherT[Future, TransferProcessorError, com.google.rpc.status.Status] = { + for { + targetIps <- transferCoordination + .getTimeProofAndSnapshot(targetDomain) + .map(_._2) + .onShutdown(Left(DomainNotReady(targetDomain, "Shutdown of time tracker"))) + possibleSubmittingParties <- EitherT.right(hostedStakeholders(targetIps.ipsSnapshot)) + inParty <- EitherT.fromOption[Future]( + possibleSubmittingParties.headOption, + AutomaticTransferInError("No possible submitting party for automatic transfer-in"), + ) + submissionResult <- transferCoordination + .transferIn(targetDomain, inParty, id)(TraceContext.empty) + TransferInProcessingSteps.SubmissionResult(completionF) = submissionResult + status <- EitherT.liftF(completionF) + } yield status + } + + def performAutoInRepeatedly: EitherT[Future, TransferProcessorError, Unit] = { + case class StopRetry(result: Either[TransferProcessorError, com.google.rpc.status.Status]) + val retryCount = 5 + + def tryAgain( + previous: com.google.rpc.status.Status + ): EitherT[Future, StopRetry, com.google.rpc.status.Status] = { + if (BaseCantonError.isStatusErrorCode(MediatorReject.Timeout, previous)) + performAutoInOnce.leftMap(error => StopRetry(Left(error))) + else EitherT.leftT[Future, com.google.rpc.status.Status](StopRetry(Right(previous))) + } + + val initial = performAutoInOnce.leftMap(error => StopRetry(Left(error))) + val result = MonadUtil.repeatFlatmap(initial, tryAgain, retryCount) + + result.transform { + case Left(StopRetry(Left(error))) => Left(error) + case Left(StopRetry(Right(verdict))) => Right(()) + case 
Right(verdict) => Right(()) + } + } + + def triggerAutoIn( + targetSnapshot: TopologySnapshot, + targetDomainParameters: DynamicDomainParameters, + ): Unit = { + val timeoutTimestamp = targetDomainParameters.transferExclusivityLimitFor(t0) + + val autoIn = for { + targetHostedStakeholders <- EitherT.right(hostedStakeholders(targetSnapshot)) + _unit <- + if (targetHostedStakeholders.nonEmpty) { + logger.info( + s"Registering automatic submission of transfer-in with ID ${id} at time $timeoutTimestamp, where base timestamp is $t0" + ) + for { + timeoutFuture <- EitherT.fromEither[Future]( + transferCoordination.awaitTimestamp( + targetDomain, + timeoutTimestamp, + waitForEffectiveTime = false, + ) + ) + _ <- EitherT.liftF[Future, TransferProcessorError, Unit](timeoutFuture.getOrElse { + logger.debug(s"Automatic transfer-in triggered immediately") + Future.unit + }) + _unit <- EitherTUtil.leftSubflatMap(performAutoInRepeatedly) { + // Filter out submission errors occurring because the transfer is already completed + case NoTransferData(id, TransferCompleted(transferId, timeOfCompletion)) => + Right(()) + // Filter out the case that the participant has disconnected from the target domain in the meantime. + case UnknownDomain(domain, _reason) if domain == targetDomain => + Right(()) + case DomainNotReady(domain, _reason) if domain == targetDomain => + Right(()) + // Filter out the case that the target domain is closing right now + case other => Left(other) + } + } yield () + } else EitherT.pure[Future, TransferProcessorError](()) + } yield () + + EitherTUtil.doNotAwait(autoIn, "Automatic transfer-in failed", Level.INFO) + } + + for { + targetIps <- transferCoordination.cryptoSnapshot(targetDomain, t0) + targetSnapshot = targetIps.ipsSnapshot + targetDomainParameters <- EitherTUtil + .fromFuture( + targetSnapshot.findDynamicDomainParametersOrDefault(), + _ => UnknownDomain(targetDomain, "When fetching domain parameters"), + ) + .leftWiden[TransferProcessorError] + } yield { + + if (targetDomainParameters.automaticTransferInEnabled) + triggerAutoIn(targetSnapshot, targetDomainParameters) + else () + } + } + private def stringOfNec[A](chain: NonEmptyChain[String]): String = chain.toList.mkString(", ") case class PendingDataAndResponseArgs( diff --git a/community/participant/src/main/scala/com/digitalasset/canton/participant/pruning/AcsCommitmentProcessor.scala b/community/participant/src/main/scala/com/digitalasset/canton/participant/pruning/AcsCommitmentProcessor.scala index e11be7a54..8f22ab5a4 100644 --- a/community/participant/src/main/scala/com/digitalasset/canton/participant/pruning/AcsCommitmentProcessor.scala +++ b/community/participant/src/main/scala/com/digitalasset/canton/participant/pruning/AcsCommitmentProcessor.scala @@ -35,7 +35,7 @@ import com.digitalasset.canton.protocol.{LfContractId, LfHash, WithContractHash} import com.digitalasset.canton.sequencing.client.{SendType, SequencerClient} import com.digitalasset.canton.sequencing.protocol.{Batch, OpenEnvelope, Recipients} import com.digitalasset.canton.store.SequencerCounterTrackerStore -import com.digitalasset.canton.time.{Clock, NonNegativeFiniteDuration, PositiveSeconds} +import com.digitalasset.canton.time.PositiveSeconds import com.digitalasset.canton.topology.{DomainId, ParticipantId} import com.digitalasset.canton.tracing.{TraceContext, Traced} import com.digitalasset.canton.util.EitherUtil.RichEither @@ -130,7 +130,7 @@ class AcsCommitmentProcessor( domainCrypto: SyncCryptoClient, reconciliationInterval: PositiveSeconds, 
store: AcsCommitmentStore, - commitmentPeriodObserver: (ExecutionContext, ErrorLoggingContext) => Future[Unit], + commitmentPeriodObserver: (ExecutionContext, TraceContext) => Future[Unit], killSwitch: => Unit, metrics: PruningMetrics, override protected val timeouts: ProcessingTimeout, @@ -309,7 +309,7 @@ class AcsCommitmentProcessor( // as otherwise we can get a race where an incoming commitment doesn't "clear" the outstanding period _ = indicateReadyForRemote(completedPeriod.toInclusive) _ <- processBuffered(completedPeriod.toInclusive) - _ <- commitmentPeriodObserver(ec, loggingContext) + _ <- commitmentPeriodObserver(ec, traceContext) _ <- indicateLocallyProcessed(completedPeriod) } yield () } @@ -410,11 +410,11 @@ class AcsCommitmentProcessor( ), ( tickBeforeOrAt( - payload.period.toInclusive.toTs, + payload.period.toInclusive.forgetSecond, reconciliationInterval, ) != payload.period.toInclusive || tickBeforeOrAt( - payload.period.fromExclusive.toTs, + payload.period.fromExclusive.forgetSecond, reconciliationInterval, ) != payload.period.fromExclusive, s"Received commitment period doesn't align with the domain reconciliation interval: ${payload.period}", @@ -468,7 +468,7 @@ class AcsCommitmentProcessor( endOfLastProcessedPeriod = Some(period.toInclusive) for { // delete the processed buffered commitments (safe to do at any point after `processBuffered` completes) - _ <- store.queue.deleteThrough(period.toInclusive.toTs) + _ <- store.queue.deleteThrough(period.toInclusive.forgetSecond) // mark that we're done with processing this period; safe to do at any point after the commitment has been sent // and the outstanding commitments stored _ <- store.markComputedAndSent(period) @@ -491,7 +491,7 @@ class AcsCommitmentProcessor( // TODO(#8207) Properly check that bounds of commitment.period fall on commitment ticks correctInterval = Seq(commitment.period.fromExclusive, commitment.period.toInclusive).forall { - ts => tickBeforeOrAt(ts.toTs, reconciliationInterval) == ts + ts => tickBeforeOrAt(ts.forgetSecond, reconciliationInterval) == ts } _ <- if (validSig && correctInterval) checkCommitment(commitment) else Future.unit @@ -514,7 +514,7 @@ class AcsCommitmentProcessor( message: SignedProtocolMessage[AcsCommitment] )(implicit traceContext: TraceContext): Future[Boolean] = for { - cryptoSnapshot <- domainCrypto.awaitSnapshot(message.message.period.toInclusive.toTs) + cryptoSnapshot <- domainCrypto.awaitSnapshot(message.message.period.toInclusive.forgetSecond) pureCrypto = domainCrypto.pureCrypto msgHash = pureCrypto.digest( HashPurpose.AcsCommitment, @@ -529,16 +529,24 @@ class AcsCommitmentProcessor( private def checkCommitment( commitment: AcsCommitment - )(implicit traceContext: TraceContext): Future[Unit] = { - val readyToCheck = readyForRemote.exists(_ >= commitment.period.toInclusive) - - if (readyToCheck) { - checkMatchAndMarkSafe(List(commitment)) - } else { - logger.debug(s"Buffering $commitment for later processing") - store.queue.enqueue(commitment) - } - } + )(implicit traceContext: TraceContext): Future[Unit] = + queue + .executeUnlessFailed( + // Make sure that the ready-for-remote check is atomic with buffering the commitment + { + val readyToCheck = readyForRemote.exists(_ >= commitment.period.toInclusive) + + if (readyToCheck) { + // Do not sequentialize the checking + Future.successful(checkMatchAndMarkSafe(List(commitment))) + } else { + logger.debug(s"Buffering $commitment for later processing") + store.queue.enqueue(commitment).map((_: Unit) => Future.successful(())) + 
} + }, + s"check commitment readiness at ${commitment.period} by ${commitment.sender}", + ) + .flatten private def indicateReadyForRemote(timestamp: CantonTimestampSecond): Unit = { readyForRemote.foreach(oldTs => @@ -554,7 +562,7 @@ class AcsCommitmentProcessor( timestamp: CantonTimestampSecond )(implicit traceContext: TraceContext): Future[Unit] = { for { - toProcess <- store.queue.peekThrough(timestamp.toTs) + toProcess <- store.queue.peekThrough(timestamp.forgetSecond) _ <- checkMatchAndMarkSafe(toProcess) } yield { logger.debug( @@ -570,7 +578,7 @@ class AcsCommitmentProcessor( lastPruningTime: Option[CantonTimestamp], )(implicit traceContext: TraceContext): Boolean = { if (local.isEmpty) { - if (lastPruningTime.forall(_ < remote.period.toInclusive.toTs)) { + if (lastPruningTime.forall(_ < remote.period.toInclusive.forgetSecond)) { // TODO(M40): This signifies a Byzantine sender (the signature passes). Alarms should be raised. Errors.MismatchError.NoSharedContracts.Mismatch(domainId, remote) } else @@ -632,7 +640,7 @@ class AcsCommitmentProcessor( s"Computing commitments for $period, number of stakeholder sets: ${commitmentSnapshot.keySet.size}" ) for { - crypto <- domainCrypto.awaitSnapshot(period.toInclusive.toTs) + crypto <- domainCrypto.awaitSnapshot(period.toInclusive.forgetSecond) cmts <- commitments( participantId, commitmentSnapshot, @@ -827,7 +835,7 @@ object AcsCommitmentProcessor { val periodEnd = tickBefore(timestamp, interval) val periodStart = endOfPreviousPeriod.getOrElse(CantonTimestampSecond.MinValue) - CommitmentPeriod(periodStart.toTs, periodEnd.toTs, interval).toOption + CommitmentPeriod(periodStart.forgetSecond, periodEnd.forgetSecond, interval).toOption } /** Compute the ACS commitments at the given timestamp. @@ -849,7 +857,7 @@ object AcsCommitmentProcessor { val commitmentTimer = pruningMetrics.map(_.commitments.compute.metric.time()) for { - ipsSnapshot <- domainCrypto.ipsSnapshot(timestamp.toTs) + ipsSnapshot <- domainCrypto.ipsSnapshot(timestamp.forgetSecond) // Important: use the keys of the timestamp isActiveParticipant <- ipsSnapshot.isParticipantActive(participantId) @@ -893,11 +901,10 @@ object AcsCommitmentProcessor { commitmentsPruningBound: CommitmentsPruningBound, earliestInFlightSubmissionF: Future[Option[CantonTimestamp]], reconciliationInterval: PositiveSeconds, - beforeOrAt: CantonTimestamp, )(implicit ec: ExecutionContext, loggingContext: ErrorLoggingContext, - ): Future[Option[CantonTimestamp]] = + ): Future[Option[CantonTimestampSecond]] = for { // This logic progressively lowers the timestamp based on the following constraints: // 1. Pruning must not delete data needed for recovery (after the clean replay timestamp) @@ -917,20 +924,20 @@ object AcsCommitmentProcessor { // Latest potential pruning point is the ACS commitment tick before or at the "clean replay" timestamp // and strictly before the earliest timestamp associated with an in-flight submission. latestTickBeforeOrAt = getTickBeforeOrAt( - beforeOrAt - .min(cleanReplayTs) - .min(inFlightSubmissionTs.fold(CantonTimestamp.MaxValue)(_.immediatePredecessor)) + cleanReplayTs.min( + inFlightSubmissionTs.fold(CantonTimestamp.MaxValue)(_.immediatePredecessor) + ) ) // Only acs commitment ticks whose ACS commitment fully matches all counter participant ACS commitments are safe, // so look for the most recent such tick before latestTickBeforeOrAt if any. 
tsSafeToPruneUpTo <- commitmentsPruningBound match { case CommitmentsPruningBound.Outstanding(noOutstandingCommitmentsF) => - noOutstandingCommitmentsF(latestTickBeforeOrAt.toTs) + noOutstandingCommitmentsF(latestTickBeforeOrAt.forgetSecond).map(_.map(getTickBeforeOrAt)) case CommitmentsPruningBound.LastComputedAndSent(lastComputedAndSentF) => lastComputedAndSentF.map( _.map(lastComputedAndSent => - getTickBeforeOrAt(lastComputedAndSent).min(latestTickBeforeOrAt).toTs + getTickBeforeOrAt(lastComputedAndSent).min(latestTickBeforeOrAt) ) ) } @@ -969,11 +976,10 @@ object AcsCommitmentProcessor { inFlightSubmissionStore: InFlightSubmissionStore, domainId: DomainId, checkForOutstandingCommitments: Boolean, - beforeOrAt: CantonTimestamp, )(implicit ec: ExecutionContext, loggingContext: ErrorLoggingContext, - ): Future[Option[CantonTimestamp]] = { + ): Future[Option[CantonTimestampSecond]] = { implicit val traceContext: TraceContext = loggingContext.traceContext val cleanReplayF = SyncDomainEphemeralStateFactory .crashRecoveryPruningBoundInclusive(requestJournalStore, sequencerCounterTrackerStore) @@ -983,7 +989,7 @@ object AcsCommitmentProcessor { CommitmentsPruningBound.Outstanding(acsCommitmentStore.noOutstandingCommitments(_)) else CommitmentsPruningBound.LastComputedAndSent( - acsCommitmentStore.lastComputedAndSent.map(_.map(_.toTs)) + acsCommitmentStore.lastComputedAndSent.map(_.map(_.forgetSecond)) ) val earliestInFlightF = inFlightSubmissionStore.lookupEarliest(domainId) @@ -992,69 +998,9 @@ object AcsCommitmentProcessor { commitmentsPruningBound = commitmentsPruningBound, earliestInFlightF, reconciliationInterval, - beforeOrAt, ) } - @SuppressWarnings(Array("org.wartremover.warts.Var")) - def pruneObserver( - requestJournalStore: RequestJournalStore, - sequencerCounterTrackerStore: SequencerCounterTrackerStore, - reconciliationInterval: PositiveSeconds, - acsCommitmentStore: AcsCommitmentStore, - acs: ActiveContractStore, - keyJournal: ContractKeyJournal, - inFlightSubmissionStore: InFlightSubmissionStore, - domainId: DomainId, - acsPruningInterval: NonNegativeFiniteDuration, - clock: Clock, - ): (ExecutionContext, ErrorLoggingContext) => Future[Unit] = { - - var lastPrune: Option[CantonTimestamp] = None - (executionContext, errorLoggingContext) => { - implicit val loggingContext: ErrorLoggingContext = errorLoggingContext - implicit val tc: TraceContext = errorLoggingContext.traceContext - implicit val ec: ExecutionContext = executionContext - - val now = clock.now - if (lastPrune.forall(_ < now.minus(acsPruningInterval.unwrap))) { - safeToPrune( - requestJournalStore, - sequencerCounterTrackerStore, - reconciliationInterval, - acsCommitmentStore, - inFlightSubmissionStore, - domainId, - checkForOutstandingCommitments = false, - now, // TODO(Rafael) figure out whether we want to use CantonTimestamp.MaxValue here - ).flatMap { tsO => - tsO.fold(Future.unit) { ts => - lastPrune = Some(ts) - - // Clean unused entries from the ACS - val acsF = EitherTUtil - .logOnError(acs.prune(ts), s"Periodic ACS prune at $ts:") - .value - // Discard the result of this prune, as it's not needed - .void - // clean unused contract key journal entries - val journalF = - EitherTUtil - .logOnError( - keyJournal.prune(ts), - s"Periodic contract key journal prune at $ts: ", - ) - .value - // discard the result of this prune - .void - acsF.flatMap(_ => journalF) - } - } - } else Future.unit - } - - } - object Errors extends AcsCommitmentErrorGroup { @Explanation( """This error indicates that there was an internal 
error within the ACS commitment processing.""" diff --git a/community/participant/src/main/scala/com/digitalasset/canton/participant/pruning/PruneObserver.scala b/community/participant/src/main/scala/com/digitalasset/canton/participant/pruning/PruneObserver.scala new file mode 100644 index 000000000..5bb68d57d --- /dev/null +++ b/community/participant/src/main/scala/com/digitalasset/canton/participant/pruning/PruneObserver.scala @@ -0,0 +1,100 @@ +// Copyright (c) 2022 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. +// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.participant.pruning + +import com.digitalasset.canton.data.CantonTimestampSecond +import com.digitalasset.canton.logging.{NamedLoggerFactory, NamedLogging} +import com.digitalasset.canton.participant.store._ +import com.digitalasset.canton.store.SequencerCounterTrackerStore +import com.digitalasset.canton.time.{Clock, NonNegativeFiniteDuration, PositiveSeconds} +import com.digitalasset.canton.topology.DomainId +import com.digitalasset.canton.tracing.TraceContext +import com.digitalasset.canton.util.EitherTUtil +import cats.syntax.functor._ +
+import java.time.Duration +import java.util.concurrent.atomic.AtomicReference +import scala.concurrent.{ExecutionContext, Future} +import scala.math.Ordering.Implicits._ +
+private[participant] class PruneObserver( + requestJournalStore: RequestJournalStore, + sequencerCounterTrackerStore: SequencerCounterTrackerStore, + reconciliationInterval: PositiveSeconds, + acsCommitmentStore: AcsCommitmentStore, + acs: ActiveContractStore, + keyJournal: ContractKeyJournal, + inFlightSubmissionStore: InFlightSubmissionStore, + domainId: DomainId, + acsPruningInterval: NonNegativeFiniteDuration, + clock: Clock, + protected val loggerFactory: NamedLoggerFactory, +) extends NamedLogging { + + /* + The synchronization around this variable is relatively loose. + In case of concurrent calls, the risk is that we make unnecessary pruning + calls, which is not a big deal since the pruning operation is idempotent.
+ */ + private val lastPrune: AtomicReference[CantonTimestampSecond] = new AtomicReference( + CantonTimestampSecond.MinValue + ) + + def observer(implicit ec: ExecutionContext, traceContext: TraceContext): Future[Unit] = { + val now = clock.now + val durationSinceLastPruning: Duration = now - lastPrune.get().forgetSecond + val doPruning: Boolean = durationSinceLastPruning > acsPruningInterval.unwrap + + for { + safeToPruneTsO <- + if (doPruning) + AcsCommitmentProcessor.safeToPrune( + requestJournalStore, + sequencerCounterTrackerStore, + reconciliationInterval, + acsCommitmentStore, + inFlightSubmissionStore, + domainId, + checkForOutstandingCommitments = false, + ) + else Future.successful(None) + + _ <- safeToPruneTsO.fold(Future.unit)(prune) + } yield () + } + + private def prune(ts: CantonTimestampSecond)(implicit + ec: ExecutionContext, + traceContext: TraceContext, + ): Future[Unit] = { + + val newValue = lastPrune.updateAndGet(_.max(ts)) + + if (newValue == ts) { + // Clean unused entries from the ACS + val acsF = EitherTUtil + .logOnError(acs.prune(ts.forgetSecond), s"Periodic ACS prune at $ts:") + .value + // Discard the result of this prune, as it's not needed + .void + // clean unused contract key journal entries + val journalF = + EitherTUtil + .logOnError( + keyJournal.prune(ts.forgetSecond), + s"Periodic contract key journal prune at $ts: ", + ) + .value + // discard the result of this prune + .void + acsF.flatMap(_ => journalF) + } else { + /* + Possible race condition here, another call to prune with later timestamp + was done in the meantime. Not doing anything. + */ + Future.unit + } + } +} diff --git a/community/participant/src/main/scala/com/digitalasset/canton/participant/store/AcsCommitmentStore.scala b/community/participant/src/main/scala/com/digitalasset/canton/participant/store/AcsCommitmentStore.scala index fe5323b86..513e0e898 100644 --- a/community/participant/src/main/scala/com/digitalasset/canton/participant/store/AcsCommitmentStore.scala +++ b/community/participant/src/main/scala/com/digitalasset/canton/participant/store/AcsCommitmentStore.scala @@ -112,8 +112,8 @@ trait AcsCommitmentLookup { traceContext: TraceContext ): Future[Option[CantonTimestampSecond]] - /** The latest commitment tick before or at the given timestamp for which no commitments are outstanding. - * It is safe to prune the domain at the returned tick as long as the tick is not before the last timestamp needed + /** The latest timestamp before or at the given timestamp for which no commitments are outstanding. + * It is safe to prune the domain at the returned timestamp as long as it is not before the last timestamp needed * for crash recovery (see [[com.digitalasset.canton.participant.pruning.AcsCommitmentProcessor.safeToPrune]]) * * Returns None if no such tick is known. diff --git a/community/participant/src/main/scala/com/digitalasset/canton/participant/store/TransferStore.scala b/community/participant/src/main/scala/com/digitalasset/canton/participant/store/TransferStore.scala index 2fdbaf5df..1cfd1d912 100644 --- a/community/participant/src/main/scala/com/digitalasset/canton/participant/store/TransferStore.scala +++ b/community/participant/src/main/scala/com/digitalasset/canton/participant/store/TransferStore.scala @@ -181,4 +181,19 @@ trait TransferLookup { filterSubmitter: Option[LfPartyId], limit: Int, )(implicit traceContext: TraceContext): Future[Seq[TransferData]] + + /** Find utility to look for in-flight transfers. 
+ * Transfers are ordered by the tuple (request timestamp, origin domain ID), i.e. transfers are ordered by request timestamps + and ties are broken with lexicographic ordering on domain IDs. + * + * The ordering here has been chosen to allow a participant to fetch all the pending transfers. The ordering has to + * be consistent across calls and uniquely identify a pending transfer, but is otherwise arbitrary. + * + * @param requestAfter optionally, specify a strict lower bound for the transfers returned, according to the + * (request timestamp, origin domain ID) ordering + * @param limit limit the number of results + */ + def findAfter(requestAfter: Option[(CantonTimestamp, DomainId)], limit: Int)(implicit + traceContext: TraceContext + ): Future[Seq[TransferData]] } diff --git a/community/participant/src/main/scala/com/digitalasset/canton/participant/store/db/DbAcsCommitmentStore.scala b/community/participant/src/main/scala/com/digitalasset/canton/participant/store/db/DbAcsCommitmentStore.scala index 132769273..ebc9645e0 100644 --- a/community/participant/src/main/scala/com/digitalasset/canton/participant/store/db/DbAcsCommitmentStore.scala +++ b/community/participant/src/main/scala/com/digitalasset/canton/participant/store/db/DbAcsCommitmentStore.scala @@ -445,7 +445,7 @@ class DbAcsCommitmentStore( ): Future[Option[CantonTimestamp]] = processingTime.metric.event { for { computed <- lastComputedAndSent - adjustedTsOpt = computed.map(_.toTs.min(beforeOrAt)) + adjustedTsOpt = computed.map(_.forgetSecond.min(beforeOrAt)) outstandingOpt <- adjustedTsOpt.traverse { ts => storage.query( sql"select from_exclusive, to_inclusive from outstanding_acs_commitments where domain_id=$domainId and from_exclusive < $ts" diff --git a/community/participant/src/main/scala/com/digitalasset/canton/participant/store/db/DbContractStore.scala b/community/participant/src/main/scala/com/digitalasset/canton/participant/store/db/DbContractStore.scala index f77d43f34..fa565f88a 100644 --- a/community/participant/src/main/scala/com/digitalasset/canton/participant/store/db/DbContractStore.scala +++ b/community/participant/src/main/scala/com/digitalasset/canton/participant/store/db/DbContractStore.scala @@ -361,7 +361,10 @@ class DbContractStore( ) } - private def storeElements[A](elements: Seq[A], fn: A => StoredContract)(implicit + private def storeElements( + elements: Seq[SerializableContract], + fn: SerializableContract => StoredContract, + )(implicit ec: ExecutionContext, traceContext: TraceContext, ): Future[Unit] = { diff --git a/community/participant/src/main/scala/com/digitalasset/canton/participant/store/db/DbTransferStore.scala b/community/participant/src/main/scala/com/digitalasset/canton/participant/store/db/DbTransferStore.scala index 1191541ed..181016f60 100644 --- a/community/participant/src/main/scala/com/digitalasset/canton/participant/store/db/DbTransferStore.scala +++ b/community/participant/src/main/scala/com/digitalasset/canton/participant/store/db/DbTransferStore.scala @@ -20,7 +20,7 @@ import com.digitalasset.canton.participant.util.TimeOfChange import com.digitalasset.canton.protocol.messages._ import com.digitalasset.canton.protocol.version.VersionedSignedContent import com.digitalasset.canton.protocol.{SerializableContract, TransactionId, TransferId} -import com.digitalasset.canton.resource.DbStorage.DbAction +import com.digitalasset.canton.resource.DbStorage.{DbAction, Profile} import com.digitalasset.canton.resource.{DbStorage, DbStore} import
com.digitalasset.canton.sequencing.protocol.{OpenEnvelope, SequencedEvent, SignedContent} import com.digitalasset.canton.serialization.ProtoConverter @@ -277,6 +277,13 @@ class DbTransferStore( storage.update_(query, functionFullName) } + private lazy val findPendingBase = sql""" + select transfer_out_timestamp, transfer_out_request_counter, transfer_out_request, transfer_out_decision_time, + contract, creating_transaction_id, transfer_out_result, time_of_completion_request_counter, time_of_completion_timestamp + from transfers + where target_domain=$domain and time_of_completion_request_counter is null and time_of_completion_timestamp is null + """ + override def find( filterOrigin: Option[DomainId], filterTimestamp: Option[CantonTimestamp], @@ -289,18 +296,40 @@ class DbTransferStore( import DbStorage.Implicits.BuilderChain._ import DbStorage.Implicits._ - val base = sql""" - select transfer_out_timestamp, transfer_out_request_counter, transfer_out_request, transfer_out_decision_time, - contract, creating_transaction_id, transfer_out_result, time_of_completion_request_counter, time_of_completion_timestamp - from transfers - where target_domain=$domain and time_of_completion_request_counter is null and time_of_completion_timestamp is null - """ val originFilter = filterOrigin.fold(sql"")(domain => sql" and origin_domain=${domain}") val timestampFilter = filterTimestamp.fold(sql"")(ts => sql" and request_timestamp=${ts}") val submitterFilter = filterSubmitter.fold(sql"")(submitter => sql" and submitter_lf=${submitter}") - val limitSql = sql" #${storage.limit(limit)} " - (base ++ originFilter ++ timestampFilter ++ submitterFilter ++ limitSql).as[TransferData] + val limitSql = storage.limitSql(limit) + (findPendingBase ++ originFilter ++ timestampFilter ++ submitterFilter ++ limitSql) + .as[TransferData] + }, + functionFullName, + ) + } + + override def findAfter( + requestAfter: Option[(CantonTimestamp, DomainId)], + limit: Int, + )(implicit traceContext: TraceContext): Future[Seq[TransferData]] = + processingTime.metric.event { + storage.query( + { + import DbStorage.Implicits.BuilderChain._ + + val timestampFilter = + requestAfter.fold(sql"")({ case (requestTimestamp, originDomain) => + storage.profile match { + case Profile.Oracle(_) => + sql"and (request_timestamp > ${requestTimestamp} or (request_timestamp = ${requestTimestamp} and origin_domain > ${originDomain}))" + case _ => + sql" and (request_timestamp, origin_domain) > (${requestTimestamp}, ${originDomain}) " + } + }) + val order = sql" order by request_timestamp, origin_domain " + val limitSql = storage.limitSql(limit) + + (findPendingBase ++ timestampFilter ++ order ++ limitSql).as[TransferData] }, functionFullName, ) diff --git a/community/participant/src/main/scala/com/digitalasset/canton/participant/store/memory/InMemoryAcsCommitmentStore.scala b/community/participant/src/main/scala/com/digitalasset/canton/participant/store/memory/InMemoryAcsCommitmentStore.scala index a660d378e..3e1abee08 100644 --- a/community/participant/src/main/scala/com/digitalasset/canton/participant/store/memory/InMemoryAcsCommitmentStore.scala +++ b/community/participant/src/main/scala/com/digitalasset/canton/participant/store/memory/InMemoryAcsCommitmentStore.scala @@ -178,9 +178,9 @@ class InMemoryAcsCommitmentStore(protected val loggerFactory: NamedLoggerFactory Future.successful { for { lastTs <- lastComputed.get - adjustedTs = lastTs.toTs.min(beforeOrAt) + adjustedTs = lastTs.forgetSecond.min(beforeOrAt) periods = _outstanding.get().map { case 
(period, _participants) =>
-            period.fromExclusive.toTs -> period.toInclusive.toTs
+            period.fromExclusive.forgetSecond -> period.toInclusive.forgetSecond
           }
           safe = AcsCommitmentStore.latestCleanPeriod(
             beforeOrAt = adjustedTs,
diff --git a/community/participant/src/main/scala/com/digitalasset/canton/participant/store/memory/InMemoryTransferStore.scala b/community/participant/src/main/scala/com/digitalasset/canton/participant/store/memory/InMemoryTransferStore.scala
index ba73067f5..f335f11c7 100644
--- a/community/participant/src/main/scala/com/digitalasset/canton/participant/store/memory/InMemoryTransferStore.scala
+++ b/community/participant/src/main/scala/com/digitalasset/canton/participant/store/memory/InMemoryTransferStore.scala
@@ -4,6 +4,7 @@
 package com.digitalasset.canton.participant.store.memory

 import cats.data.EitherT
+import cats.implicits.catsSyntaxPartialOrder
 import com.digitalasset.canton.data.CantonTimestamp
 import com.digitalasset.canton.logging.{NamedLoggerFactory, NamedLogging}
 import com.digitalasset.canton.participant.RequestCounter
@@ -158,4 +159,28 @@ class InMemoryTransferStore(
     Future.successful { val _ = transferDataMap.remove(transferId) }
+
+  override def findAfter(requestAfter: Option[(CantonTimestamp, DomainId)], limit: Int)(implicit
+      traceContext: TraceContext
+  ): Future[Seq[TransferData]] = Future.successful {
+    def filter(entry: TransferEntry): Boolean =
+      entry.timeOfCompletion.isEmpty && // Always filter out completed transfers
+        requestAfter.forall(ts =>
+          (entry.transferData.transferId.requestTimestamp, entry.transferData.originDomain) > ts
+        )
+
+    transferDataMap.values
+      .to(LazyList)
+      .filter(filter)
+      .map(_.transferData)
+      .sortBy(t => (t.transferId.requestTimestamp, t.transferId.originDomain))(
+        // Explicitly use the standard ordering on two-tuples here,
+        // as Scala does not infer the right implicit Ordering on its own
+        Ordering.Tuple2(
+          CantonTimestamp.orderCantonTimestamp.toOrdering,
+          DomainId.orderDomainId.toOrdering,
+        )
+      )
+      .take(limit) // Sort before limiting so the result matches the DB store's ordering
+  }
 }
diff --git a/community/participant/src/main/scala/com/digitalasset/canton/participant/store/memory/TransferCache.scala b/community/participant/src/main/scala/com/digitalasset/canton/participant/store/memory/TransferCache.scala
index 84a3f01ef..1b936e9d5 100644
--- a/community/participant/src/main/scala/com/digitalasset/canton/participant/store/memory/TransferCache.scala
+++ b/community/participant/src/main/scala/com/digitalasset/canton/participant/store/memory/TransferCache.scala
@@ -123,6 +123,12 @@ class TransferCache(transferStore: TransferStore, override val loggerFactory: Na
     transferStore
       .find(filterOrigin, filterRequestTimestamp, filterSubmitter, limit)
       .map(_.filter(transferData => !pendingCompletions.contains(transferData.transferId)))
+
+  override def findAfter(requestAfter: Option[(CantonTimestamp, DomainId)], limit: Int)(implicit
+      traceContext: TraceContext
+  ): Future[Seq[TransferData]] = transferStore
+    .findAfter(requestAfter, limit)
+    .map(_.filter(transferData => !pendingCompletions.contains(transferData.transferId)))
 }

 object TransferCache {
diff --git a/community/participant/src/main/scala/com/digitalasset/canton/participant/sync/CantonSyncService.scala b/community/participant/src/main/scala/com/digitalasset/canton/participant/sync/CantonSyncService.scala
index 5f0a497dd..c354c3782 100644
--- a/community/participant/src/main/scala/com/digitalasset/canton/participant/sync/CantonSyncService.scala
+++ 
b/community/participant/src/main/scala/com/digitalasset/canton/participant/sync/CantonSyncService.scala
@@ -21,7 +21,7 @@ import com.daml.telemetry.TelemetryContext
 import com.digitalasset.canton._
 import com.digitalasset.canton.concurrent.FutureSupervisor
 import com.digitalasset.canton.config.ProcessingTimeout
-import com.digitalasset.canton.config.RequireTypes.{String256M, String255}
+import com.digitalasset.canton.config.RequireTypes.{String255, String256M}
 import com.digitalasset.canton.crypto.{CryptoPureApi, SyncCryptoApiProvider}
 import com.digitalasset.canton.data.CantonTimestamp
 import com.digitalasset.canton.error.CantonErrorGroups.ParticipantErrorGroup.SyncServiceErrorGroup
@@ -37,7 +37,10 @@ import com.digitalasset.canton.logging.{ErrorLoggingContext, NamedLoggerFactory,
 import com.digitalasset.canton.participant.Pruning._
 import com.digitalasset.canton.participant.admin._
 import com.digitalasset.canton.participant.admin.grpc.PruningServiceError
-import com.digitalasset.canton.participant.config.ParticipantNodeParameters
+import com.digitalasset.canton.participant.config.{
+  ParticipantNodeParameters,
+  PartyNotificationConfig,
+}
 import com.digitalasset.canton.participant.domain._
 import com.digitalasset.canton.participant.event.RecordOrderPublisher
 import com.digitalasset.canton.participant.metrics.ParticipantMetrics
@@ -546,6 +549,16 @@ class CantonSyncService(
           .fromProtoPrimitive(rawSubmissionId, "LedgerSubmissionId")
           .leftMap(err => TransactionError.internalError(err.toString))
       )
+      // Allow party allocation via the ledger API only if notification is Eager or the participant is connected to a domain.
+      // Otherwise, the gRPC call would just time out without a meaningful error message.
+      _ <- EitherT.cond[Future](
+        parameters.partyChangeNotification == PartyNotificationConfig.Eager ||
+          connectedDomainsMap.nonEmpty,
+        (),
+        SubmissionResult.SynchronousError(
+          SyncServiceError.PartyAllocationNoDomainError.Error(rawSubmissionId).rpcStatus()
+        ),
+      )
       _ <- topologyManager
         .authorize(
           TopologyStateUpdate(
@@ -1719,4 +1732,22 @@ object SyncServiceError extends SyncServiceErrorGroup {
   ) extends SyncServiceError
       with CombinedError[SyncServiceError]
+  @Explanation(
+    """The participant is not connected to a domain and therefore cannot allocate a party
+      because the party notification is configured as ``party-notification.type = via-domain``."""
+  )
+  @Resolution(
+    "Connect the participant to a domain first or change the participant's party notification config to ``eager``." 
+ ) + object PartyAllocationNoDomainError + extends ErrorCode( + "PARTY_ALLOCATION_WITHOUT_CONNECTED_DOMAIN", + ErrorCategory.InvalidGivenCurrentSystemStateOther, + ) { + case class Error(submission_id: LedgerSubmissionId)(implicit + val loggingContext: ErrorLoggingContext + ) extends CantonError.Impl( + cause = show"Cannot allocate a party without being connected to a domain" + ) + } } diff --git a/community/participant/src/main/scala/com/digitalasset/canton/participant/sync/SyncDomain.scala b/community/participant/src/main/scala/com/digitalasset/canton/participant/sync/SyncDomain.scala index 2025d058a..b8f5da54e 100644 --- a/community/participant/src/main/scala/com/digitalasset/canton/participant/sync/SyncDomain.scala +++ b/community/participant/src/main/scala/com/digitalasset/canton/participant/sync/SyncDomain.scala @@ -4,7 +4,9 @@ package com.digitalasset.canton.participant.sync import akka.stream.Materializer +import cats.Monad import cats.data.EitherT +import cats.syntax.functor._ import cats.syntax.traverse._ import com.daml.ledger.participant.state.v2.{SubmitterInfo, TransactionMeta} import com.digitalasset.canton._ @@ -39,7 +41,7 @@ import com.digitalasset.canton.participant.protocol.transfer.TransferProcessingS TransferProcessorError, } import com.digitalasset.canton.participant.protocol.transfer._ -import com.digitalasset.canton.participant.pruning.AcsCommitmentProcessor +import com.digitalasset.canton.participant.pruning.{AcsCommitmentProcessor, PruneObserver} import com.digitalasset.canton.participant.store.ActiveContractSnapshot.ActiveContractIdsChange import com.digitalasset.canton.participant.store.{ ParticipantNodePersistentState, @@ -64,16 +66,16 @@ import com.digitalasset.canton.sequencing.{ } import com.digitalasset.canton.store.{CursorPrehead, SequencedEventStore} import com.digitalasset.canton.time.{Clock, DomainTimeTracker} -import com.digitalasset.canton.topology.{DomainId, ParticipantId} import com.digitalasset.canton.topology.client.DomainTopologyClientWithInit import com.digitalasset.canton.topology.processing.{ ApproximateTime, EffectiveTime, TopologyTransactionProcessor, } +import com.digitalasset.canton.topology.{DomainId, ParticipantId} import com.digitalasset.canton.tracing.{TraceContext, Traced} -import com.digitalasset.canton.util.ErrorUtil import com.digitalasset.canton.util.ShowUtil._ +import com.digitalasset.canton.util.{ErrorUtil, FutureUtil, MonadUtil} import io.opentelemetry.api.trace.Tracer import scala.concurrent.{ExecutionContext, Future} @@ -132,6 +134,7 @@ class SyncDomain( domainCrypto.crypto.pureCrypto, seedGenerator, packageService, + parameters.loggingConfig, loggerFactory, ) @@ -192,7 +195,7 @@ class SyncDomain( domainCrypto, staticDomainParameters.reconciliationInterval, persistent.acsCommitmentStore, - AcsCommitmentProcessor.pruneObserver( + new PruneObserver( persistent.requestJournalStore, persistent.sequencerCounterTrackerStore, staticDomainParameters.reconciliationInterval, @@ -203,7 +206,8 @@ class SyncDomain( domainId, parameters.stores.acsPruningInterval, clock, - ), + loggerFactory, + ).observer(_, _), killSwitch = selfKillSwitch, pruningMetrics, timeouts, @@ -551,9 +555,84 @@ class SyncDomain( logger.debug(s"Started sync domain for $domainId")(initializationTraceContext) ephemeral.markAsRecovered() logger.debug("Sync domain is ready.")(initializationTraceContext) + FutureUtil.doNotAwait( + completeTxIn, + "Failed to complete outstanding transfer-ins on startup. 
" + + "You may have to complete the transfer-ins manually.", + ) + () }).value } + def completeTxIn(implicit tc: TraceContext): Future[Unit] = { + + val fetchLimit = 1000 + + def completeTransfers( + previous: Option[(CantonTimestamp, DomainId)] + ): Future[Either[Option[(CantonTimestamp, DomainId)], Unit]] = { + logger.debug(s"Fetch $fetchLimit pending transfers") + val resF = for { + pendingTransfers <- persistent.transferStore.findAfter( + requestAfter = previous, + limit = fetchLimit, + ) + // TODO(phoebe): Here, transfer-ins are completed sequentially. Consider running several in parallel to speed + // this up. It may be helpful to use the `RateLimiter` + eithers <- MonadUtil + .sequentialTraverse(pendingTransfers)({ data => + logger.debug(s"Complete ${data.transferId} after startup") + val eitherF = TransferOutProcessingSteps.autoTransferIn( + data.transferId, + domainId, + transferCoordination, + data.contract.metadata.stakeholders, + participantId, + data.transferOutRequest.targetTimeProof.timestamp, + ) + eitherF.value.map(_.left.map(err => data.transferId -> err)) + }) + + } yield { + // Log any errors, then discard the errors and continue to complete pending transfers + eithers.foreach({ + case Left((transferId, error)) => + logger.debug(s"Failed to complete pending transfer $transferId. The error was $error.") + case Right(()) => () + }) + + pendingTransfers.lastOption.map(t => t.transferId.requestTimestamp -> t.originDomain) + } + + resF.map({ + // Continue completing transfers that are after the last completed transfer + case Some(value) => Left(Some(value)) + // We didn't find any uncompleted transfers, so stop + case None => Right(()) + }) + } + + logger.debug(s"Wait for replay to complete") + for { + // Wait to see a timestamp >= now from the domain -- when we see such a timestamp, it means that the participant + // has "caught up" on messages from the domain (and so should have seen all the transfer-ins) + //TODO(i9009): This assumes the participant and domain clocks are synchronized, which may not be the case + waitForReplay <- timeTracker + .awaitTick(clock.now) + .map(_.void) + .getOrElse(Future.unit) + + params <- topologyClient.currentSnapshotApproximation.findDynamicDomainParametersOrDefault() + + _bool <- Monad[Future].tailRecM(None: Option[(CantonTimestamp, DomainId)])(ts => + completeTransfers(ts) + ) + } yield { + logger.debug(s"Transfer in completion has finished") + } + + } + /** A [[SyncDomain]] is ready when it has resubscribed to the sequencer client. 
*/ def ready: Boolean = ephemeral.recovered diff --git a/community/participant/src/test/scala/com/digitalasset/canton/participant/protocol/submission/ConfirmationRequestFactoryTest.scala b/community/participant/src/test/scala/com/digitalasset/canton/participant/protocol/submission/ConfirmationRequestFactoryTest.scala index f9f5a8c68..4b2b53dab 100644 --- a/community/participant/src/test/scala/com/digitalasset/canton/participant/protocol/submission/ConfirmationRequestFactoryTest.scala +++ b/community/participant/src/test/scala/com/digitalasset/canton/participant/protocol/submission/ConfirmationRequestFactoryTest.scala @@ -8,6 +8,7 @@ import cats.syntax.either._ import cats.syntax.functor._ import com.daml.ledger.participant.state.v2.SubmitterInfo import com.digitalasset.canton._ +import com.digitalasset.canton.config.LoggingConfig import com.digitalasset.canton.crypto._ import com.digitalasset.canton.crypto.provider.symbolic.SymbolicPureCrypto import com.digitalasset.canton.data.ViewType.TransactionViewType @@ -169,7 +170,7 @@ class ConfirmationRequestFactoryTest extends AsyncWordSpec with BaseTest with Ha override def saltsFromView(view: TransactionViewTree): Iterable[Salt] = ??? } - new ConfirmationRequestFactory(submitterParticipant, domain)( + new ConfirmationRequestFactory(submitterParticipant, domain, LoggingConfig(), loggerFactory)( transactionTreeFactory, seedGenerator, ) diff --git a/community/participant/src/test/scala/com/digitalasset/canton/participant/pruning/AcsCommitmentProcessorTest.scala b/community/participant/src/test/scala/com/digitalasset/canton/participant/pruning/AcsCommitmentProcessorTest.scala index 7f371c8cf..c68ca9ae6 100644 --- a/community/participant/src/test/scala/com/digitalasset/canton/participant/pruning/AcsCommitmentProcessorTest.scala +++ b/community/participant/src/test/scala/com/digitalasset/canton/participant/pruning/AcsCommitmentProcessorTest.scala @@ -97,7 +97,7 @@ trait AcsCommitmentProcessorBaseTest extends BaseTest { def ts(i: Int): CantonTimestampSecond = CantonTimestampSecond.ofEpochSecond(i.longValue) def toc(timestamp: Int, requestCounter: Int = 0): TimeOfChange = - TimeOfChange(requestCounter.toLong, ts(timestamp).toTs) + TimeOfChange(requestCounter.toLong, ts(timestamp).forgetSecond) def mkChangeIdHash(index: Int) = ChangeIdHash(DefaultDamlValues.lfhash(index)) @@ -221,12 +221,12 @@ trait AcsCommitmentProcessorBaseTest extends BaseTest { def withTestHash[A] = WithContractHash[A](_, testHash) - def rt(timestamp: Int, tieBreaker: Int) = RecordTime(ts(timestamp).toTs, tieBreaker.toLong) + def rt(timestamp: Int, tieBreaker: Int) = + RecordTime(ts(timestamp).forgetSecond, tieBreaker.toLong) val coid = (txId, discriminator) => ExampleTransactionFactory.suffixedId(txId, discriminator) } -@SuppressWarnings(Array("org.wartremover.warts.TraversableOps")) class AcsCommitmentProcessorTest extends AsyncWordSpec with AcsCommitmentProcessorBaseTest { import AcsCommitmentProcessorTestHelpers._ @@ -296,7 +296,6 @@ class AcsCommitmentProcessorTest extends AsyncWordSpec with AcsCommitmentProcess CommitmentsPruningBound.Outstanding(_ => Future.successful(None)), earliestInFlightSubmissionF = Future.successful(None), reconciliationInterval = longInterval, - beforeOrAt = CantonTimestamp.now(), ) } yield res shouldBe None } @@ -311,16 +310,17 @@ class AcsCommitmentProcessorTest extends AsyncWordSpec with AcsCommitmentProcess ), earliestInFlightSubmissionF = Future.successful(None), reconciliationInterval = longInterval, - beforeOrAt = CantonTimestamp.now(), ) - } 
yield res shouldBe Some(CantonTimestamp.MinValue) + } yield res shouldBe Some(CantonTimestampSecond.MinValue) } "take checkForOutstandingCommitments flag into account" in { val longInterval = PositiveSeconds.ofDays(100) val now = CantonTimestamp.now() - def safeToPrune(checkForOutstandingCommitments: Boolean): Future[Option[CantonTimestamp]] = { + def safeToPrune( + checkForOutstandingCommitments: Boolean + ): Future[Option[CantonTimestampSecond]] = { val noOutstandingCommitmentsF: CantonTimestamp => Future[Some[CantonTimestamp]] = _ => Future.successful(Some(CantonTimestamp.MinValue)) val lastComputedAndSentF = Future.successful(Some(now)) @@ -333,7 +333,6 @@ class AcsCommitmentProcessorTest extends AsyncWordSpec with AcsCommitmentProcess else CommitmentsPruningBound.LastComputedAndSent(lastComputedAndSentF), earliestInFlightSubmissionF = Future.successful(None), reconciliationInterval = longInterval, - beforeOrAt = now, ) } @@ -341,8 +340,8 @@ class AcsCommitmentProcessorTest extends AsyncWordSpec with AcsCommitmentProcess res1 <- safeToPrune(true) res2 <- safeToPrune(false) } yield { - res1 shouldBe Some(CantonTimestamp.MinValue) - res2 shouldBe Some(AcsCommitmentProcessor.tickBeforeOrAt(now, longInterval).toTs) + res1 shouldBe Some(CantonTimestampSecond.MinValue) + res2 shouldBe Some(AcsCommitmentProcessor.tickBeforeOrAt(now, longInterval)) } } } @@ -365,7 +364,7 @@ class AcsCommitmentProcessorTest extends AsyncWordSpec with AcsCommitmentProcess } val acsF = acsSetup(contractSetup.fmap { case (_, createdAt, archivedAt) => - (createdAt.toTs, archivedAt.toTs) + (createdAt.forgetSecond, archivedAt.forgetSecond) }) def commitments( @@ -373,7 +372,7 @@ class AcsCommitmentProcessorTest extends AsyncWordSpec with AcsCommitmentProcess at: CantonTimestampSecond, ): Future[Map[ParticipantId, AcsCommitment.CommitmentType]] = for { - snapshotOrErr <- acs.snapshot(at.toTs) + snapshotOrErr <- acs.snapshot(at.forgetSecond) snapshot <- snapshotOrErr.fold( _ => Future.failed(new RuntimeException(s"Failed to get snapshot at timestamp $at")), sn => Future.successful(sn.map { case (cid, _ts) => cid -> stakeholderLookup(cid) }), @@ -512,7 +511,8 @@ class AcsCommitmentProcessorTest extends AsyncWordSpec with AcsCommitmentProcess TestingTopology().withParticipants(remote).build().forOwnerAndDomain(remote) val cmt = commitment(cids) val snapshotF = crypto.snapshot(CantonTimestamp.Epoch) - val period = CommitmentPeriod(fromExclusive.toTs, toInclusive.toTs, interval).value + val period = + CommitmentPeriod(fromExclusive.forgetSecond, toInclusive.forgetSecond, interval).value val payload = AcsCommitment.create(domainId, remote, localId, period, cmt) snapshotF.flatMap { snapshot => @@ -535,14 +535,14 @@ class AcsCommitmentProcessorTest extends AsyncWordSpec with AcsCommitmentProcess ) // First ask for the remote commitments to be processed, and then compute locally _ <- delivered - .traverse_ { case (ts, batch) => processor.processBatchInternal(ts.toTs, batch) } + .traverse_ { case (ts, batch) => processor.processBatchInternal(ts.forgetSecond, batch) } .onShutdown(fail()) _ = changes.foreach { case (ts, tb, change) => processor.publish(RecordTime(ts, tb), change) } _ <- processor.queue.flush() - computed <- store.searchComputedBetween(CantonTimestamp.Epoch, timeProofs.last) - received <- store.searchReceivedBetween(CantonTimestamp.Epoch, timeProofs.last) + computed <- store.searchComputedBetween(CantonTimestamp.Epoch, timeProofs.lastOption.value) + received <- store.searchReceivedBetween(CantonTimestamp.Epoch, 
timeProofs.lastOption.value) } yield { verify(processor.sequencerClient, times(2)).sendAsync( any[Batch[OpenEnvelope[ProtocolMessage]]], @@ -581,7 +581,6 @@ class AcsCommitmentProcessorTest extends AsyncWordSpec with AcsCommitmentProcess inFlightSubmissionStore, domainId, checkForOutstandingCommitments = true, - CantonTimestamp.Epoch.plusSeconds(200), ) } yield { res shouldEqual None @@ -607,17 +606,16 @@ class AcsCommitmentProcessorTest extends AsyncWordSpec with AcsCommitmentProcess inFlightSubmissionStore, domainId, checkForOutstandingCommitments = true, - CantonTimestamp.Epoch.plusSeconds(200), ) } yield { - res shouldEqual Some(CantonTimestamp.MinValue) + res shouldEqual Some(CantonTimestampSecond.MinValue) } } def assertInIntervalBefore( before: CantonTimestamp, reconciliationInterval: PositiveSeconds, - ): Option[CantonTimestamp] => Assertion = { + ): Option[CantonTimestampSecond] => Assertion = { case None => fail() case Some(ts) => val delta = JDuration.between(ts.toInstant, before.toInstant) @@ -665,7 +663,6 @@ class AcsCommitmentProcessorTest extends AsyncWordSpec with AcsCommitmentProcess inFlightSubmissionStore, domainId, checkForOutstandingCommitments = true, - CantonTimestamp.ofEpochSecond(200), ) _ <- requestJournalStore.insert( RequestData(4L, RequestState.Pending, ts4, None) @@ -682,7 +679,6 @@ class AcsCommitmentProcessorTest extends AsyncWordSpec with AcsCommitmentProcess inFlightSubmissionStore, domainId, checkForOutstandingCommitments = true, - CantonTimestamp.ofEpochSecond(200), ) } yield { withClue("request 1:") { @@ -729,7 +725,6 @@ class AcsCommitmentProcessorTest extends AsyncWordSpec with AcsCommitmentProcess inFlightSubmissionStore, domainId, checkForOutstandingCommitments = true, - CantonTimestamp.Epoch.plusSeconds(200), ) } yield assertInIntervalBefore(tsCleanRequest, reconciliationInterval)(res) } @@ -764,7 +759,6 @@ class AcsCommitmentProcessorTest extends AsyncWordSpec with AcsCommitmentProcess inFlightSubmissionStore, domainId, checkForOutstandingCommitments = true, - CantonTimestamp.Epoch.plusSeconds(200), ) } yield { assertInIntervalBefore(ts1, reconciliationInterval)(res) @@ -837,7 +831,6 @@ class AcsCommitmentProcessorTest extends AsyncWordSpec with AcsCommitmentProcess inFlightSubmissionStore, domainId, checkForOutstandingCommitments = true, - CantonTimestamp.Epoch.plusSeconds(200), ) // Now remove the timed-out submission 1 and compute the pruning point again () <- inFlightSubmissionStore.delete(Seq(submission1.referenceByMessageId)) @@ -849,7 +842,6 @@ class AcsCommitmentProcessorTest extends AsyncWordSpec with AcsCommitmentProcess inFlightSubmissionStore, domainId, checkForOutstandingCommitments = true, - CantonTimestamp.Epoch.plusSeconds(200), ) // Now remove the clean request and compute the pruning point again () <- inFlightSubmissionStore.delete(Seq(submission2.referenceByMessageId)) @@ -861,7 +853,6 @@ class AcsCommitmentProcessorTest extends AsyncWordSpec with AcsCommitmentProcess inFlightSubmissionStore, domainId, checkForOutstandingCommitments = true, - CantonTimestamp.Epoch.plusSeconds(200), ) } yield { assertInIntervalBefore(submission1.associatedTimestamp, reconciliationInterval)(res1) @@ -1016,7 +1007,6 @@ class AcsCommitmentProcessorSyncTest } /* Scalacheck doesn't play nice with AsyncWordSpec, so using AnyWordSpec and waiting on futures */ -@SuppressWarnings(Array("org.wartremover.warts.TraversableOps")) class AcsCommitmentProcessorPropertyTest extends AnyWordSpec with BaseTest diff --git 
a/community/participant/src/test/scala/com/digitalasset/canton/participant/store/HookedAcs.scala b/community/participant/src/test/scala/com/digitalasset/canton/participant/store/HookedAcs.scala index aa968cb2f..572477f82 100644 --- a/community/participant/src/test/scala/com/digitalasset/canton/participant/store/HookedAcs.scala +++ b/community/participant/src/test/scala/com/digitalasset/canton/participant/store/HookedAcs.scala @@ -25,8 +25,9 @@ import java.util.concurrent.atomic.AtomicReference import scala.collection.immutable.SortedMap import scala.concurrent.{ExecutionContext, Future} -class HookedAcs(private val acs: ActiveContractStore)(implicit val ec: ExecutionContext) - extends ActiveContractStore { +private[participant] class HookedAcs(private val acs: ActiveContractStore)(implicit + val ec: ExecutionContext +) extends ActiveContractStore { import HookedAcs.{noAction, noTransferAction} private val nextCreateHook: AtomicReference[(Seq[LfContractId], TimeOfChange) => Future[Unit]] = diff --git a/community/participant/src/test/scala/com/digitalasset/canton/participant/store/TransferStoreTest.scala b/community/participant/src/test/scala/com/digitalasset/canton/participant/store/TransferStoreTest.scala index a14a600d5..4c78abee8 100644 --- a/community/participant/src/test/scala/com/digitalasset/canton/participant/store/TransferStoreTest.scala +++ b/community/participant/src/test/scala/com/digitalasset/canton/participant/store/TransferStoreTest.scala @@ -3,11 +3,9 @@ package com.digitalasset.canton.participant.store -import java.util.UUID import com.digitalasset.canton.concurrent.DirectExecutionContext import com.digitalasset.canton.crypto._ import com.digitalasset.canton.data.CantonTimestamp -import com.digitalasset.canton.topology._ import com.digitalasset.canton.logging.{NamedLoggerFactory, TracedLogger} import com.digitalasset.canton.participant.protocol.submission.SeedGenerator import com.digitalasset.canton.participant.protocol.transfer.{TransferData, TransferOutRequest} @@ -23,10 +21,12 @@ import com.digitalasset.canton.protocol.messages._ import com.digitalasset.canton.protocol.{RequestId, TransferId} import com.digitalasset.canton.sequencing.protocol._ import com.digitalasset.canton.time.TimeProofTestUtil +import com.digitalasset.canton.topology._ import com.digitalasset.canton.util.{Checked, FutureUtil} import com.digitalasset.canton.{BaseTest, LfPartyId} import org.scalatest.wordspec.AsyncWordSpec +import java.util.UUID import scala.concurrent.duration._ import scala.concurrent.{Await, Future} @@ -187,6 +187,99 @@ trait TransferStoreTest { } } + "findAfter" should { + + def populate(store: TransferStore) = for { + transfer1 <- mkTransferData( + TransferId(domain1, CantonTimestamp.Epoch.plusMillis(200L)), + mediator1, + LfPartyId.assertFromString("party1"), + ) + transfer2 <- mkTransferData( + TransferId(domain1, CantonTimestamp.Epoch.plusMillis(100L)), + mediator1, + LfPartyId.assertFromString("party2"), + ) + transfer3 <- mkTransferData( + TransferId(domain2, CantonTimestamp.Epoch.plusMillis(100L)), + mediator2, + LfPartyId.assertFromString("party2"), + ) + transfer4 <- mkTransferData( + TransferId(domain2, CantonTimestamp.Epoch.plusMillis(200L)), + mediator2, + LfPartyId.assertFromString("party2"), + ) + _ <- valueOrFail(store.addTransfer(transfer1))("first add failed") + _ <- valueOrFail(store.addTransfer(transfer2))("second add failed") + _ <- valueOrFail(store.addTransfer(transfer3))("third add failed") + _ <- valueOrFail(store.addTransfer(transfer4))("fourth add failed") 
+ } yield (List(transfer1, transfer2, transfer3, transfer4)) + + "order pending transfers" in { + val store = mk(targetDomain) + + for { + transfers <- populate(store) + lookup <- store.findAfter(None, 10) + } yield { + val List(transfer1, transfer2, transfer3, transfer4) = transfers: @unchecked + assert(lookup == Seq(transfer2, transfer3, transfer1, transfer4)) + } + + } + "give pending transfers after the given timestamp" in { + val store = mk(targetDomain) + + for { + transfers <- populate(store) + List(transfer1, transfer2, transfer3, transfer4) = transfers: @unchecked + lookup <- store.findAfter( + requestAfter = Some(transfer2.transferId.requestTimestamp -> transfer2.originDomain), + 10, + ) + } yield { + assert(lookup == Seq(transfer3, transfer1, transfer4)) + } + } + "give no pending transfers when empty" in { + val store = mk(targetDomain) + for { lookup <- store.findAfter(None, 10) } yield { + lookup shouldBe empty + } + } + "limit the results" in { + val store = mk(targetDomain) + + for { + transfers <- populate(store) + lookup <- store.findAfter(None, 2) + } yield { + val List(transfer1, transfer2, transfer3, transfer4) = transfers: @unchecked + assert(lookup == Seq(transfer2, transfer3)) + } + } + "exclude completed transfers" in { + val store = mk(targetDomain) + + for { + transfers <- populate(store) + List(transfer1, transfer2, transfer3, transfer4) = transfers: @unchecked + checked <- store + .completeTransfer( + transfer2.transferId, + TimeOfChange(3L, CantonTimestamp.Epoch.plusSeconds(3)), + ) + .value + lookup <- store.findAfter(None, 10) + } yield { + assert(checked.successful) + assert(lookup == Seq(transfer3, transfer1, transfer4)) + } + + } + } + "addTransfer" should { "be idempotent" in { val store = mk(targetDomain) diff --git a/community/participant/src/test/scala/com/digitalasset/canton/participant/store/memory/TransferCacheTest.scala b/community/participant/src/test/scala/com/digitalasset/canton/participant/store/memory/TransferCacheTest.scala index 22490cbb7..64e6632d7 100644 --- a/community/participant/src/test/scala/com/digitalasset/canton/participant/store/memory/TransferCacheTest.scala +++ b/community/participant/src/test/scala/com/digitalasset/canton/participant/store/memory/TransferCacheTest.scala @@ -288,6 +288,10 @@ object TransferCacheTest { )(implicit traceContext: TraceContext): Future[Seq[TransferData]] = baseStore.find(filterOrigin, filterTimestamp, filterSubmitter, limit) + override def findAfter(requestAfter: Option[(CantonTimestamp, DomainId)], limit: Int)(implicit + traceContext: TraceContext + ): Future[Seq[TransferData]] = baseStore.findAfter(requestAfter, limit) + override def lookup(transferId: TransferId)(implicit traceContext: TraceContext ): EitherT[Future, TransferLookupError, TransferData] = diff --git a/community/participant/src/test/scala/com/digitalasset/canton/participant/sync/CantonSyncServiceTest.scala b/community/participant/src/test/scala/com/digitalasset/canton/participant/sync/CantonSyncServiceTest.scala index 147152e93..88cd3d808 100644 --- a/community/participant/src/test/scala/com/digitalasset/canton/participant/sync/CantonSyncServiceTest.scala +++ b/community/participant/src/test/scala/com/digitalasset/canton/participant/sync/CantonSyncServiceTest.scala @@ -12,9 +12,11 @@ import com.daml.platform.apiserver.SeedService.Seeding import com.digitalasset.canton.concurrent.FutureSupervisor import com.digitalasset.canton.config.RequireTypes.{NonNegativeInt, PositiveNumeric, String255} import com.digitalasset.canton.config.{ + 
ApiLoggingConfig, + BatchAggregatorConfig, CachingConfigs, DefaultProcessingTimeouts, - BatchAggregatorConfig, + LoggingConfig, } import com.digitalasset.canton.crypto.{Fingerprint, SyncCryptoApiProvider} import com.digitalasset.canton.logging.SuppressingLogger @@ -77,7 +79,7 @@ class CantonSyncServiceTest extends FixtureAnyWordSpec with BaseTest with HasExe tracing = TracingConfig(TracingConfig.Propagation.Disabled), delayLoggingThreshold = NonNegativeFiniteDuration.ofMillis(5000), enableAdditionalConsistencyChecks = true, - logMessagePayloads = true, + loggingConfig = LoggingConfig(api = ApiLoggingConfig(messagePayloads = Some(true))), logQueryCost = None, processingTimeouts = DefaultProcessingTimeouts.testing, enablePreviewFeatures = false, diff --git a/project/project/DamlVersions.scala b/project/project/DamlVersions.scala index 732628ac9..37621bb47 100644 --- a/project/project/DamlVersions.scala +++ b/project/project/DamlVersions.scala @@ -7,7 +7,7 @@ object DamlVersions { /** The version of the daml compiler (and in most cases of the daml libraries as well). */ - val version: String = "2.1.0-snapshot.20220324.9615.0.467b8fbb" + val version: String = "2.1.0-snapshot.20220328.9630.0.66c37bad" /** Custom Daml artifacts override version. */