diff --git a/sdk/canton/community/admin-api/src/main/protobuf/com/digitalasset/canton/admin/participant/v30/transfer_id.proto b/sdk/canton/community/admin-api/src/main/protobuf/com/digitalasset/canton/admin/participant/v30/transfer_id.proto new file mode 100644 index 000000000000..5dfe3b054852 --- /dev/null +++ b/sdk/canton/community/admin-api/src/main/protobuf/com/digitalasset/canton/admin/participant/v30/transfer_id.proto @@ -0,0 +1,13 @@ +// Copyright (c) 2024 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. +// SPDX-License-Identifier: Apache-2.0 + +syntax = "proto3"; + +package com.digitalasset.canton.admin.participant.v30; + +import "google/protobuf/timestamp.proto"; + +message TransferId { + string source_domain = 1; + google.protobuf.Timestamp timestamp = 2; +} diff --git a/sdk/canton/community/admin-api/src/main/protobuf/com/digitalasset/canton/admin/participant/v30/transfer_service.proto b/sdk/canton/community/admin-api/src/main/protobuf/com/digitalasset/canton/admin/participant/v30/transfer_service.proto deleted file mode 100644 index a03975cbdc5e..000000000000 --- a/sdk/canton/community/admin-api/src/main/protobuf/com/digitalasset/canton/admin/participant/v30/transfer_service.proto +++ /dev/null @@ -1,42 +0,0 @@ -// Copyright (c) 2024 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. -// SPDX-License-Identifier: Apache-2.0 - -syntax = "proto3"; - -package com.digitalasset.canton.admin.participant.v30; - -import "google/protobuf/timestamp.proto"; - -// Supports transferring contracts from one domain to another -service TransferService { - - // return the in-flight transfers on a given participant for a given target domain - rpc TransferSearch(AdminTransferSearchQuery) returns (AdminTransferSearchResponse); -} - -message TransferId { - string source_domain = 1; - google.protobuf.Timestamp timestamp = 2; -} - -message AdminTransferSearchQuery { - string search_domain = 1; - string filter_origin_domain = 2; // exact match if non-empty - google.protobuf.Timestamp filter_timestamp = 3; // optional; exact match if set - string filter_submitting_party = 4; - int64 limit = 5; -} - -message AdminTransferSearchResponse { - repeated TransferSearchResult results = 1; - - message TransferSearchResult { - string contract_id = 1; - TransferId transfer_id = 2; - string origin_domain = 3; - string target_domain = 4; - string submitting_party = 5; - bool ready_for_transfer_in = 6; - google.protobuf.Timestamp target_time_proof = 7; - } -} diff --git a/sdk/canton/community/app-base/src/main/scala/com/digitalasset/canton/admin/api/client/commands/ParticipantAdminCommands.scala b/sdk/canton/community/app-base/src/main/scala/com/digitalasset/canton/admin/api/client/commands/ParticipantAdminCommands.scala index 962187de3b48..3ee64f8c5c43 100644 --- a/sdk/canton/community/app-base/src/main/scala/com/digitalasset/canton/admin/api/client/commands/ParticipantAdminCommands.scala +++ b/sdk/canton/community/app-base/src/main/scala/com/digitalasset/canton/admin/api/client/commands/ParticipantAdminCommands.scala @@ -26,7 +26,6 @@ import com.digitalasset.canton.admin.participant.v30.PartyNameManagementServiceG import com.digitalasset.canton.admin.participant.v30.PingServiceGrpc.PingServiceStub import com.digitalasset.canton.admin.participant.v30.PruningServiceGrpc.PruningServiceStub import com.digitalasset.canton.admin.participant.v30.ResourceManagementServiceGrpc.ResourceManagementServiceStub -import 
com.digitalasset.canton.admin.participant.v30.TransferServiceGrpc.TransferServiceStub import com.digitalasset.canton.admin.participant.v30.{ResourceLimits as _, *} import com.digitalasset.canton.admin.pruning import com.digitalasset.canton.admin.pruning.v30.{NoWaitCommitmentsSetup, WaitCommitmentsSetup} @@ -34,7 +33,6 @@ import com.digitalasset.canton.config.RequireTypes.{NonNegativeInt, PositiveInt} import com.digitalasset.canton.data.{CantonTimestamp, CantonTimestampSecond} import com.digitalasset.canton.logging.TracedLogger import com.digitalasset.canton.participant.admin.ResourceLimits -import com.digitalasset.canton.participant.admin.grpc.TransferSearchResult import com.digitalasset.canton.participant.admin.traffic.TrafficStateAdmin import com.digitalasset.canton.participant.domain.DomainConnectionConfig as CDomainConnectionConfig import com.digitalasset.canton.participant.pruning.AcsCommitmentProcessor.{ @@ -48,7 +46,6 @@ import com.digitalasset.canton.protocol.messages.{AcsCommitment, CommitmentPerio import com.digitalasset.canton.sequencing.SequencerConnectionValidation import com.digitalasset.canton.sequencing.protocol.TrafficState import com.digitalasset.canton.serialization.ProtoConverter -import com.digitalasset.canton.serialization.ProtoConverter.InstantConverter import com.digitalasset.canton.time.PositiveSeconds import com.digitalasset.canton.topology.{DomainId, ParticipantId, PartyId} import com.digitalasset.canton.tracing.TraceContext @@ -885,60 +882,6 @@ object ParticipantAdminCommands { service.modifyDomain(request) override def handleResponse(response: ModifyDomainResponse): Either[String, Unit] = Right(()) - - } - - } - - object Transfer { - - abstract class Base[Req, Res, Ret] extends GrpcAdminCommand[Req, Res, Ret] { - override type Svc = TransferServiceStub - - override def createService(channel: ManagedChannel): TransferServiceStub = - TransferServiceGrpc.stub(channel) - } - - final case class TransferSearch( - targetDomain: DomainAlias, - sourceDomainFilter: Option[DomainAlias], - timestampFilter: Option[Instant], - submittingPartyFilter: Option[PartyId], - limit0: Int, - ) extends Base[ - AdminTransferSearchQuery, - AdminTransferSearchResponse, - Seq[TransferSearchResult], - ] { - - override def createRequest(): Either[String, AdminTransferSearchQuery] = - Right( - AdminTransferSearchQuery( - searchDomain = targetDomain.toProtoPrimitive, - filterOriginDomain = sourceDomainFilter.map(_.toProtoPrimitive).getOrElse(""), - filterTimestamp = - timestampFilter.map((value: Instant) => InstantConverter.toProtoPrimitive(value)), - filterSubmittingParty = submittingPartyFilter.fold("")(_.toLf), - limit = limit0.toLong, - ) - ) - - override def submitRequest( - service: TransferServiceStub, - request: AdminTransferSearchQuery, - ): Future[AdminTransferSearchResponse] = - service.transferSearch(request) - - override def handleResponse( - response: AdminTransferSearchResponse - ): Either[String, Seq[TransferSearchResult]] = - response match { - case AdminTransferSearchResponse(results) => - results.traverse(TransferSearchResult.fromProtoV30).leftMap(_.toString) - } - - override def timeoutType: TimeoutType = DefaultUnboundedTimeout - } } diff --git a/sdk/canton/community/app-base/src/main/scala/com/digitalasset/canton/config/CantonConfig.scala b/sdk/canton/community/app-base/src/main/scala/com/digitalasset/canton/config/CantonConfig.scala index 8e7f7b81e6bd..d7c5d16c5d83 100644 --- 
a/sdk/canton/community/app-base/src/main/scala/com/digitalasset/canton/config/CantonConfig.scala +++ b/sdk/canton/community/app-base/src/main/scala/com/digitalasset/canton/config/CantonConfig.scala @@ -720,6 +720,8 @@ object CantonConfig { deriveEnumerationReader[PbkdfScheme] lazy implicit val cryptoKeyFormatReader: ConfigReader[CryptoKeyFormat] = deriveEnumerationReader[CryptoKeyFormat] + lazy implicit val encryptionSchemeConfigReader: ConfigReader[EncryptionSchemeConfig] = + deriveReader[EncryptionSchemeConfig] implicit def cryptoSchemeConfig[S: ConfigReader: Order]: ConfigReader[CryptoSchemeConfig[S]] = deriveReader[CryptoSchemeConfig[S]] lazy implicit val communityCryptoReader: ConfigReader[CommunityCryptoConfig] = @@ -1138,6 +1140,8 @@ object CantonConfig { deriveEnumerationWriter[PbkdfScheme] lazy implicit val cryptoKeyFormatWriter: ConfigWriter[CryptoKeyFormat] = deriveEnumerationWriter[CryptoKeyFormat] + lazy implicit val encryptionSchemeConfigWriter: ConfigWriter[EncryptionSchemeConfig] = + deriveWriter[EncryptionSchemeConfig] implicit def cryptoSchemeConfigWriter[S: ConfigWriter]: ConfigWriter[CryptoSchemeConfig[S]] = deriveWriter[CryptoSchemeConfig[S]] lazy implicit val communityCryptoWriter: ConfigWriter[CommunityCryptoConfig] = diff --git a/sdk/canton/community/app-base/src/main/scala/com/digitalasset/canton/console/InstanceReference.scala b/sdk/canton/community/app-base/src/main/scala/com/digitalasset/canton/console/InstanceReference.scala index 6e1eb2772d90..0456dab95d6f 100644 --- a/sdk/canton/community/app-base/src/main/scala/com/digitalasset/canton/console/InstanceReference.scala +++ b/sdk/canton/community/app-base/src/main/scala/com/digitalasset/canton/console/InstanceReference.scala @@ -508,7 +508,12 @@ abstract class ParticipantReference( override def health: ParticipantHealthAdministration = new ParticipantHealthAdministration(this, consoleEnvironment, loggerFactory) - override def parties: ParticipantPartiesAdministrationGroup + @Help.Summary("Inspect and manage parties") + @Help.Group("Parties") + def parties: ParticipantPartiesAdministrationGroup = partiesGroup + // above command needs to be def such that `Help` works. + lazy private val partiesGroup = + new ParticipantPartiesAdministrationGroup(id, this, consoleEnvironment, loggerFactory) private lazy val topology_ = new TopologyAdministrationGroup( @@ -604,14 +609,6 @@ class RemoteParticipantReference(environment: ConsoleEnvironment, override val n extends ParticipantReference(environment, name) with RemoteInstanceReference { - @Help.Summary("Inspect and manage parties") - @Help.Group("Parties") - override def parties: ParticipantPartiesAdministrationGroup = partiesGroup - - // above command needs to be def such that `Help` works. - lazy private val partiesGroup = - new ParticipantPartiesAdministrationGroup(id, this, consoleEnvironment) - @Help.Summary("Return remote participant config") def config: RemoteParticipantConfig = consoleEnvironment.environment.config.remoteParticipantsByString(name) @@ -673,15 +670,6 @@ class LocalParticipantReference( /** secret, not publicly documented way to get the admin token */ def adminToken: Option[String] = underlying.map(_.adminToken.secret) - // TODO(#14048) these are "remote" groups. the normal participant node has "local" versions. 
- // but rather than keeping this, we should make local == remote and add local methods separately - @Help.Summary("Inspect and manage parties") - @Help.Group("Parties") - def parties: LocalParticipantPartiesAdministrationGroup = partiesGroup - // above command needs to be def such that `Help` works. - lazy private val partiesGroup = - new LocalParticipantPartiesAdministrationGroup(this, this, consoleEnvironment, loggerFactory) - private lazy val testing_ = new LocalParticipantTestingGroup(this, consoleEnvironment, loggerFactory) @Help.Summary("Commands used for development and testing", FeatureFlag.Testing) diff --git a/sdk/canton/community/app-base/src/main/scala/com/digitalasset/canton/console/commands/ParticipantAdministration.scala b/sdk/canton/community/app-base/src/main/scala/com/digitalasset/canton/console/commands/ParticipantAdministration.scala index 4442be1cdfb2..580095068ce9 100644 --- a/sdk/canton/community/app-base/src/main/scala/com/digitalasset/canton/console/commands/ParticipantAdministration.scala +++ b/sdk/canton/community/app-base/src/main/scala/com/digitalasset/canton/console/commands/ParticipantAdministration.scala @@ -65,7 +65,6 @@ import com.digitalasset.canton.health.admin.data.ParticipantStatus import com.digitalasset.canton.logging.{NamedLoggerFactory, NamedLogging, TracedLogger} import com.digitalasset.canton.participant.ParticipantNode import com.digitalasset.canton.participant.admin.ResourceLimits -import com.digitalasset.canton.participant.admin.grpc.TransferSearchResult import com.digitalasset.canton.participant.admin.inspection.SyncStateInspection import com.digitalasset.canton.participant.domain.DomainConnectionConfig import com.digitalasset.canton.participant.pruning.AcsCommitmentProcessor.{ @@ -1725,36 +1724,6 @@ trait ParticipantAdministration extends FeatureFlagFilter { } yield () } } - - } - - @Help.Summary("Composability related functionality", FeatureFlag.Preview) - @Help.Group("Transfer") - object transfer extends Helpful { - - @Help.Summary("Search the currently in-flight transfers", FeatureFlag.Testing) - @Help.Description( - "Returns all in-flight transfers with the given target domain that match the filters, but no more than the limit specifies." 
- ) - def search( - targetDomain: DomainAlias, - filterSourceDomain: Option[DomainAlias], - filterTimestamp: Option[Instant], - filterSubmittingParty: Option[PartyId], - limit: PositiveInt = defaultLimit, - ): Seq[TransferSearchResult] = - check(FeatureFlag.Preview)(consoleEnvironment.run { - adminCommand( - ParticipantAdminCommands.Transfer - .TransferSearch( - targetDomain, - filterSourceDomain, - filterTimestamp, - filterSubmittingParty, - limit.value, - ) - ) - }) } @Help.Summary("Functionality for managing resources") diff --git a/sdk/canton/community/app-base/src/main/scala/com/digitalasset/canton/console/commands/PartiesAdministration.scala b/sdk/canton/community/app-base/src/main/scala/com/digitalasset/canton/console/commands/PartiesAdministration.scala index e0857864c203..a3b976f313c6 100644 --- a/sdk/canton/community/app-base/src/main/scala/com/digitalasset/canton/console/commands/PartiesAdministration.scala +++ b/sdk/canton/community/app-base/src/main/scala/com/digitalasset/canton/console/commands/PartiesAdministration.scala @@ -21,7 +21,6 @@ import com.digitalasset.canton.config.NonNegativeDuration import com.digitalasset.canton.config.RequireTypes.PositiveInt import com.digitalasset.canton.console.{ AdminCommandRunner, - BaseInspection, CantonInternalError, CommandFailure, ConsoleCommandResult, @@ -31,13 +30,10 @@ import com.digitalasset.canton.console.{ FeatureFlagFilter, Help, Helpful, - InstanceReference, - LocalParticipantReference, ParticipantReference, } import com.digitalasset.canton.discard.Implicits.DiscardOps import com.digitalasset.canton.logging.NamedLoggerFactory -import com.digitalasset.canton.participant.ParticipantNode import com.digitalasset.canton.topology.* import com.digitalasset.canton.topology.store.TopologyStoreId.AuthorizedStore import com.digitalasset.canton.topology.transaction.* @@ -97,12 +93,11 @@ class PartiesAdministrationGroup( class ParticipantPartiesAdministrationGroup( participantId: => ParticipantId, - runner: AdminCommandRunner - & ParticipantAdministration - & BaseLedgerApiAdministration - & InstanceReference, - consoleEnvironment: ConsoleEnvironment, -) extends PartiesAdministrationGroup(runner, consoleEnvironment) { + reference: ParticipantReference, + override protected val consoleEnvironment: ConsoleEnvironment, + override protected val loggerFactory: NamedLoggerFactory, +) extends PartiesAdministrationGroup(reference, consoleEnvironment) + with FeatureFlagFilter { @Help.Summary("List parties hosted by this participant") @Help.Description("""Inspect the parties hosted by this participant as used for synchronisation. 
@@ -182,7 +177,7 @@ class ParticipantPartiesAdministrationGroup( ) def primaryConnected: Either[String, Seq[ListConnectedDomainsResult]] = - runner + reference .adminCommand(ParticipantAdminCommands.DomainConnectivity.ListConnectedDomains()) .toEither @@ -261,7 +256,7 @@ class ParticipantPartiesAdministrationGroup( _ <- validDisplayName match { case None => Right(()) case Some(name) => - runner + reference .adminCommand( ParticipantAdminCommands.PartyNameManagement .SetPartyDisplayName(partyId, name.unwrap) @@ -273,7 +268,7 @@ class ParticipantPartiesAdministrationGroup( // sync with ledger-api server if this node is connected to at least one domain if (syncLedgerApi && primaryConnected.exists(_.nonEmpty)) retryE( - runner.ledger_api.parties.list().map(_.party).contains(partyId), + reference.ledger_api.parties.list().map(_.party).contains(partyId), show"The party $partyId never appeared on the ledger API server", ) else Right(()) @@ -303,12 +298,16 @@ class ParticipantPartiesAdministrationGroup( groupAddressing: Boolean, mustFullyAuthorize: Boolean, ): ConsoleCommandResult[SignedTopologyTransaction[TopologyChangeOp, PartyToParticipant]] = { + // determine the next serial + val nextSerial = reference.topology.party_to_participant_mappings + .list_from_authorized(filterParty = partyId.filterString) + .filter(_.item.domainId.isEmpty) + .maxByOption(_.context.serial) + .map(_.context.serial.increment) - runner + reference .adminCommand( TopologyAdminCommands.Write.Propose( - // TODO(#14048) properly set the serial or introduce auto-detection so we don't - // have to set it on the client side mapping = PartyToParticipant.create( partyId, None, @@ -323,7 +322,7 @@ class ParticipantPartiesAdministrationGroup( groupAddressing, ), signedBy = Seq(this.participantId.fingerprint), - serial = None, + serial = nextSerial, store = AuthorizedStore.filterName, mustFullyAuthorize = mustFullyAuthorize, change = TopologyChangeOp.Replace, @@ -335,9 +334,9 @@ class ParticipantPartiesAdministrationGroup( @Help.Summary("Disable party on participant") // TODO(#14067): reintroduce `force` once it is implemented on the server side and threaded through properly. 
def disable(name: String /*, force: Boolean = false*/ ): Unit = { - runner.topology.party_to_participant_mappings + reference.topology.party_to_participant_mappings .propose_delta( - PartyId(runner.id.member.uid.tryChangeId(name)), + PartyId(reference.id.member.uid.tryChangeId(name)), removes = List(this.participantId), ) .discard @@ -354,7 +353,7 @@ class ParticipantPartiesAdministrationGroup( party: PartyId, modifier: PartyDetails => PartyDetails, ): PartyDetails = { - runner.ledger_api.parties.update( + reference.ledger_api.parties.update( party = party, modifier = modifier, ) @@ -366,25 +365,10 @@ class ParticipantPartiesAdministrationGroup( ) def set_display_name(party: PartyId, displayName: String): Unit = consoleEnvironment.run { // takes displayName as String argument which is validated at GrpcPartyNameManagementService - runner.adminCommand( + reference.adminCommand( ParticipantAdminCommands.PartyNameManagement.SetPartyDisplayName(party, displayName) ) } -} - -class LocalParticipantPartiesAdministrationGroup( - reference: LocalParticipantReference, - runner: AdminCommandRunner - & BaseInspection[ParticipantNode] - & ParticipantAdministration - & BaseLedgerApiAdministration - & InstanceReference, - val consoleEnvironment: ConsoleEnvironment, - val loggerFactory: NamedLoggerFactory, -) extends ParticipantPartiesAdministrationGroup(reference.id, runner, consoleEnvironment) - with FeatureFlagFilter { - - import runner.* @Help.Summary("Waits for any topology changes to be observed", FeatureFlag.Preview) @Help.Description( @@ -395,11 +379,9 @@ class LocalParticipantPartiesAdministrationGroup( timeout: NonNegativeDuration = consoleEnvironment.commandTimeouts.bounded, )(implicit env: ConsoleEnvironment): Unit = check(FeatureFlag.Preview) { - access(node => - TopologySynchronisation.awaitTopologyObserved(reference, partyAssignment, timeout) - ) + reference.health.wait_for_initialized() + TopologySynchronisation.awaitTopologyObserved(reference, partyAssignment, timeout) } - } object TopologySynchronisation { diff --git a/sdk/canton/community/app-base/src/main/scala/com/digitalasset/canton/console/commands/TopologyAdministration.scala b/sdk/canton/community/app-base/src/main/scala/com/digitalasset/canton/console/commands/TopologyAdministration.scala index bf0937028342..f45a8f5e4dab 100644 --- a/sdk/canton/community/app-base/src/main/scala/com/digitalasset/canton/console/commands/TopologyAdministration.scala +++ b/sdk/canton/community/app-base/src/main/scala/com/digitalasset/canton/console/commands/TopologyAdministration.scala @@ -1428,7 +1428,7 @@ class TopologyAdministrationGroup( TimeQuery.Range(fromO, toO): Time-range of when the transaction was added to the store operation: Optionally, what type of operation the transaction should have. filterParty: Filter for parties starting with the given filter string. - filterParticipant: Filter for participants starting with the given filter string. + filterParticipant: If non-empty, returns only parties that are hosted on this participant. filterSigningKey: Filter for transactions that are authorized with a key that starts with the given filter string. protocolVersion: Export the topology transactions in the optional protocol version. 
|""" @@ -1554,7 +1554,6 @@ class TopologyAdministrationGroup( proposals: Boolean = false, timeQuery: TimeQuery = TimeQuery.HeadState, operation: Option[TopologyChangeOp] = Some(TopologyChangeOp.Replace), - // TODO(#14048) should be filterDomain and filterParticipant filterUid: String = "", filterSigningKey: String = "", protocolVersion: Option[String] = None, @@ -1582,14 +1581,10 @@ class TopologyAdministrationGroup( @Help.Summary("Propose a change to a participant's domain trust certificate.") @Help.Description( - """A participant's domain trust certificate serves two functions: - |1. It signals to the domain that the participant would like to act on the domain. - |2. It controls whether contracts can be reassigned to any domain or only a specific set of domains. + """A participant's domain trust certificate signals to the domain that the participant would like to act on the domain. participantId: the identifier of the trust certificate's target participant domainId: the identifier of the domain on which the participant would like to act - transferOnlyToGivenTargetDomains: whether or not to restrict reassignments to a set of domains - targetDomains: the set of domains to which the participant permits assignments of contracts store: - "Authorized": the topology transaction will be stored in the node's authorized store and automatically propagated to connected domains, if applicable. @@ -1609,8 +1604,6 @@ class TopologyAdministrationGroup( def propose( participantId: ParticipantId, domainId: DomainId, - transferOnlyToGivenTargetDomains: Boolean = false, - targetDomains: Seq[DomainId] = Seq.empty, synchronize: Option[NonNegativeDuration] = Some( consoleEnvironment.commandTimeouts.bounded ), @@ -1624,8 +1617,6 @@ class TopologyAdministrationGroup( mapping = DomainTrustCertificate( participantId, domainId, - transferOnlyToGivenTargetDomains, - targetDomains, ), signedBy = Seq(instance.id.fingerprint), store = store.getOrElse(domainId.filterString), @@ -1761,30 +1752,12 @@ class TopologyAdministrationGroup( ) } - @Help.Summary("Propose a limitation of how many participants may host a certain party") - @Help.Description(""" - domainId: the domain on which to impose the limits for the given party - partyId: the party to which the hosting limits are applied - maxNumHostingParticipants: the maximum number of participants that may host the given party - - store: - "Authorized": the topology transaction will be stored in the node's authorized store and automatically - propagated to connected domains, if applicable. - - "": the topology transaction will be directly submitted to the specified domain without - storing it locally first. This also means it will _not_ be synchronized to other domains - automatically. - mustFullyAuthorize: when set to true, the proposal's previously received signatures and the signature of this node must be - sufficient to fully authorize the topology transaction. if this is not the case, the request fails. - when set to false, the proposal retains the proposal status until enough signatures are accumulated to - satisfy the mapping's authorization requirements. - signedBy: the fingerprint of the key to be used to sign this proposal - serial: the expected serial this topology transaction should have. Serials must be contiguous and start at 1. - This transaction will be rejected if another fully authorized transaction with the same serial already - exists, or if there is a gap between this serial and the most recently used serial. 
- If None, the serial will be automatically selected by the node.""") + // When we removed the field maxNumHostingParticipants from the PartyHostingLimits, this method did not make sense anymore. + // We keep it here for now, because it's already implemented and might be useful in the future. + // Look at the history if you need the summary and description of this method. def propose( domainId: DomainId, partyId: PartyId, - maxNumHostingParticipants: Int, store: Option[String] = None, mustFullyAuthorize: Boolean = false, signedBy: Seq[Fingerprint] = Seq(instance.id.fingerprint), @@ -1795,7 +1768,7 @@ class TopologyAdministrationGroup( ): SignedTopologyTransaction[TopologyChangeOp, PartyHostingLimits] = { synchronisation.runAdminCommand(synchronize)( TopologyAdminCommands.Write.Propose( - PartyHostingLimits(domainId, partyId, maxNumHostingParticipants), + PartyHostingLimits(domainId, partyId), signedBy = signedBy, store = store.getOrElse(domainId.toProtoPrimitive), serial = serial, diff --git a/sdk/canton/community/base/src/main/protobuf/com/digitalasset/canton/protocol/v30/domain_parameters.proto b/sdk/canton/community/base/src/main/protobuf/com/digitalasset/canton/protocol/v30/domain_parameters.proto index 179606221cc9..5db544277121 100644 --- a/sdk/canton/community/base/src/main/protobuf/com/digitalasset/canton/protocol/v30/domain_parameters.proto +++ b/sdk/canton/community/base/src/main/protobuf/com/digitalasset/canton/protocol/v30/domain_parameters.proto @@ -66,9 +66,7 @@ message DynamicDomainParameters { ParticipantDomainLimits participant_domain_limits = 13; - // TODO(#14050) implement party hosting limits - uint32 default_max_hosting_participants_per_party = 14; - + reserved 14; // was default_max_hosting_participants_per_party = 14; google.protobuf.Duration sequencer_aggregate_submission_timeout = 15; com.digitalasset.canton.protocol.v30.TrafficControlParameters traffic_control_parameters = 16; AcsCommitmentsCatchUpConfig acs_commitments_catchup_config = 17; diff --git a/sdk/canton/community/base/src/main/protobuf/com/digitalasset/canton/protocol/v30/topology.proto b/sdk/canton/community/base/src/main/protobuf/com/digitalasset/canton/protocol/v30/topology.proto index e47c3a585e9f..45d66316f6ed 100644 --- a/sdk/canton/community/base/src/main/protobuf/com/digitalasset/canton/protocol/v30/topology.proto +++ b/sdk/canton/community/base/src/main/protobuf/com/digitalasset/canton/protocol/v30/topology.proto @@ -113,13 +113,9 @@ message DomainTrustCertificate { // the uid of the domain that the participant trusts string domain = 2; - // if true, the participant restricts the domains to which it accepts transfer-outs - // TODO(#1252) implement transfer permissions - bool transfer_only_to_given_target_domains = 3; + reserved 3; // was bool transfer_only_to_given_target_domains = 3; - // the uids of the target domains that this participants supports for transfer if transfer - // is restricted. 
- repeated string target_domains = 4; + reserved 4; // was repeated string target_domains = 4; } // the optional trust certificate of the domain towards the participant @@ -146,13 +142,10 @@ message ParticipantDomainPermission { // these limits can be used to limit the number of participants that can host a given party // authorization: whoever controls the domain uid // UNIQUE(domain,party) -// TODO(#14050) implement me message PartyHostingLimits { string domain = 1; string party = 2; - - // how many participants can be assigned to the given party - uint32 quota = 3; + reserved 3; // was quota = 3; } // list of packages supported by this participant diff --git a/sdk/canton/community/base/src/main/scala/com/digitalasset/canton/metrics/TrafficConsumptionMetrics.scala b/sdk/canton/community/base/src/main/scala/com/digitalasset/canton/metrics/TrafficConsumptionMetrics.scala index 0d1bd0e8d225..b34a1d686fa7 100644 --- a/sdk/canton/community/base/src/main/scala/com/digitalasset/canton/metrics/TrafficConsumptionMetrics.scala +++ b/sdk/canton/community/base/src/main/scala/com/digitalasset/canton/metrics/TrafficConsumptionMetrics.scala @@ -4,7 +4,7 @@ package com.digitalasset.canton.metrics import cats.Eval -import com.daml.metrics.api.MetricHandle.{Gauge, LabeledMetricsFactory} +import com.daml.metrics.api.MetricHandle.{Counter, Gauge, LabeledMetricsFactory} import com.daml.metrics.api.MetricQualification.Traffic import com.daml.metrics.api.* import com.daml.metrics.api.noop.NoOpMetricsFactory @@ -22,6 +22,60 @@ class TrafficConsumptionMetrics( prefix: MetricName, labeledMetricsFactory: LabeledMetricsFactory, ) { + val trafficCostOfSubmittedEvent = labeledMetricsFactory.meter( + MetricInfo( + prefix :+ "submitted-event-cost", + summary = "Cost of event submitted from the sequencer client.", + description = """When the sequencer client sends an event to the sequencer to be sequenced, + it will record on this metric the cost of the event. Note that the event may or may not end up being sequenced. + So this metric may not exactly match the actual consumed traffic.""", + qualification = MetricQualification.Traffic, + ) + )(MetricsContext.Empty) + + val trafficCostOfDeliveredSequencedEvent = labeledMetricsFactory.meter( + MetricInfo( + prefix :+ "event-delivered-cost", + summary = "Cost of events that were sequenced and delivered.", + description = """Cost of events for which the sender received confirmation that they were delivered. + There is an exception for aggregated submissions: the cost of aggregate events will be recorded + as soon as the event is ordered and the sequencer waits to receive threshold-many events. + The final event may or may not be delivered successfully depending on the result of the aggregation. + """, + qualification = MetricQualification.Traffic, + ) + )(MetricsContext.Empty) + + val deliveredEventCounter: Counter = labeledMetricsFactory.counter( + MetricInfo( + prefix :+ "event-delivered", + summary = "Number of events that were sequenced and delivered.", + description = """Counter for event-delivered-cost.""", + qualification = MetricQualification.Traffic, + ) + ) + + val trafficCostOfNotDeliveredSequencedEvent = labeledMetricsFactory.meter( + MetricInfo( + prefix :+ "event-rejected-cost", + summary = "Cost of events that were sequenced but not delivered successfully.", + description = + """Cost of events for which the sender received confirmation that the events will not be delivered. + The reason for non-delivery is labeled on the metric, if available. 
+ """, + qualification = MetricQualification.Traffic, + ) + )(MetricsContext.Empty) + + val rejectedEventCounter: Counter = labeledMetricsFactory.counter( + MetricInfo( + prefix :+ "event-rejected", + summary = "Number of events that were sequenced but not delivered.", + description = """Counter for event-rejected-cost.""", + qualification = MetricQualification.Traffic, + ) + ) + // Gauges don't support metrics context per update. So instead create a map with a gauge per context. private val lastTrafficUpdateTimestampMetrics: TrieMap[MetricsContext, Eval[Gauge[Long]]] = TrieMap.empty[MetricsContext, Eval[Gauge[Long]]] diff --git a/sdk/canton/community/base/src/main/scala/com/digitalasset/canton/protocol/DomainParameters.scala b/sdk/canton/community/base/src/main/scala/com/digitalasset/canton/protocol/DomainParameters.scala index 57a604b131e8..2b678cc0428c 100644 --- a/sdk/canton/community/base/src/main/scala/com/digitalasset/canton/protocol/DomainParameters.scala +++ b/sdk/canton/community/base/src/main/scala/com/digitalasset/canton/protocol/DomainParameters.scala @@ -429,8 +429,6 @@ final case class DynamicDomainParameters private ( maxRequestSize = maxRequestSize.unwrap, onboardingRestriction = onboardingRestriction.toProtoV30, participantDomainLimits = Some(participantDomainLimits.toProto), - // TODO(#14050) limit number of participants that can be allocated to a given party - defaultMaxHostingParticipantsPerParty = 0, sequencerAggregateSubmissionTimeout = Some(sequencerAggregateSubmissionTimeout.toProtoPrimitive), trafficControlParameters = trafficControlParameters.map(_.toProtoV30), @@ -671,7 +669,6 @@ object DynamicDomainParameters extends HasProtocolVersionedCompanion[DynamicDoma maxRequestSizeP, onboardingRestrictionP, defaultLimitsP, - _partyHostingLimits, sequencerAggregateSubmissionTimeoutP, trafficControlConfigP, acsCommitmentCatchupConfigP, diff --git a/sdk/canton/community/base/src/main/scala/com/digitalasset/canton/resource/DbExceptionRetryPolicy.scala b/sdk/canton/community/base/src/main/scala/com/digitalasset/canton/resource/DbExceptionRetryPolicy.scala new file mode 100644 index 000000000000..257682c52d22 --- /dev/null +++ b/sdk/canton/community/base/src/main/scala/com/digitalasset/canton/resource/DbExceptionRetryPolicy.scala @@ -0,0 +1,164 @@ +// Copyright (c) 2024 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. +// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.resource + +import com.digitalasset.canton.discard.Implicits.DiscardOps +import com.digitalasset.canton.logging.{ErrorLoggingContext, TracedLogger} +import com.digitalasset.canton.resource.DatabaseStorageError.DatabaseStorageDegradation.DatabaseTaskRejected +import com.digitalasset.canton.resource.DbStorage.NoConnectionAvailable +import com.digitalasset.canton.tracing.TraceContext +import com.digitalasset.canton.util.retry.ErrorKind.* +import com.digitalasset.canton.util.retry.{ErrorKind, ExceptionRetryPolicy} +import org.postgresql.util.PSQLException +import org.slf4j.event.Level + +import java.sql.{ + SQLException, + SQLIntegrityConstraintViolationException, + SQLNonTransientConnectionException, + SQLRecoverableException, + SQLTransientException, +} +import scala.annotation.tailrec + +/** Defines which exceptions should be retryable when thrown by the database. */ +object DbExceptionRetryPolicy extends ExceptionRetryPolicy { + + /** Max number of retries for spurious transient errors. 
* Main use case is a transient unique constraint violation due to racy merge statements. + * Should go away after a very limited amount of retries. + * + * Value determined empirically in UpsertTestOracle. + * For single row inserts, 1 is sufficient. + * For batched inserts, 3 was more than sufficient in the test. + */ + private val spuriousTransientErrorMaxRetries = 10 + + @tailrec override def determineExceptionErrorKind( + exception: Throwable, + logger: TracedLogger, + )(implicit + tc: TraceContext + ): ErrorKind = exception match { + case exn: java.util.concurrent.RejectedExecutionException => + // This occurs when slick's task queue is full + + // Create a CantonError so that the error code gets logged. + DatabaseTaskRejected(exn.toString)( + ErrorLoggingContext.fromTracedLogger(logger) + ).discard + + TransientErrorKind() + case exception: PSQLException => + // Error codes documented here: https://www.postgresql.org/docs/9.6/errcodes-appendix.html + val error = exception.getSQLState + + if (error.startsWith("08")) { + // Class 08 — Connection Exception + TransientErrorKind() + } else if (error == "40001") { + // Class 40 — Transaction Rollback: 40001 serialization_failure + // Failure to serialize db accesses, happens due to contention + TransientErrorKind() + } else if (error == "40P01") { + // Deadlock + // See DatabaseDeadlockTestPostgres + // This also covers deadlocks reported as BatchUpdateExceptions, + // because they refer to a PSQLException as cause. + TransientErrorKind() + } else if (error == "25006") { + // Retry on read only transaction, which can occur on Azure + TransientErrorKind() + } else if (error.startsWith("57P") && error != "57P014" && error != "57P04") { + // Retry on operator intervention errors, otherwise Canton components crash in an uncontrolled manner when + // the exception bubbles up (don't retry on `query_canceled` and `database_dropped`) + TransientErrorKind() + } else if ( + error == "53000" || error == "53100" || error == "53200" || error == "53300" || error == "53400" + ) { + // Retry insufficient db resource errors + TransientErrorKind() + } else { + // Don't retry on other exceptions. These other exceptions should be those for which retrying typically won't + // help, for example a unique constraint violation. + logger.info(s"Fatal sql exception has error code: $error") + FatalErrorKind + } + + case _: SQLIntegrityConstraintViolationException => + // Both H2 and Oracle may fail with spurious constraint violations, due to racy implementation of the MERGE statements. + // In H2, this may also occur because it does not properly implement the serializable isolation level. + // See UpsertTestOracle + // See https://github.com/h2database/h2database/issues/2167 + TransientErrorKind(spuriousTransientErrorMaxRetries) + + case _: SQLRecoverableException | _: SQLTransientException | + _: SQLNonTransientConnectionException => + TransientErrorKind() + + // Handle SQLException and all classes that derive from it (e.g. 
java.sql.BatchUpdateException) + // Note that if the exception is not known but has a cause, we'll base the retry on the cause + case ex: SQLException => + val code = ex.getErrorCode + if (ex.getErrorCode == 1) { + // Retry on ORA-00001: unique constraint violated exception + TransientErrorKind(spuriousTransientErrorMaxRetries) + } else if (ex.getMessage == "Connection is closed") { + // May fail with a "Connection is closed" message if the db has gone down + TransientErrorKind() + } else if (ex.getErrorCode == 4021) { + // ORA timeout occurred while waiting to lock object + TransientErrorKind() + } else if (ex.getErrorCode == 54) { + // ORA timeout occurred while waiting to lock object or because NOWAIT has been set + // e.g. as part of truncate table + TransientErrorKind() + } else if (ex.getErrorCode == 60) { + // Deadlock + // See DatabaseDeadlockTestOracle + TransientErrorKind() + } else if ( + ex.getErrorCode == 604 && + List("ORA-08176", "ORA-08177").exists(ex.getMessage.contains) + ) { + // Oracle failure in a batch operation + // For Oracle, the `cause` is not always set properly for exceptions. This is a problem for batched queries. + // So, look through an exception's `message` to see if it contains a retryable problem. + TransientErrorKind() + } else if (ex.getErrorCode == 8176) { + // consistent read failure; rollback data not available + // Cause: Encountered data changed by an operation that does not generate rollback data + // Action: In read/write transactions, retry the intended operation. + TransientErrorKind() + } else if (ex.getErrorCode == 8177) { + // failure to serialize transaction with serializable isolation level + TransientErrorKind() + } else if (ex.getErrorCode == 17410) { + // No more data to read from socket, can be caused by network problems + TransientErrorKind(spuriousTransientErrorMaxRetries) + } else if (code == 17002) { + // This has been observed as either IO Error: Connection reset by peer or IO Error: Broken pipe + // when straight-up killing an Oracle database server (`kill -9 `) + TransientErrorKind() + } else if (code == 1088 || code == 1089 || code == 1090 || code == 1092) { + // Often observed for orderly Oracle shutdowns + // https://docs.oracle.com/en/database/oracle/oracle-database/19/errmg/ORA-00910.html#GUID-D9EBDFFA-88C6-4185-BD2C-E1B959A97274 + TransientErrorKind() + } else if (ex.getCause != null) { + logger.info("Unable to retry on exception, checking cause.") + determineExceptionErrorKind(ex.getCause, logger) + } else { + FatalErrorKind + } + + case _ => FatalErrorKind + } + + override def retryLogLevel(e: Throwable): Option[Level] = e match { + case _: NoConnectionAvailable => + // Avoid log noise if no connection is available either due to contention or a temporary network problem + Some(Level.DEBUG) + case _ => None + } +} diff --git a/sdk/canton/community/base/src/main/scala/com/digitalasset/canton/resource/Storage.scala b/sdk/canton/community/base/src/main/scala/com/digitalasset/canton/resource/Storage.scala index 8e5892f335d1..db3863c0469b 100644 --- a/sdk/canton/community/base/src/main/scala/com/digitalasset/canton/resource/Storage.scala +++ b/sdk/canton/community/base/src/main/scala/com/digitalasset/canton/resource/Storage.scala @@ -45,7 +45,6 @@ import com.digitalasset.canton.time.EnrichedDurations.* import com.digitalasset.canton.tracing.TraceContext import com.digitalasset.canton.util.ShowUtil.* import com.digitalasset.canton.util.retry.RetryEither -import com.digitalasset.canton.util.retry.RetryUtil.DbExceptionRetryable 
import com.digitalasset.canton.util.{Thereafter, *} import com.digitalasset.canton.{LfPackageId, LfPartyId} import com.google.protobuf.ByteString @@ -327,7 +326,7 @@ trait DbStorage extends Storage { self: NamedLogging => else dbConfig.parameters.connectionTimeout.asFiniteApproximation ), ) - .unlessShutdown(body, DbExceptionRetryable) + .unlessShutdown(body, DbExceptionRetryPolicy) .thereafter { _ => if (logOperations) { logger.debug(s"completed $action: $operationName") @@ -442,7 +441,7 @@ trait DbStorage extends Storage { self: NamedLogging => else dbConfig.parameters.connectionTimeout.asFiniteApproximation ), ) - .apply(body, DbExceptionRetryable) + .apply(body, DbExceptionRetryPolicy) .thereafter { _ => if (logOperations) { logger.debug(s"completed $action: $operationName") diff --git a/sdk/canton/community/base/src/main/scala/com/digitalasset/canton/sequencing/authentication/AuthenticationTokenProvider.scala b/sdk/canton/community/base/src/main/scala/com/digitalasset/canton/sequencing/authentication/AuthenticationTokenProvider.scala index fca02ac64655..a21f6a6a8349 100644 --- a/sdk/canton/community/base/src/main/scala/com/digitalasset/canton/sequencing/authentication/AuthenticationTokenProvider.scala +++ b/sdk/canton/community/base/src/main/scala/com/digitalasset/canton/sequencing/authentication/AuthenticationTokenProvider.scala @@ -25,8 +25,7 @@ import com.digitalasset.canton.sequencing.authentication.grpc.AuthenticationToke import com.digitalasset.canton.serialization.ProtoConverter import com.digitalasset.canton.topology.{DomainId, Member} import com.digitalasset.canton.tracing.{TraceContext, TraceContextGrpc} -import com.digitalasset.canton.util.retry.Pause -import com.digitalasset.canton.util.retry.RetryUtil.NoExnRetryable +import com.digitalasset.canton.util.retry.{NoExceptionRetryPolicy, Pause} import com.digitalasset.canton.version.ProtocolVersion import io.grpc.Status @@ -88,7 +87,7 @@ class AuthenticationTokenProvider( maxRetries = config.retries.value, delay = config.pauseRetries.underlying, operationName = "generate sequencer authentication token", - ).unlessShutdown(generateTokenET, NoExnRetryable) + ).unlessShutdown(generateTokenET, NoExceptionRetryPolicy) .onShutdown(Left(shutdownStatus)) } } diff --git a/sdk/canton/community/base/src/main/scala/com/digitalasset/canton/sequencing/client/SendAmplifier.scala b/sdk/canton/community/base/src/main/scala/com/digitalasset/canton/sequencing/client/SendAmplifier.scala index abeb46dede45..51544adab3c7 100644 --- a/sdk/canton/community/base/src/main/scala/com/digitalasset/canton/sequencing/client/SendAmplifier.scala +++ b/sdk/canton/community/base/src/main/scala/com/digitalasset/canton/sequencing/client/SendAmplifier.scala @@ -4,10 +4,12 @@ package com.digitalasset.canton.sequencing.client import cats.syntax.either.* +import com.daml.metrics.api.MetricsContext import com.digitalasset.canton.config import com.digitalasset.canton.lifecycle.UnlessShutdown.{AbortedDueToShutdown, Outcome} import com.digitalasset.canton.lifecycle.{PerformUnlessClosing, UnlessShutdown} import com.digitalasset.canton.logging.{NamedLoggerFactory, NamedLogging} +import com.digitalasset.canton.metrics.SequencerClientMetrics import com.digitalasset.canton.sequencing.protocol.{MessageId, SignedContent, SubmissionRequest} import com.digitalasset.canton.time.Clock import com.digitalasset.canton.topology.SequencerId @@ -30,7 +32,8 @@ class SendAmplifier( peekAtSendResult: () => Option[UnlessShutdown[SendResult]], puc: PerformUnlessClosing, timeout: 
scala.concurrent.duration.Duration, -)(implicit ec: ExecutionContext) + metrics: SequencerClientMetrics, +)(implicit ec: ExecutionContext, metricsContext: MetricsContext) extends NamedLogging { import SendAmplifier.* @@ -87,12 +90,17 @@ class SendAmplifier( (sequencerAlias, sequencerId, previousSequencers, transport, patienceO, token) } }).foreach { case (sequencerAlias, sequencerId, prevSequencers, transport, patienceO, token) => + val submissionCostOrZero = + signedRequest.content.submissionCost.map(_.cost.value).getOrElse(0L) val sendF = puc.performUnlessClosingUSF(s"sending message $messageId to sequencer $sequencerId") { logger.info( - s"Sending message ID $messageId to sequencer $sequencerId (alias $sequencerAlias) with max sequencing time " + + s"Sending message ID $messageId to sequencer $sequencerId (alias $sequencerAlias) with submission cost $submissionCostOrZero and max sequencing time " + s"${signedRequest.content.maxSequencingTime} (previous attempts = ${prevSequencers.size})" ) + metrics.trafficConsumption.trafficCostOfSubmittedEvent.mark(submissionCostOrZero)( + metricsContext.withExtraLabels("target-sequencer" -> sequencerAlias.toString) + ) transport.sendAsyncSigned(signedRequest, timeout).value.map { outcome => noTracingLogger.whenDebugEnabled { outcome match { diff --git a/sdk/canton/community/base/src/main/scala/com/digitalasset/canton/sequencing/client/SendTracker.scala b/sdk/canton/community/base/src/main/scala/com/digitalasset/canton/sequencing/client/SendTracker.scala index 3753c29c9a4f..287c3b3f0c02 100644 --- a/sdk/canton/community/base/src/main/scala/com/digitalasset/canton/sequencing/client/SendTracker.scala +++ b/sdk/canton/community/base/src/main/scala/com/digitalasset/canton/sequencing/client/SendTracker.scala @@ -11,6 +11,7 @@ import com.daml.metrics.api.MetricsContext.withEmptyMetricsContext import com.digitalasset.canton.concurrent.DirectExecutionContext import com.digitalasset.canton.config.ProcessingTimeout import com.digitalasset.canton.data.CantonTimestamp +import com.digitalasset.canton.error.BaseCantonError import com.digitalasset.canton.lifecycle.* import com.digitalasset.canton.logging.{NamedLoggerFactory, NamedLogging} import com.digitalasset.canton.metrics.SequencerClientMetrics @@ -245,9 +246,15 @@ class SendTracker( // Update the traffic controller with the traffic consumed in the receipt (trafficStateController, resultO) match { case (Some(tsc), Some(UnlessShutdown.Outcome(Success(deliver)))) => - deliver.trafficReceipt.foreach(tsc.updateWithReceipt(_, deliver.timestamp)) + deliver.trafficReceipt.foreach(tsc.updateWithReceipt(_, deliver.timestamp, None)) case (Some(tsc), Some(UnlessShutdown.Outcome(Error(deliverError)))) => - deliverError.trafficReceipt.foreach(tsc.updateWithReceipt(_, deliverError.timestamp)) + deliverError.trafficReceipt.foreach( + tsc.updateWithReceipt( + _, + deliverError.timestamp, + BaseCantonError.statusErrorCodes(deliverError.reason).headOption.orElse(Some("unknown")), + ) + ) case (Some(tsc), Some(UnlessShutdown.Outcome(Timeout(timestamp)))) => - // Event was not sequenced but we can still advance the base rate at the timestamp tsc.tickStateAt(timestamp) diff --git a/sdk/canton/community/base/src/main/scala/com/digitalasset/canton/sequencing/client/SequencerClient.scala b/sdk/canton/community/base/src/main/scala/com/digitalasset/canton/sequencing/client/SequencerClient.scala index 6927f2c6def4..1a9fad0ffae3 100644 --- 
a/sdk/canton/community/base/src/main/scala/com/digitalasset/canton/sequencing/client/SequencerClient.scala +++ b/sdk/canton/community/base/src/main/scala/com/digitalasset/canton/sequencing/client/SequencerClient.scala @@ -73,7 +73,7 @@ import com.digitalasset.canton.util.PekkoUtil.{CombinedKillSwitch, WithKillSwitc import com.digitalasset.canton.util.ShowUtil.* import com.digitalasset.canton.util.TryUtil.* import com.digitalasset.canton.util.* -import com.digitalasset.canton.util.retry.RetryUtil.AllExnRetryable +import com.digitalasset.canton.util.retry.AllExceptionRetryPolicy import com.digitalasset.canton.version.ProtocolVersion import com.digitalasset.canton.{SequencerAlias, SequencerCounter, time} import com.google.common.annotations.VisibleForTesting @@ -228,7 +228,8 @@ abstract class SequencerClientImpl( callback: SendCallback, amplify: Boolean, )(implicit - traceContext: TraceContext + traceContext: TraceContext, + metricsContext: MetricsContext, ): EitherT[FutureUnlessShutdown, SendAsyncClientError, Unit] = for { // TODO(#12950): Validate that group addresses map to at least one member @@ -269,7 +270,8 @@ abstract class SequencerClientImpl( callback: SendCallback, amplify: Boolean, )(implicit - traceContext: TraceContext + traceContext: TraceContext, + metricsContext: MetricsContext, ): EitherT[FutureUnlessShutdown, SendAsyncClientError, Unit] = withSpan("SequencerClient.sendAsync") { implicit traceContext => span => def mkRequestE( @@ -420,7 +422,8 @@ abstract class SequencerClientImpl( peekAtSendResult: () => Option[UnlessShutdown[SendResult]], topologySnapshot: SyncCryptoApi, )(implicit - traceContext: TraceContext + traceContext: TraceContext, + metricsContext: MetricsContext, ): EitherT[FutureUnlessShutdown, SendAsyncClientError, Unit] = { EitherTUtil .timed(metrics.submissions.sends) { @@ -467,7 +470,7 @@ abstract class SequencerClientImpl( signedRequest: SignedContent[SubmissionRequest], amplify: Boolean, peekAtSendResult: () => Option[UnlessShutdown[SendResult]], - )(implicit traceContext: TraceContext): Unit = { + )(implicit traceContext: TraceContext, metricsContext: MetricsContext): Unit = { val amplifier = new SendAmplifier( clock, signedRequest, @@ -476,6 +479,7 @@ abstract class SequencerClientImpl( peekAtSendResult, this, timeouts.network.duration, + metrics, ) if (amplify) amplifier.sendAmplified() else amplifier.sendOnce() } @@ -1811,7 +1815,7 @@ object SequencerClient { } retry .Pause(loggingContext.logger, performUnlessClosing, maxRetries, delay, sendDescription) - .unlessShutdown(doSend(), AllExnRetryable)( + .unlessShutdown(doSend(), AllExceptionRetryPolicy)( retry.Success.always, ec, loggingContext.traceContext, diff --git a/sdk/canton/community/base/src/main/scala/com/digitalasset/canton/sequencing/client/SequencerClientSend.scala b/sdk/canton/community/base/src/main/scala/com/digitalasset/canton/sequencing/client/SequencerClientSend.scala index b57d543e6536..38778d768d10 100644 --- a/sdk/canton/community/base/src/main/scala/com/digitalasset/canton/sequencing/client/SequencerClientSend.scala +++ b/sdk/canton/community/base/src/main/scala/com/digitalasset/canton/sequencing/client/SequencerClientSend.scala @@ -4,6 +4,7 @@ package com.digitalasset.canton.sequencing.client import cats.data.EitherT +import com.daml.metrics.api.MetricsContext import com.digitalasset.canton.data.CantonTimestamp import com.digitalasset.canton.lifecycle.FutureUnlessShutdown import com.digitalasset.canton.protocol.messages.DefaultOpenEnvelope @@ -55,7 +56,10 @@ trait 
SequencerClientSend { aggregationRule: Option[AggregationRule] = None, callback: SendCallback = SendCallback.empty, amplify: Boolean = false, - )(implicit traceContext: TraceContext): EitherT[FutureUnlessShutdown, SendAsyncClientError, Unit] + )(implicit + traceContext: TraceContext, + metricsContext: MetricsContext, + ): EitherT[FutureUnlessShutdown, SendAsyncClientError, Unit] /** Provides a value for max-sequencing-time to use for `sendAsync` if no better application provided timeout is available. * Is currently a configurable offset from our clock. diff --git a/sdk/canton/community/base/src/main/scala/com/digitalasset/canton/sequencing/protocol/TimeProof.scala b/sdk/canton/community/base/src/main/scala/com/digitalasset/canton/sequencing/protocol/TimeProof.scala index 59c2c33d596f..e06ea5937fc6 100644 --- a/sdk/canton/community/base/src/main/scala/com/digitalasset/canton/sequencing/protocol/TimeProof.scala +++ b/sdk/canton/community/base/src/main/scala/com/digitalasset/canton/sequencing/protocol/TimeProof.scala @@ -6,6 +6,7 @@ package com.digitalasset.canton.sequencing.protocol import cats.data.EitherT import cats.syntax.either.* import cats.syntax.option.* +import com.daml.metrics.api.MetricsContext import com.digitalasset.canton.ProtoDeserializationError import com.digitalasset.canton.config.CantonRequireTypes.String73 import com.digitalasset.canton.crypto.HashOps @@ -129,7 +130,8 @@ object TimeProof { protocolVersion: ProtocolVersion, )(implicit traceContext: TraceContext - ): EitherT[FutureUnlessShutdown, SendAsyncClientError, Unit] = + ): EitherT[FutureUnlessShutdown, SendAsyncClientError, Unit] = { + implicit val metricsContext: MetricsContext = MetricsContext("type" -> "time-proof") client.sendAsync( // we intentionally ask for an empty event to be sequenced to observe the time. // this means we can safely share this event without mentioning other recipients. @@ -144,6 +146,7 @@ object TimeProof { // Do not amplify because max sequencing time is set to MaxValue and therefore will exceed the aggregation time bound amplify = false, ) + } /** Use a constant prefix for a message which would permit the sequencer to track how many * time request events it is receiving. 
diff --git a/sdk/canton/community/base/src/main/scala/com/digitalasset/canton/sequencing/traffic/TrafficConsumed.scala b/sdk/canton/community/base/src/main/scala/com/digitalasset/canton/sequencing/traffic/TrafficConsumed.scala index 0a1fb6f03f6e..d6109e1296ba 100644 --- a/sdk/canton/community/base/src/main/scala/com/digitalasset/canton/sequencing/traffic/TrafficConsumed.scala +++ b/sdk/canton/community/base/src/main/scala/com/digitalasset/canton/sequencing/traffic/TrafficConsumed.scala @@ -125,6 +125,10 @@ final case class TrafficConsumed( ) = baseTrafficRemainderAtCurrentTime.subtract(cost) + tracedLogger.debug( + s"Consuming cost ${cost.value}: From base traffic: ${baseTrafficRemainderAtCurrentTime.value - baseTrafficRemainderAfterConsume.value}, From extra traffic: $extraTrafficConsumed" + ) + copy( baseTrafficRemainder = baseTrafficRemainderAfterConsume, extraTrafficConsumed = this.extraTrafficConsumed + extraTrafficConsumed, diff --git a/sdk/canton/community/base/src/main/scala/com/digitalasset/canton/sequencing/traffic/TrafficPurchasedSubmissionHandler.scala b/sdk/canton/community/base/src/main/scala/com/digitalasset/canton/sequencing/traffic/TrafficPurchasedSubmissionHandler.scala index adfc667032e0..2dae50f60a43 100644 --- a/sdk/canton/community/base/src/main/scala/com/digitalasset/canton/sequencing/traffic/TrafficPurchasedSubmissionHandler.scala +++ b/sdk/canton/community/base/src/main/scala/com/digitalasset/canton/sequencing/traffic/TrafficPurchasedSubmissionHandler.scala @@ -7,6 +7,7 @@ import cats.data.EitherT import cats.implicits.catsSyntaxAlternativeSeparate import cats.syntax.bifunctor.* import cats.syntax.parallel.* +import com.daml.metrics.api.MetricsContext import com.daml.nonempty.NonEmpty import com.digitalasset.canton.config.RequireTypes.{NonNegativeLong, PositiveInt} import com.digitalasset.canton.crypto.{DomainSnapshotSyncCryptoApi, DomainSyncCryptoClient} @@ -180,6 +181,7 @@ class TrafficPurchasedSubmissionHandler( traceContext: TraceContext, ): EitherT[FutureUnlessShutdown, TrafficControlError, CantonTimestamp] = { val callback = SendCallback.future + implicit val metricsContext: MetricsContext = MetricsContext("type" -> "traffic-purchase") // Make sure that the sequencer will ask for a time proof if it doesn't observe the sequencing in time domainTimeTracker.requestTick(maxSequencingTime) for { diff --git a/sdk/canton/community/base/src/main/scala/com/digitalasset/canton/sequencing/traffic/TrafficStateController.scala b/sdk/canton/community/base/src/main/scala/com/digitalasset/canton/sequencing/traffic/TrafficStateController.scala index 1c73582c3dca..3a3814f4342b 100644 --- a/sdk/canton/community/base/src/main/scala/com/digitalasset/canton/sequencing/traffic/TrafficStateController.scala +++ b/sdk/canton/community/base/src/main/scala/com/digitalasset/canton/sequencing/traffic/TrafficStateController.scala @@ -124,9 +124,23 @@ class TrafficStateController( s"Failed to update traffic consumed state at $sequencingTimestamp", ) - def updateWithReceipt(trafficReceipt: TrafficReceipt, timestamp: CantonTimestamp)(implicit + def updateWithReceipt( + trafficReceipt: TrafficReceipt, + timestamp: CantonTimestamp, + deliverErrorReason: Option[String], + )(implicit metricsContext: MetricsContext ): Unit = { + deliverErrorReason match { + case Some(reason) => + metrics.trafficCostOfNotDeliveredSequencedEvent.mark(trafficReceipt.consumedCost.value)( + metricsContext.withExtraLabels("reason" -> reason) + ) + metrics.rejectedEventCounter.inc() + case None => + 
metrics.trafficCostOfDeliveredSequencedEvent.mark(trafficReceipt.consumedCost.value) + metrics.deliveredEventCounter.inc() + } trafficConsumedManager.updateWithReceipt(trafficReceipt, timestamp).discard } diff --git a/sdk/canton/community/base/src/main/scala/com/digitalasset/canton/time/TimeProofRequestSubmitter.scala b/sdk/canton/community/base/src/main/scala/com/digitalasset/canton/time/TimeProofRequestSubmitter.scala index a7edddeb62b7..37f41aa16b15 100644 --- a/sdk/canton/community/base/src/main/scala/com/digitalasset/canton/time/TimeProofRequestSubmitter.scala +++ b/sdk/canton/community/base/src/main/scala/com/digitalasset/canton/time/TimeProofRequestSubmitter.scala @@ -11,8 +11,7 @@ import com.digitalasset.canton.logging.{NamedLoggerFactory, NamedLogging} import com.digitalasset.canton.sequencing.client.{SendAsyncClientError, SequencerClient} import com.digitalasset.canton.sequencing.protocol.TimeProof import com.digitalasset.canton.tracing.TraceContext -import com.digitalasset.canton.util.retry.RetryUtil.AllExnRetryable -import com.digitalasset.canton.util.retry.{Backoff, Success} +import com.digitalasset.canton.util.retry.{AllExceptionRetryPolicy, Backoff, Success} import com.digitalasset.canton.util.{FutureUtil, HasFlushFuture, retry} import com.digitalasset.canton.version.ProtocolVersion import com.google.common.annotations.VisibleForTesting @@ -104,7 +103,7 @@ private[time] class TimeProofRequestSubmitterImpl( "request current time", ) retrySendTimeRequest - .unlessShutdown(mkRequest(), AllExnRetryable) + .unlessShutdown(mkRequest(), AllExceptionRetryPolicy) .map { _ => // if we still care about the outcome (we could have witnessed a recent time while sending the request), // then schedule retrying a new request. diff --git a/sdk/canton/community/base/src/main/scala/com/digitalasset/canton/topology/TopologyManager.scala b/sdk/canton/community/base/src/main/scala/com/digitalasset/canton/topology/TopologyManager.scala index 44514fb5695f..523dfe92c8ab 100644 --- a/sdk/canton/community/base/src/main/scala/com/digitalasset/canton/topology/TopologyManager.scala +++ b/sdk/canton/community/base/src/main/scala/com/digitalasset/canton/topology/TopologyManager.scala @@ -181,7 +181,12 @@ abstract class TopologyManager[+StoreID <: TopologyStoreId]( tx <- build(op, mapping, serial, protocolVersion, signingKeys).mapK( FutureUnlessShutdown.outcomeK ) - signedTx <- signTransaction(tx, signingKeys, isProposal = !expectFullAuthorization) + signedTx <- signTransaction( + tx, + signingKeys, + isProposal = !expectFullAuthorization, + protocolVersion, + ) _ <- add(Seq(signedTx), forceChanges, expectFullAuthorization) } yield signedTx } @@ -315,6 +320,7 @@ abstract class TopologyManager[+StoreID <: TopologyStoreId]( transaction: TopologyTransaction[Op, M], signingKeys: Seq[Fingerprint], isProposal: Boolean, + protocolVersion: ProtocolVersion, )(implicit traceContext: TraceContext ): EitherT[FutureUnlessShutdown, TopologyManagerError, SignedTopologyTransaction[Op, M]] = { @@ -339,8 +345,7 @@ abstract class TopologyManager[+StoreID <: TopologyStoreId]( keys, isProposal, crypto.privateCrypto, - // TODO(#14048) The `SignedTopologyTransaction` may use a different versioning scheme than the contained transaction. 
Use the right protocol version here - transaction.representativeProtocolVersion.representative, + protocolVersion, ) .leftMap { case SigningError.UnknownSigningKey(keyId) => diff --git a/sdk/canton/community/base/src/main/scala/com/digitalasset/canton/topology/TopologyManagerError.scala b/sdk/canton/community/base/src/main/scala/com/digitalasset/canton/topology/TopologyManagerError.scala index 1b1da35528cb..e92f78daae0e 100644 --- a/sdk/canton/community/base/src/main/scala/com/digitalasset/canton/topology/TopologyManagerError.scala +++ b/sdk/canton/community/base/src/main/scala/com/digitalasset/canton/topology/TopologyManagerError.scala @@ -458,20 +458,6 @@ object TopologyManagerError extends TopologyManagerErrorGroup { with TopologyManagerError } - object PartyExceedsHostingLimit - extends ErrorCode( - id = "PARTY_EXCEEDS_HOSTING_LIMIT", - ErrorCategory.InvalidIndependentOfSystemState, - ) { - final case class Reject(party: PartyId, limit: Int, numParticipants: Int)(implicit - override val loggingContext: ErrorLoggingContext - ) extends CantonError.Impl( - cause = - s"Party $party exceeds hosting limit of $limit with desired number of $numParticipants hosting participant." - ) - with TopologyManagerError - } - @Explanation( "This error indicates that the topology transaction references members that are currently unknown." ) @@ -679,9 +665,9 @@ object TopologyManagerError extends TopologyManagerErrorGroup { "This error indicates that the partyId to allocate is the same as an already existing admin party." ) @Resolution("Submit the topology transaction with a changed partyId.") - object PartyIdIsAdminParty + object PartyIdConflictWithAdminParty extends ErrorCode( - id = "TOPOLOGY_PARTY_ID_IS_ADMIN_PARTY", + id = "TOPOLOGY_PARTY_ID_CONFLICT_WITH_ADMIN_PARTY", ErrorCategory.InvalidGivenCurrentSystemStateResourceExists, ) { final case class Reject(partyId: PartyId)(implicit @@ -699,9 +685,9 @@ object TopologyManagerError extends TopologyManagerErrorGroup { @Resolution( "Change the identity of the participant by either changing the namespace or the participant's UID and try to onboard to the domain again." 
) - object ParticipantIdClashesWithPartyId + object ParticipantIdConflictWithPartyId extends ErrorCode( - id = "TOPOLOGY_PARTICIPANT_ID_CLASH_WITH_PARTY", + id = "TOPOLOGY_PARTICIPANT_ID_CONFLICT_WITH_PARTY", ErrorCategory.InvalidGivenCurrentSystemStateResourceExists, ) { final case class Reject(participantId: ParticipantId, partyId: PartyId)(implicit diff --git a/sdk/canton/community/base/src/main/scala/com/digitalasset/canton/topology/TopologyStateProcessor.scala b/sdk/canton/community/base/src/main/scala/com/digitalasset/canton/topology/TopologyStateProcessor.scala index 18e9aa83fd9e..3634db74f679 100644 --- a/sdk/canton/community/base/src/main/scala/com/digitalasset/canton/topology/TopologyStateProcessor.scala +++ b/sdk/canton/community/base/src/main/scala/com/digitalasset/canton/topology/TopologyStateProcessor.scala @@ -300,7 +300,7 @@ class TopologyStateProcessor( expectFullAuthorization, ) ) - .subflatMap { case (_, tx) => + .subflatMap { tx => tx.rejectionReason.toLeft(tx.transaction) } } diff --git a/sdk/canton/community/base/src/main/scala/com/digitalasset/canton/topology/client/IdentityProvidingServiceClient.scala b/sdk/canton/community/base/src/main/scala/com/digitalasset/canton/topology/client/IdentityProvidingServiceClient.scala index 3af4b6a83951..f5b5314f55ef 100644 --- a/sdk/canton/community/base/src/main/scala/com/digitalasset/canton/topology/client/IdentityProvidingServiceClient.scala +++ b/sdk/canton/community/base/src/main/scala/com/digitalasset/canton/topology/client/IdentityProvidingServiceClient.scala @@ -312,10 +312,7 @@ trait PartyTopologySnapshotClient { filterParty: String, filterParticipant: String, limit: Int, - )(implicit traceContext: TraceContext): Future[ - Set[PartyId] - ] // TODO(#14048): Decide on whether to standarize APIs on LfPartyId or PartyId and unify interfaces - + )(implicit traceContext: TraceContext): Future[Set[PartyId]] } object PartyTopologySnapshotClient { diff --git a/sdk/canton/community/base/src/main/scala/com/digitalasset/canton/topology/processing/AuthorizationGraph.scala b/sdk/canton/community/base/src/main/scala/com/digitalasset/canton/topology/processing/AuthorizationGraph.scala index 5e9bcecbeb07..239e908bfe10 100644 --- a/sdk/canton/community/base/src/main/scala/com/digitalasset/canton/topology/processing/AuthorizationGraph.scala +++ b/sdk/canton/community/base/src/main/scala/com/digitalasset/canton/topology/processing/AuthorizationGraph.scala @@ -18,7 +18,7 @@ import com.digitalasset.canton.topology.transaction.* import com.digitalasset.canton.tracing.TraceContext import com.digitalasset.canton.util.ErrorUtil import com.digitalasset.canton.util.ShowUtil.* -import com.google.common.graph.ValueGraphBuilder +import com.google.common.graph.{MutableValueGraph, ValueGraphBuilder} import scala.collection.concurrent.TrieMap import scala.jdk.CollectionConverters.* @@ -102,50 +102,48 @@ class AuthorizationGraph( * * *
  • All edges incoming to `to` are labelled with the same NSD.
  • Each node has at least one incoming or outgoing edge.
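  • For illustration (an assumed example, not part of the invariants): the root certificate of the namespace appears as a self-loop on the root key's fingerprint, and a delegation to another key `k` signed by the root key adds an edge from the root fingerprint to `k`'s fingerprint, labelled with that NamespaceDelegation.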
  • * */ - private val graph = ValueGraphBuilder - .directed() - .allowsSelfLoops(true) // we allow self loops for the root certificate - .build[Fingerprint, AuthorizedNamespaceDelegation] + private val graph: MutableValueGraph[Fingerprint, AuthorizedNamespaceDelegation] = + ValueGraphBuilder + .directed() + .allowsSelfLoops(true) // we allow self loops for the root certificate + .build[Fingerprint, AuthorizedNamespaceDelegation] /** Authorized namespace delegations for namespace `this.namespace`, grouped by target */ private val cache = new TrieMap[Fingerprint, AuthorizedNamespaceDelegation]() - /** Check if `item` is authorized and, if so, add its mapping to this graph. - * - * @throws java.lang.IllegalArgumentException if `item` does not refer to `namespace` or the operation is not REPLACE. - */ - def add(item: AuthorizedNamespaceDelegation)(implicit traceContext: TraceContext): Boolean = { - assertNamespaceAndOperation(item) - if ( - AuthorizedTopologyTransaction.isRootCertificate(item) || - this.existsAuthorizedKeyIn(item.signingKeys, requireRoot = true) - ) { - doAdd(item) - recompute() - true - } else false - } + def nodes: Set[Fingerprint] = graph.nodes().asScala.toSet - /** Add the mappings in `items` to this graph, regardless if they are authorized or not. + def replace(item: AuthorizedNamespaceDelegation)(implicit traceContext: TraceContext): Unit = + replace(Seq(item)) + + /** Add the mappings in `items` to this graph and remove any existing mappings with the same target fingerprint. * If an unauthorized namespace delegation is added to the graph, the graph will contain nodes that are not connected to the root. * The target key of the unauthorized delegation will still be considered unauthorized. * * @throws java.lang.IllegalArgumentException if `item` does not refer to `namespace` or the operation is not REPLACE. */ - def unauthorizedAdd( + def replace( items: Seq[AuthorizedNamespaceDelegation] )(implicit traceContext: TraceContext): Unit = { - items.foreach(doAdd) + items.foreach(doReplace) recompute() } - private def doAdd( + private def doReplace( item: AuthorizedNamespaceDelegation )(implicit traceContext: TraceContext): Unit = { - assertNamespaceAndOperation(item) + ErrorUtil.requireArgument( + item.mapping.namespace == namespace, + s"unable to add namespace delegation for ${item.mapping.namespace} to graph for $namespace", + ) + ErrorUtil.requireArgument( + item.operation == Replace, + s"unable to add namespace delegation with operation ${item.operation} to graph for $namespace", + ) val targetFingerprint = item.mapping.target.fingerprint // if the node already exists, remove all authorizing edges from item.signingKeys to item.target // to not leak previous authorizations @@ -158,40 +156,22 @@ class AuthorizationGraph( } } - private def assertNamespaceAndOperation( - item: AuthorizedNamespaceDelegation - )(implicit traceContext: TraceContext): Unit = { - ErrorUtil.requireArgument( - item.mapping.namespace == namespace, - s"unable to add namespace delegation for ${item.mapping.namespace} to graph for $namespace", - ) - ErrorUtil.requireArgument( - item.operation == Replace, - s"unable to add namespace delegation with operation ${item.operation} to graph for $namespace", - ) - } - - /** Check if `item` is authorized and, if so, remove all mappings with the same target key from this graph. - * Note that addition and removal of a namespace delegation can be authorized by different keys. + /** Remove all mappings with the same target key from this graph. 
* * @throws java.lang.IllegalArgumentException if `item` does not refer to `namespace` or the operation is not REMOVE. */ - def remove(item: AuthorizedNamespaceDelegation)(implicit traceContext: TraceContext): Boolean = { + def remove(item: AuthorizedNamespaceDelegation)(implicit traceContext: TraceContext): Unit = { ErrorUtil.requireArgument( item.mapping.namespace == namespace, s"unable to remove namespace delegation for ${item.mapping.namespace} from graph for $namespace", ) - ErrorUtil.requireArgument( item.operation == Remove, s"unable to remove namespace delegation with operation ${item.operation} from graph for $namespace", ) - if (existsAuthorizedKeyIn(item.signingKeys, requireRoot = true)) { - doRemove(item) - recompute() - true - } else false + doRemove(item) + recompute() } /** remove a namespace delegation @@ -204,21 +184,20 @@ class AuthorizationGraph( )(implicit traceContext: TraceContext): Unit = { val keyToRemove = item.mapping.target.fingerprint if (graph.nodes().contains(keyToRemove)) { - // remove all edges labelled with item - graph + // The java.util.Set returned by predecessors is backed by the graph. + // Therefore we convert it into an immutable scala Set, so that removeEdge + // doesn't cause a ConcurrentModificationException + val predecessors = graph .predecessors(keyToRemove) .asScala - // The java.util.Set returned by predecessors is backed by the graph. - // Therefore we convert it into an immutable scala Set, so that removeEdge - // doesn't cause a ConcurrentModificationException .toSet[Fingerprint] - .foreach { - graph.removeEdge(_, keyToRemove).discard - } - // if item.target has no outgoing authorizations, remove it from the graph altogether - if (graph.outDegree(keyToRemove) == 0) { - graph.removeNode(keyToRemove).discard + // remove all edges that have the same target key fingerprint as item + predecessors.foreach(graph.removeEdge(_, keyToRemove).discard) + + // Remove nodes without edges + (predecessors + keyToRemove).foreach { node => + if (graph.degree(node) == 0) graph.removeNode(node).discard } } else { logger.warn(s"Superfluous removal of namespace delegation $item") @@ -356,13 +335,19 @@ object AuthorizationCheck { /** Authorization graph for a decentralized namespace. * - * @throws java.lang.IllegalArgumentException if `dnd` and `direct` refer to different namespaces. + * @throws java.lang.IllegalArgumentException if `dnd` and `ownerGraphs` refer to different namespaces. 
*/ final case class DecentralizedNamespaceAuthorizationGraph( dnd: DecentralizedNamespaceDefinition, ownerGraphs: Seq[AuthorizationGraph], ) extends AuthorizationCheck { + require( + dnd.owners.forgetNE == ownerGraphs.map(_.namespace).toSet, + s"The owner graphs refer to the wrong namespaces (expected: ${dnd.owners}), actual: ${ownerGraphs + .map(_.namespace)}).", + ) + override def existsAuthorizedKeyIn( authKeys: Set[Fingerprint], requireRoot: Boolean, diff --git a/sdk/canton/community/base/src/main/scala/com/digitalasset/canton/topology/processing/IncomingTopologyTransactionAuthorizationValidator.scala b/sdk/canton/community/base/src/main/scala/com/digitalasset/canton/topology/processing/IncomingTopologyTransactionAuthorizationValidator.scala index 12622a3ae06d..610fcdc1af52 100644 --- a/sdk/canton/community/base/src/main/scala/com/digitalasset/canton/topology/processing/IncomingTopologyTransactionAuthorizationValidator.scala +++ b/sdk/canton/community/base/src/main/scala/com/digitalasset/canton/topology/processing/IncomingTopologyTransactionAuthorizationValidator.scala @@ -3,11 +3,8 @@ package com.digitalasset.canton.topology.processing -import cats.Monoid -import cats.data.EitherT import cats.syntax.bifunctor.* import cats.syntax.foldable.* -import com.digitalasset.canton.config.RequireTypes.PositiveInt import com.digitalasset.canton.crypto.CryptoPureApi import com.digitalasset.canton.data.CantonTimestamp import com.digitalasset.canton.discard.Implicits.DiscardOps @@ -21,67 +18,57 @@ import com.digitalasset.canton.topology.processing.AuthorizedTopologyTransaction import com.digitalasset.canton.topology.store.ValidatedTopologyTransaction.GenericValidatedTopologyTransaction import com.digitalasset.canton.topology.store.* import com.digitalasset.canton.topology.transaction.SignedTopologyTransaction.GenericSignedTopologyTransaction -import com.digitalasset.canton.topology.transaction.TopologyMapping.RequiredAuthAuthorizations -import com.digitalasset.canton.topology.transaction.TopologyTransaction.GenericTopologyTransaction +import com.digitalasset.canton.topology.transaction.TopologyChangeOp.Replace +import com.digitalasset.canton.topology.transaction.TopologyMapping.ReferencedAuthorizations import com.digitalasset.canton.topology.transaction.* import com.digitalasset.canton.tracing.TraceContext import scala.concurrent.{ExecutionContext, Future} -/** Data collection providing information with respect to what is affected by this update - * - * @param authChecks the set of Uids that is mentioned in the transaction such that we can load the certificates for the respective uids - * @param cascadingNamespaces the set of namespaces where we had a namespace delegation change requiring a cascading update - * @param cascadingUids the set of uids where we had a identifier delegation change requiring a cascading update - */ -private[processing] final case class UpdateAggregation( - authChecks: Set[UniqueIdentifier] = Set(), - cascadingNamespaces: Set[Namespace] = Set(), - cascadingUids: Set[UniqueIdentifier] = Set(), +private[processing] final case class AuthorizationKeys( + uids: Set[UniqueIdentifier], + namespaces: Set[Namespace], ) { + def ++(other: AuthorizationKeys): AuthorizationKeys = AuthorizationKeys( + uids ++ other.uids, + namespaces ++ other.namespaces, + ) +} - /** returns all cascading uids which are not already covered by the cascading namespaces */ - def filteredCascadingUids: Set[UniqueIdentifier] = - cascadingUids.filterNot(x => cascadingNamespaces.contains(x.namespace)) - - /** 
returns true if the given uid is affected by a cascading update */ - def isCascading(uid: UniqueIdentifier): Boolean = - cascadingNamespaces.contains(uid.namespace) || cascadingUids.contains(uid) - - def add( - mapping: TopologyMapping, - currentTransaction: Option[GenericTopologyTransaction], - ): UpdateAggregation = mapping match { - case NamespaceDelegation(ns, _, _) => - // change in certificate requires full recompute for namespace (add could unlock existing certificates, remove could make anything obsolete) - this.copy(cascadingNamespaces = cascadingNamespaces + ns) - case IdentifierDelegation(uid, _) => - // change in identifier delegation requires full recompute for uid - this.copy(cascadingUids = cascadingUids + uid, authChecks = authChecks + uid) - case DecentralizedNamespaceDefinition(ns, _, owners) => - // change in decentralized namespace definition requires full recompute - this.copy(cascadingNamespaces = cascadingNamespaces + ns ++ owners) - case x => - this.copy(authChecks = - authChecks ++ mapping.requiredAuth(currentTransaction).authorizations.uids - ) - } +private object AuthorizationKeys { - def nothingCascading: Boolean = cascadingNamespaces.isEmpty && cascadingUids.isEmpty + val empty: AuthorizationKeys = AuthorizationKeys(Set.empty, Set.empty) - def authNamespaces: Set[Namespace] = authChecks.map(_.namespace) ++ cascadingNamespaces -} + def required( + toValidate: TopologyTransaction[TopologyChangeOp, TopologyMapping], + inStore: Option[TopologyTransaction[TopologyChangeOp, TopologyMapping]], + ): AuthorizationKeys = { + val referencedAuthorizations = toValidate.mapping.requiredAuth(inStore).referenced + requiredForProcessing(toValidate) ++ + requiredForCheckingAuthorization(referencedAuthorizations) + } -object UpdateAggregation { - implicit val monoid: Monoid[UpdateAggregation] = new Monoid[UpdateAggregation] { - override def empty: UpdateAggregation = UpdateAggregation() + private def requiredForProcessing( + toValidate: TopologyTransaction[TopologyChangeOp, TopologyMapping] + ): AuthorizationKeys = + toValidate.mapping match { + case NamespaceDelegation(namespace, _, _) => AuthorizationKeys(Set.empty, Set(namespace)) + case DecentralizedNamespaceDefinition(_, _, owners) if toValidate.operation == Replace => + // In case of Replace, we need to preload the owner graphs so that we can construct the decentralized graph. + // In case of a Remove, we do not need to preload anything, as we'll simply set the cache value to None. 
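+ // For example (illustrative): replacing a decentralized namespace owned by ns1 and ns2 contributes
+ // AuthorizationKeys(uids = Set.empty, namespaces = Set(ns1, ns2)) here, so both owner graphs are cached before the authorization check runs.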
+ AuthorizationKeys(Set.empty, owners.forgetNE) + case IdentifierDelegation(identifier, _) => AuthorizationKeys(Set(identifier), Set.empty) + case _: TopologyMapping => AuthorizationKeys.empty + } - override def combine(x: UpdateAggregation, y: UpdateAggregation): UpdateAggregation = - UpdateAggregation( - authChecks = x.authChecks ++ y.authChecks, - cascadingNamespaces = x.cascadingNamespaces ++ y.cascadingNamespaces, - cascadingUids = x.cascadingUids ++ y.cascadingUids, - ) + private def requiredForCheckingAuthorization( + requiredAuth: ReferencedAuthorizations + ): AuthorizationKeys = { + val ReferencedAuthorizations(namespacesWithRoot, namespaces, uids, _extraKeys) = requiredAuth + AuthorizationKeys( + uids, + namespacesWithRoot ++ namespaces ++ uids.map(_.namespace), + ) } } @@ -107,7 +94,7 @@ class IncomingTopologyTransactionAuthorizationValidator( val store: TopologyStore[TopologyStoreId], validationIsFinal: Boolean, val loggerFactory: NamedLoggerFactory, -)(implicit ec: ExecutionContext) +)(implicit override val executionContext: ExecutionContext) extends NamedLogging with TransactionAuthorizationValidator { @@ -128,46 +115,31 @@ class IncomingTopologyTransactionAuthorizationValidator( * And we use that "head state" to verify if the transactions are authorized or not. */ def validateAndUpdateHeadAuthState( - timestamp: CantonTimestamp, + effectiveTime: CantonTimestamp, toValidate: GenericSignedTopologyTransaction, inStore: Option[GenericSignedTopologyTransaction], expectFullAuthorization: Boolean, )(implicit traceContext: TraceContext - ): Future[(UpdateAggregation, GenericValidatedTopologyTransaction)] = { - for { - authCheckResult <- determineRelevantUidsAndNamespaces(toValidate, inStore.map(_.transaction)) - (updateAggregation, targetDomainVerified) = authCheckResult - loadGraphsF = loadAuthorizationGraphs(timestamp, updateAggregation.authNamespaces) - loadUidsF = loadIdentifierDelegationsCascading( - timestamp, - updateAggregation, - updateAggregation.authChecks, - ) - _ <- loadGraphsF - cascadingUidsFromNamespace <- loadUidsF - } yield { - - logger.debug(s"Update aggregation yielded ${updateAggregation}") - val validated = targetDomainVerified match { - case ValidatedTopologyTransaction(tx, None, _) => - processTransaction( - tx, - inStore, - expectFullAuthorization, - ) - case v => v - } - // add any uid for which we have a valid identifier delegation to the cascading set (as a new namespace - // certificate might activate an identifier delegation) - ( - updateAggregation.copy(cascadingUids = - updateAggregation.cascadingUids ++ cascadingUidsFromNamespace - ), - validated, - ) + ): Future[GenericValidatedTopologyTransaction] = + verifyDomain(toValidate) match { + case ValidatedTopologyTransaction(tx, None, _) => + val referencedKeys = + AuthorizationKeys.required(toValidate.transaction, inStore.map(_.transaction)) + val loadGraphsF = loadNamespaceCaches(effectiveTime, referencedKeys.namespaces) + val loadUidsF = loadIdentifierDelegationCaches(effectiveTime, referencedKeys.uids) + for { + _ <- loadGraphsF + _ <- loadUidsF + } yield processTransaction( + tx, + inStore, + expectFullAuthorization, + ) + + case invalid @ ValidatedTopologyTransaction(_, Some(_rejectionReason), _) => + Future.successful(invalid) } - } /** Validates a topology transaction as follows: *
      @@ -202,34 +174,27 @@ class IncomingTopologyTransactionAuthorizationValidator( private def handleSuccessfulSignatureChecks( toValidate: GenericSignedTopologyTransaction, - missingAuthorizers: RequiredAuthAuthorizations, + missingAuthorizers: ReferencedAuthorizations, expectFullAuthorization: Boolean, )(implicit traceContext: TraceContext ): ValidatedTopologyTransaction[TopologyChangeOp, TopologyMapping] = { // if there are no missing authorizers, we can update the internal caches - val isFullyAuthorized = if (missingAuthorizers.isEmpty) { - val processedNSD = toValidate - .selectMapping[NamespaceDelegation] - .forall { sigTx => processNamespaceDelegation(AuthorizedTopologyTransaction(sigTx)) } + if (missingAuthorizers.isEmpty) { + toValidate.selectMapping[NamespaceDelegation].foreach { sigTx => + processNamespaceDelegation(AuthorizedTopologyTransaction(sigTx)) + } - val processedIDD = toValidate.selectMapping[IdentifierDelegation].forall { sigTx => + toValidate.selectMapping[IdentifierDelegation].foreach { sigTx => processIdentifierDelegation(AuthorizedTopologyTransaction(sigTx)) } - val processedDND = - toValidate.selectMapping[DecentralizedNamespaceDefinition].forall { sigTx => - processDecentralizedNamespaceDefinition(AuthorizedTopologyTransaction(sigTx)) - } - val mappingSpecificCheck = processedNSD && processedIDD && processedDND - if (!mappingSpecificCheck) { - logger.debug(s"Mapping specific check failed") + toValidate.selectMapping[DecentralizedNamespaceDefinition].foreach { sigTx => + processDecentralizedNamespaceDefinition(AuthorizedTopologyTransaction(sigTx)) } - mappingSpecificCheck - } else { false } + } - val acceptMissingAuthorizers = - toValidate.isProposal && !expectFullAuthorization + val acceptMissingAuthorizers = toValidate.isProposal && !expectFullAuthorization // if the result of this validation is final (when processing transactions for the authorized store // or sequenced transactions from the domain) we set the proposal flag according to whether the transaction @@ -237,11 +202,11 @@ class IncomingTopologyTransactionAuthorizationValidator( // This must not be done when preliminarily validating transactions via the DomainTopologyManager, because // the validation outcome might change when validating the transaction again after it has been sequenced. 
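 // For illustration: with validationIsFinal = true a transaction without missing authorizers is persisted with isProposal = false, while one that still lacks authorizers keeps isProposal = true.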
val finalTransaction = - if (validationIsFinal) toValidate.copy(isProposal = !isFullyAuthorized) + if (validationIsFinal) toValidate.copy(isProposal = !missingAuthorizers.isEmpty) else toValidate // Either the transaction is fully authorized or the request allows partial authorization - if (isFullyAuthorized || acceptMissingAuthorizers) { + if (missingAuthorizers.isEmpty || acceptMissingAuthorizers) { ValidatedTopologyTransaction(finalTransaction, None) } else { if (!missingAuthorizers.isEmpty) { @@ -263,7 +228,7 @@ class IncomingTopologyTransactionAuthorizationValidator( toValidate: GenericSignedTopologyTransaction ): Option[Either[ TopologyTransactionRejection, - (GenericSignedTopologyTransaction, RequiredAuthAuthorizations), + (GenericSignedTopologyTransaction, ReferencedAuthorizations), ]] = { toValidate .selectMapping[NamespaceDelegation] @@ -280,63 +245,38 @@ class IncomingTopologyTransactionAuthorizationValidator( ) .bimap( TopologyTransactionRejection.SignatureCheckFailed, - _ => (toValidate, RequiredAuthAuthorizations.empty /* no missing authorizers */ ), + _ => (toValidate, ReferencedAuthorizations.empty /* no missing authorizers */ ), ) result } } - /** loads all identifier delegations into the identifier delegation cache - * - * This function has two "modes". On a cascading update affecting namespaces, we have - * to reload all identifier delegation certificates in order to figure out the affected - * uids. The return Set then contains all the uids that were loaded as a result of the - * namespace query. - * - * If there is no cascading namespace update, we just load the affected uids and return an empty set. - */ - private def loadIdentifierDelegationsCascading( - timestamp: CantonTimestamp, - cascadingUpdate: UpdateAggregation, - transactionUids: Set[UniqueIdentifier], - )(implicit traceContext: TraceContext): Future[Set[UniqueIdentifier]] = { - // we need to load the identifier delegations for all the uids that are mentioned by a transactions - val loadUids = - (transactionUids ++ cascadingUpdate.cascadingUids) -- identifierDelegationCache.keySet - if (loadUids.isEmpty && cascadingUpdate.cascadingNamespaces.isEmpty) { - Future.successful(Set.empty[UniqueIdentifier]) - } else loadIdentifierDelegations(timestamp, cascadingUpdate.cascadingNamespaces.toSeq, loadUids) - } - private def processIdentifierDelegation( tx: AuthorizedIdentifierDelegation - ): Boolean = { - // check authorization - val check = getAuthorizationCheckForNamespace(tx.mapping.identifier.namespace) - val keysAreValid = check.existsAuthorizedKeyIn(tx.signingKeys, requireRoot = false) - // update identifier delegation cache if necessary - if (keysAreValid) { - val updateOp: Set[AuthorizedIdentifierDelegation] => Set[AuthorizedIdentifierDelegation] = - tx.operation match { - case TopologyChangeOp.Replace => - x => x + tx - case TopologyChangeOp.Remove => - x => // using a filter as the key that authorized the removal might be different that authorized the addition - x.filter(cur => cur.mapping != tx.mapping) - } - updateIdentifierDelegationCache(tx.mapping.identifier, updateOp) + )(implicit traceContext: TraceContext): Unit = { + val uid = tx.mapping.identifier + // This will succeed, because loading of the uid is requested in AuthorizationKeys.requiredForProcessing + val oldIDDs = tryGetIdentifierDelegationsForUid(uid) + val withTxRemoved = oldIDDs.filter(_.mapping.uniqueKey != tx.mapping.uniqueKey) + val newIDDs = tx.operation match { + case TopologyChangeOp.Replace => + // We also need to remove the old 
mapping so that the new mapping actually *replaces* the old one. + withTxRemoved + tx + case TopologyChangeOp.Remove => withTxRemoved } - keysAreValid + identifierDelegationCache.put(uid, newIDDs).discard } private def processNamespaceDelegation( tx: AuthorizedNamespaceDelegation - )(implicit traceContext: TraceContext): Boolean = { - val graph = getAuthorizationGraphForNamespace(tx.mapping.namespace) + )(implicit traceContext: TraceContext): Unit = { + // This will succeed, because loading of the graph is requested in AuthorizationKeys.requiredForProcessing + val graph = tryGetAuthorizationGraphForNamespace(tx.mapping.namespace) + // add or remove including authorization check tx.operation match { - case TopologyChangeOp.Replace => graph.add(tx) + case TopologyChangeOp.Replace => graph.replace(tx) case TopologyChangeOp.Remove => graph.remove(tx) } } @@ -348,77 +288,32 @@ class IncomingTopologyTransactionAuthorizationValidator( */ private def processDecentralizedNamespaceDefinition( tx: AuthorizedDecentralizedNamespaceDefinition - )(implicit traceContext: TraceContext): Boolean = { + )(implicit traceContext: TraceContext): Unit = { val decentralizedNamespace = tx.mapping.namespace - val dnsGraph = decentralizedNamespaceCache - .get(decentralizedNamespace) - .map { case (_, dnsGraph) => dnsGraph } - .getOrElse { - val serialToValidate = tx.serial - if (serialToValidate > PositiveInt.one) { - logger.warn( - s"decentralizedNamespaceCache did not contain namespace $decentralizedNamespace even though the serial to validate is $serialToValidate" - ) - } - val ownerGraphs = tx.mapping.owners.forgetNE.toSeq.map(getAuthorizationGraphForNamespace) - val newDecentralizedNamespaceGraph = DecentralizedNamespaceAuthorizationGraph( - tx.mapping, - ownerGraphs, + + tx.operation match { + case TopologyChangeOp.Remove => + decentralizedNamespaceCache.put(decentralizedNamespace, None).discard + + case TopologyChangeOp.Replace => + val ownerGraphs = tx.mapping.owners.forgetNE.toSeq.map( + // This will succeed, because loading of owner graphs is requested in AuthorizationKeys.requiredForProcessing + tryGetAuthorizationGraphForNamespace ) - newDecentralizedNamespaceGraph - } - val isAuthorized = dnsGraph.existsAuthorizedKeyIn(tx.signingKeys, requireRoot = false) - - if (isAuthorized) { - tx.operation match { - case TopologyChangeOp.Remove => - decentralizedNamespaceCache.remove(decentralizedNamespace).discard - - case TopologyChangeOp.Replace => - val ownerGraphs = tx.mapping.owners.forgetNE.toSeq.map(getAuthorizationGraphForNamespace) - decentralizedNamespaceCache - .put( - decentralizedNamespace, - (tx.mapping, dnsGraph.copy(dnd = tx.mapping, ownerGraphs = ownerGraphs)), - ) - .discard - } + val decentralizedGraph = DecentralizedNamespaceAuthorizationGraph(tx.mapping, ownerGraphs) + decentralizedNamespaceCache.put(decentralizedNamespace, Some(decentralizedGraph)).discard } - isAuthorized } - private def determineRelevantUidsAndNamespaces( - toValidate: GenericSignedTopologyTransaction, - inStore: Option[GenericTopologyTransaction], - ): Future[(UpdateAggregation, GenericValidatedTopologyTransaction)] = { - def verifyDomain( - tx: GenericSignedTopologyTransaction - ): Either[TopologyTransactionRejection, Unit] = - tx.restrictedToDomain match { - case Some(txDomainId) => - Either.cond( - domainId.forall(_ == txDomainId), - (), - TopologyTransactionRejection.InvalidDomain(txDomainId), - ) - case None => Right(()) - } - - // we need to figure out for which namespaces and uids we need to load the validation checks 
- // and for which uids and namespaces we'll have to perform a cascading update - EitherT - .fromEither[Future](verifyDomain(toValidate)) - .fold( - rejection => - (UpdateAggregation(), ValidatedTopologyTransaction(toValidate, Some(rejection))), - _ => - ( - UpdateAggregation().add( - toValidate.mapping, - inStore, - ), - ValidatedTopologyTransaction(toValidate, None), - ), - ) - } + private def verifyDomain( + toValidate: GenericSignedTopologyTransaction + ): GenericValidatedTopologyTransaction = + toValidate.restrictedToDomain.zip(domainId) match { + case Some((txDomainId, underlyingDomainId)) if txDomainId != underlyingDomainId => + ValidatedTopologyTransaction( + toValidate, + Some(TopologyTransactionRejection.InvalidDomain(txDomainId)), + ) + case _ => ValidatedTopologyTransaction(toValidate, None) + } } diff --git a/sdk/canton/community/base/src/main/scala/com/digitalasset/canton/topology/processing/TransactionAuthorizationValidator.scala b/sdk/canton/community/base/src/main/scala/com/digitalasset/canton/topology/processing/TransactionAuthorizationValidator.scala index 55614483fada..b3d31a65c092 100644 --- a/sdk/canton/community/base/src/main/scala/com/digitalasset/canton/topology/processing/TransactionAuthorizationValidator.scala +++ b/sdk/canton/community/base/src/main/scala/com/digitalasset/canton/topology/processing/TransactionAuthorizationValidator.scala @@ -5,7 +5,7 @@ package com.digitalasset.canton.topology.processing import cats.syntax.bifunctor.* import cats.syntax.foldable.* -import com.digitalasset.canton.crypto.{CryptoPureApi, Fingerprint, SigningPublicKey} +import com.digitalasset.canton.crypto.{CryptoPureApi, SigningPublicKey} import com.digitalasset.canton.data.CantonTimestamp import com.digitalasset.canton.discard.Implicits.DiscardOps import com.digitalasset.canton.logging.NamedLogging @@ -16,11 +16,12 @@ import com.digitalasset.canton.topology.store.{ TopologyTransactionRejection, } import com.digitalasset.canton.topology.transaction.SignedTopologyTransaction.GenericSignedTopologyTransaction -import com.digitalasset.canton.topology.transaction.TopologyMapping.RequiredAuthAuthorizations +import com.digitalasset.canton.topology.transaction.TopologyMapping.ReferencedAuthorizations import com.digitalasset.canton.topology.transaction.* import com.digitalasset.canton.topology.{Namespace, UniqueIdentifier} import com.digitalasset.canton.tracing.TraceContext import com.digitalasset.canton.util.ErrorUtil +import com.digitalasset.canton.util.ShowUtil.* import scala.collection.concurrent.TrieMap import scala.concurrent.{ExecutionContext, Future} @@ -30,19 +31,36 @@ trait TransactionAuthorizationValidator { this: NamedLogging => + /** Invariants: + * - If it stores ns -> graph, then graph consists of all active namespace delegations for ns. + * - If it stores ns -> graph and graph is non-empty, then there is no decentralized namespace delegation active for ns. + */ protected val namespaceCache = new TrieMap[Namespace, AuthorizationGraph]() - protected val identifierDelegationCache = - new TrieMap[UniqueIdentifier, Set[AuthorizedIdentifierDelegation]]() + + /** Invariants: + * - If it stores ns -> Some(graph), then the graph corresponds to the active decentralized namespace delegation for ns. + * Moreover, for each owner o, the owner graph is namespaceCache(o). + * - If it stores ns -> None, then there is no decentralized namespace delegation active for ns. + * - If it stores ns -> Some(graph), then there is no direct namespace delegation active for ns. 
+ */ protected val decentralizedNamespaceCache = new TrieMap[ Namespace, - (DecentralizedNamespaceDefinition, DecentralizedNamespaceAuthorizationGraph), + Option[DecentralizedNamespaceAuthorizationGraph], ]() + /** Invariants: + * - If it stores id -> ids, then ids consists of all active identifier delegations for id. + */ + protected val identifierDelegationCache = + new TrieMap[UniqueIdentifier, Set[AuthorizedIdentifierDelegation]]() + protected def store: TopologyStore[TopologyStoreId] protected def pureCrypto: CryptoPureApi + implicit protected def executionContext: ExecutionContext + def validateSignaturesAndDetermineMissingAuthorizers( toValidate: GenericSignedTopologyTransaction, inStore: Option[GenericSignedTopologyTransaction], @@ -50,51 +68,45 @@ trait TransactionAuthorizationValidator { traceContext: TraceContext ): Either[ TopologyTransactionRejection, - (GenericSignedTopologyTransaction, RequiredAuthAuthorizations), + (GenericSignedTopologyTransaction, ReferencedAuthorizations), ] = { // first determine all possible namespaces and uids that need to sign the transaction val requiredAuth = toValidate.mapping.requiredAuth(inStore.map(_.transaction)) logger.debug(s"Required authorizations: $requiredAuth") - val required = requiredAuth - .foldMap( - namespaceCheck = rns => - RequiredAuthAuthorizations( - namespacesWithRoot = - if (rns.requireRootDelegation) rns.namespaces else Set.empty[Namespace], - namespaces = if (rns.requireRootDelegation) Set.empty[Namespace] else rns.namespaces, - ), - uidCheck = ruid => RequiredAuthAuthorizations(uids = ruid.uids, extraKeys = ruid.extraKeys), - ) + val referencedAuth = requiredAuth.referenced val signingKeys = toValidate.signatures.map(_.signedBy) val namespaceWithRootAuthorizations = - required.namespacesWithRoot.map { ns => - val check = getAuthorizationCheckForNamespace(ns) + referencedAuth.namespacesWithRoot.map { ns => + // This succeeds because loading of uid is requested in AuthorizationKeys.requiredForCheckingAuthorization + val check = tryGetAuthorizationCheckForNamespace(ns) val keysUsed = check.keysSupportingAuthorization( signingKeys, requireRoot = true, ) val keysAuthorizeNamespace = check.existsAuthorizedKeyIn(signingKeys, requireRoot = true) - (ns -> (keysAuthorizeNamespace, keysUsed)) + ns -> (keysAuthorizeNamespace, keysUsed) }.toMap // Now let's determine which namespaces and uids actually delegated to any of the keys - val namespaceAuthorizations = required.namespaces.map { ns => - val check = getAuthorizationCheckForNamespace(ns) + val namespaceAuthorizations = referencedAuth.namespaces.map { ns => + // This succeeds because loading of uid is requested in AuthorizationKeys.requiredForCheckingAuthorization + val check = tryGetAuthorizationCheckForNamespace(ns) val keysUsed = check.keysSupportingAuthorization( signingKeys, requireRoot = false, ) val keysAuthorizeNamespace = check.existsAuthorizedKeyIn(signingKeys, requireRoot = false) - (ns -> (keysAuthorizeNamespace, keysUsed)) + ns -> (keysAuthorizeNamespace, keysUsed) }.toMap val uidAuthorizations = - required.uids.map { uid => - val check = getAuthorizationCheckForNamespace(uid.namespace) + referencedAuth.uids.map { uid => + // This succeeds because loading of uid.namespace is requested in AuthorizationKeys.requiredForCheckingAuthorization + val check = tryGetAuthorizationCheckForNamespace(uid.namespace) val keysUsed = check.keysSupportingAuthorization( signingKeys, requireRoot = false, @@ -103,15 +115,23 @@ trait TransactionAuthorizationValidator { 
check.existsAuthorizedKeyIn(signingKeys, requireRoot = false) val keyForUid = - getAuthorizedIdentifierDelegation(check, uid, toValidate.signatures.map(_.signedBy)) + // This succeeds because loading of uid is requested in AuthorizationKeys.requiredForCheckingAuthorization + tryGetIdentifierDelegationsForUid(uid) + .find(aid => + signingKeys.contains(aid.mapping.target.id) && + check.existsAuthorizedKeyIn( + aid.signingKeys, + requireRoot = false, + ) + ) .map(_.mapping.target) - (uid -> (keysAuthorizeNamespace || keyForUid.nonEmpty, keysUsed ++ keyForUid)) + uid -> (keysAuthorizeNamespace || keyForUid.nonEmpty, keysUsed ++ keyForUid) }.toMap val extraKeyAuthorizations = { // assume extra keys are not found - required.extraKeys.map(k => k -> (false, Set.empty[SigningPublicKey])).toMap ++ + referencedAuth.extraKeys.map(k => k -> (false, Set.empty[SigningPublicKey])).toMap ++ // and replace with those that were actually found // we have to dive into the owner to key mapping directly here, because we don't // need to keep it around (like we do for namespace delegations) and the OTK is the @@ -125,7 +145,7 @@ trait TransactionAuthorizationValidator { // only consider the public key as "found" if: // * it's required and // * actually used to sign the transaction - if required.extraKeys(k.fingerprint) && signingKeys(k.fingerprint) => + if referencedAuth.extraKeys(k.fingerprint) && signingKeys(k.fingerprint) => k.fingerprint -> (true, Set(k)) } } @@ -195,7 +215,7 @@ trait TransactionAuthorizationValidator { def onlyFullyAuthorized[A](map: Map[A, (Boolean, ?)]): Set[A] = map.collect { case (a, (true, _)) => a }.toSet - val actual = RequiredAuthAuthorizations( + val actual = ReferencedAuthorizations( namespacesWithRoot = onlyFullyAuthorized(namespaceWithRootAuthorizations), namespaces = onlyFullyAuthorized(namespaceAuthorizations), uids = onlyFullyAuthorized(uidAuthorizations), @@ -205,7 +225,7 @@ trait TransactionAuthorizationValidator { txWithSignaturesToVerify, requiredAuth .satisfiedByActualAuthorizers(actual) - .fold(identity, _ => RequiredAuthAuthorizations.empty), + .fold(identity, _ => ReferencedAuthorizations.empty), ) } } @@ -229,80 +249,75 @@ trait TransactionAuthorizationValidator { } } - private def getAuthorizedIdentifierDelegation( - graph: AuthorizationCheck, - uid: UniqueIdentifier, - authKeys: Set[Fingerprint], - ): Option[AuthorizedIdentifierDelegation] = { - getIdentifierDelegationsForUid(uid) - .find(aid => - authKeys(aid.mapping.target.id) && graph.existsAuthorizedKeyIn( - aid.signingKeys, - requireRoot = false, - ) - ) - } - - protected def getIdentifierDelegationsForUid( + protected def tryGetIdentifierDelegationsForUid( uid: UniqueIdentifier - ): Set[AuthorizedIdentifierDelegation] = { - identifierDelegationCache - .getOrElse(uid, Set()) - } + )(implicit traceContext: TraceContext): Set[AuthorizedIdentifierDelegation] = + identifierDelegationCache.getOrElse( + uid, + ErrorUtil.invalidState(s"Cache miss for identifier $uid"), + ) - protected def getAuthorizationCheckForNamespace( + private def tryGetAuthorizationCheckForNamespace( namespace: Namespace - ): AuthorizationCheck = { - val decentralizedNamespaceCheck = decentralizedNamespaceCache.get(namespace).map(_._2) - val namespaceCheck = namespaceCache.get(namespace) - decentralizedNamespaceCheck - .orElse(namespaceCheck) - .getOrElse(AuthorizationCheck.empty) + )(implicit traceContext: TraceContext): AuthorizationCheck = { + val directGraph = tryGetAuthorizationGraphForNamespace(namespace) + val decentralizedGraphO = 
decentralizedNamespaceCache.getOrElse( + namespace, + ErrorUtil.invalidState(s"Cache miss for decentralized namespace $namespace"), + ) + + decentralizedGraphO match { + case Some(decentralizedGraph) => + val directGraphNodes = directGraph.nodes + ErrorUtil.requireState( + directGraphNodes.isEmpty, + show"Namespace $namespace has both direct and decentralized delegations.\n${decentralizedGraph.dnd}\nDirect delegations for: $directGraphNodes", + ) + decentralizedGraph + case None => directGraph + } } - protected def getAuthorizationGraphForNamespace( + protected def tryGetAuthorizationGraphForNamespace( namespace: Namespace - ): AuthorizationGraph = { - namespaceCache.getOrElseUpdate( + )(implicit traceContext: TraceContext): AuthorizationGraph = { + namespaceCache.getOrElse( namespace, - new AuthorizationGraph(namespace, extraDebugInfo = false, loggerFactory), + ErrorUtil.invalidState(s"Cache miss for direct namespace $namespace"), ) } - protected def loadAuthorizationGraphs( - timestamp: CantonTimestamp, + protected def loadNamespaceCaches( + effectiveTime: CantonTimestamp, namespaces: Set[Namespace], - )(implicit executionContext: ExecutionContext, traceContext: TraceContext): Future[Unit] = { - val uncachedNamespaces = - namespaces -- namespaceCache.keySet -- decentralizedNamespaceCache.keySet // only load the ones we don't already hold in memory + )(implicit traceContext: TraceContext): Future[Unit] = { + + // only load the ones we don't already hold in memory + val decentralizedNamespacesToLoad = namespaces -- decentralizedNamespaceCache.keys for { storedDecentralizedNamespace <- store.findPositiveTransactions( - timestamp, + effectiveTime, asOfInclusive = false, isProposal = false, types = Seq(DecentralizedNamespaceDefinition.code), filterUid = None, - filterNamespace = Some(uncachedNamespaces.toSeq), + filterNamespace = Some(decentralizedNamespacesToLoad.toSeq), ) - decentralizedNamespaces = storedDecentralizedNamespace + decentralizedNamespaceDefinitions = storedDecentralizedNamespace .collectOfMapping[DecentralizedNamespaceDefinition] .collectLatestByUniqueKey .result .map(_.transaction) - foundDecentralizedNamespaces = decentralizedNamespaces.map(_.mapping.namespace) - decentralizedNamespaceOwnersToLoad = decentralizedNamespaces + + // We need to add queries for owners here, because the caller cannot know them up front. 
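+ // For example (illustrative): if the store returns a decentralized namespace definition owned by ns1 and ns2, then ns1 and ns2 are added to namespacesToLoad below even though the caller only asked for the decentralized namespace itself.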
+ decentralizedNamespaceOwners = decentralizedNamespaceDefinitions .flatMap(_.mapping.owners) - .toSet -- namespaceCache.keySet - namespacesToLoad = - uncachedNamespaces - // load decentralized namespaces for DND owners that we haven't loaded yet - ++ decentralizedNamespaceOwnersToLoad - // if we found a decentralized namespace, we don't need to look for a namespace delegation for the DND namespace - -- foundDecentralizedNamespaces + .toSet + namespacesToLoad = namespaces ++ decentralizedNamespaceOwners -- namespaceCache.keys storedNamespaceDelegations <- store.findPositiveTransactions( - timestamp, + effectiveTime, asOfInclusive = false, isProposal = false, types = Seq(NamespaceDelegation.code), @@ -315,112 +330,105 @@ trait TransactionAuthorizationValidator { .result .map(_.transaction) } yield { - val missingNSDs = - namespacesToLoad -- namespaceDelegations.map(_.mapping.namespace).toSet - if (missingNSDs.nonEmpty) - logger.debug(s"Didn't find a namespace delegations for $missingNSDs at $timestamp") - val namespaceToTx = namespaceDelegations + namespaceDelegations .groupBy(_.mapping.namespace) - namespaceToTx .foreach { case (namespace, transactions) => - ErrorUtil.requireArgument( - !namespaceCache.isDefinedAt(namespace), - s"graph shouldn't exist before loading ${namespace} vs ${namespaceCache.get(namespace)}", - ) val graph = new AuthorizationGraph( namespace, extraDebugInfo = false, loggerFactory, ) - namespaceCache.put(namespace, graph).discard - // use un-authorized batch load. while we are checking for proper authorization when we - // add a certificate the first time, we allow for the situation where an intermediate certificate - // is currently expired, but might be replaced with another cert. in this case, - // the authorization check would fail. - // unauthorized certificates are not really an issue as we'll simply exclude them when calculating - // the connected graph - graph.unauthorizedAdd(transactions.map(AuthorizedTopologyTransaction(_))) + graph.replace(transactions.map(AuthorizedTopologyTransaction(_))) + val previous = namespaceCache.put(namespace, graph) + ErrorUtil.requireState( + previous.isEmpty, + s"Unexpected cache hit for namespace $namespace: $previous", + ) + val conflictingDecentralizedNamespaceDefinition = + decentralizedNamespaceCache.get(namespace).flatten + ErrorUtil.requireState( + conflictingDecentralizedNamespaceDefinition.isEmpty, + s"Conflicting decentralized namespace definition for namespace $namespace: $conflictingDecentralizedNamespaceDefinition", + ) } - decentralizedNamespaces.foreach { dns => - val namespace = dns.mapping.namespace - ErrorUtil.requireArgument( - !decentralizedNamespaceCache.isDefinedAt(namespace), - s"decentralized namespace shouldn't already be cached before loading $namespace vs ${decentralizedNamespaceCache - .get(namespace)}", - ) - val graphs = dns.mapping.owners.forgetNE.toSeq.map(ns => - namespaceCache.getOrElseUpdate( - ns, + namespacesToLoad.foreach { namespace => + namespaceCache + .putIfAbsent( + namespace, new AuthorizationGraph( - ns, + namespace, extraDebugInfo = false, loggerFactory, ), ) - ) - decentralizedNamespaceCache - .put( - namespace, - ( - dns.mapping, - DecentralizedNamespaceAuthorizationGraph( - dns.mapping, - graphs, - ), - ), - ) .discard } + + decentralizedNamespaceDefinitions.foreach { dns => + val namespace = dns.mapping.namespace + val ownerGraphs = + dns.mapping.owners.forgetNE.toSeq.map( + // This will succeed, because owner graphs have been loaded just above. 
+ tryGetAuthorizationGraphForNamespace(_) + ) + val decentralizedGraph = DecentralizedNamespaceAuthorizationGraph( + dns.mapping, + ownerGraphs, + ) + val previous = decentralizedNamespaceCache.put(namespace, Some(decentralizedGraph)) + ErrorUtil.requireState( + previous.isEmpty, + s"Unexpected cache hit for decentralized namespace $namespace: $previous", + ) + val conflictingDirectGraphNodes = namespaceCache.get(namespace).toList.flatMap(_.nodes) + ErrorUtil.requireState( + conflictingDirectGraphNodes.isEmpty, + s"Conflicting direct namespace graph for namespace $namespace: $conflictingDirectGraphNodes", + ) + } + + decentralizedNamespacesToLoad.foreach( + decentralizedNamespaceCache.putIfAbsent(_, None).discard + ) } } - protected def loadIdentifierDelegations( - timestamp: CantonTimestamp, - namespaces: Seq[Namespace], + protected def loadIdentifierDelegationCaches( + effectiveTime: CantonTimestamp, uids: Set[UniqueIdentifier], )(implicit - traceContext: TraceContext, - executionContext: ExecutionContext, - ): Future[Set[UniqueIdentifier]] = { - val uidFilter = (uids -- identifierDelegationCache.keySet) - store - .findPositiveTransactions( - timestamp, - asOfInclusive = false, - isProposal = false, - types = Seq(IdentifierDelegation.code), - filterUid = Some(uidFilter.toSeq), - filterNamespace = None, - ) - .map { stored => - val loaded = stored.result.flatMap( - _.transaction.selectMapping[IdentifierDelegation].map(AuthorizedTopologyTransaction(_)) + traceContext: TraceContext + ): Future[Unit] = { + val identifierDelegationsToLoad = uids -- identifierDelegationCache.keySet + for { + stored <- store + .findPositiveTransactions( + effectiveTime, + asOfInclusive = false, + isProposal = false, + types = Seq(IdentifierDelegation.code), + filterUid = Some(identifierDelegationsToLoad.toSeq), + filterNamespace = None, + ) + } yield { + val identifierDelegations = stored + .collectOfMapping[IdentifierDelegation] + .collectLatestByUniqueKey + .result + .map(identifierDelegation => + AuthorizedTopologyTransaction(identifierDelegation.transaction) ) - val start = - identifierDelegationCache.keySet - .filter(cached => namespaces.contains(cached.namespace)) - .toSet - loaded.foldLeft(start) { case (acc, item) => - mergeLoadedIdentifierDelegation(item) - val uid = item.mapping.identifier - if (namespaces.contains(uid.namespace)) - acc + uid - else acc - } + identifierDelegations.groupBy(_.mapping.identifier).foreach { case (uid, delegations) => + val previous = identifierDelegationCache.put(uid, delegations.toSet) + ErrorUtil.requireState( + previous.isEmpty, + s"Unexpected cache hit for identiefier $uid: $previous", + ) } - } - - private def mergeLoadedIdentifierDelegation(item: AuthorizedIdentifierDelegation): Unit = - updateIdentifierDelegationCache(item.mapping.identifier, _ + item) - - protected def updateIdentifierDelegationCache( - uid: UniqueIdentifier, - op: Set[AuthorizedIdentifierDelegation] => Set[AuthorizedIdentifierDelegation], - ): Unit = { - val cur = identifierDelegationCache.getOrElseUpdate(uid, Set()) - identifierDelegationCache.update(uid, op(cur)).discard + identifierDelegationsToLoad.foreach(identifierDelegationCache.putIfAbsent(_, Set()).discard) + } } } diff --git a/sdk/canton/community/base/src/main/scala/com/digitalasset/canton/topology/store/TopologyStore.scala b/sdk/canton/community/base/src/main/scala/com/digitalasset/canton/topology/store/TopologyStore.scala index 9b69028e82f9..26f4977f23a2 100644 --- 
a/sdk/canton/community/base/src/main/scala/com/digitalasset/canton/topology/store/TopologyStore.scala +++ b/sdk/canton/community/base/src/main/scala/com/digitalasset/canton/topology/store/TopologyStore.scala @@ -454,7 +454,7 @@ object TopologyStore { // TopologyStore.filterInitialParticipantDispatchingTransactions transactions.map(_.transaction).collect { case tx @ SignedTopologyTransaction( - TopologyTransaction(_, _, DomainTrustCertificate(`participantId`, `domainId`, _, _)), + TopologyTransaction(_, _, DomainTrustCertificate(`participantId`, `domainId`)), _, _, ) => diff --git a/sdk/canton/community/base/src/main/scala/com/digitalasset/canton/topology/store/TopologyTransactionRejection.scala b/sdk/canton/community/base/src/main/scala/com/digitalasset/canton/topology/store/TopologyTransactionRejection.scala index 317053556c97..332f1a604c18 100644 --- a/sdk/canton/community/base/src/main/scala/com/digitalasset/canton/topology/store/TopologyTransactionRejection.scala +++ b/sdk/canton/community/base/src/main/scala/com/digitalasset/canton/topology/store/TopologyTransactionRejection.scala @@ -174,25 +174,6 @@ object TopologyTransactionRejection { ) } - final case class PartyExceedsHostingLimit( - partyId: PartyId, - limit: Int, - numParticipants: Int, - ) extends TopologyTransactionRejection { - override def asString: String = - s"Party $partyId exceeds hosting limit of $limit with desired number of $numParticipants hosting participants." - - override def toTopologyManagerError(implicit elc: ErrorLoggingContext): TopologyManagerError = - TopologyManagerError.PartyExceedsHostingLimit.Reject(partyId, limit, numParticipants) - - override def pretty: Pretty[PartyExceedsHostingLimit.this.type] = - prettyOfClass( - param("partyId", _.partyId), - param("limit", _.limit), - param("number of hosting participants", _.numParticipants), - ) - } - final case class MissingMappings(missing: Map[Member, Seq[TopologyMapping.Code]]) extends TopologyTransactionRejection { override def asString: String = { @@ -229,26 +210,28 @@ object TopologyTransactionRejection { ) } - final case class PartyIdIsAdminParty(partyId: PartyId) extends TopologyTransactionRejection { + final case class PartyIdConflictWithAdminParty(partyId: PartyId) + extends TopologyTransactionRejection { override def asString: String = s"The partyId $partyId is the same as an already existing admin party." override def toTopologyManagerError(implicit elc: ErrorLoggingContext): TopologyManagerError = - TopologyManagerError.PartyIdIsAdminParty.Reject(partyId) + TopologyManagerError.PartyIdConflictWithAdminParty.Reject(partyId) - override def pretty: Pretty[PartyIdIsAdminParty.this.type] = prettyOfClass( + override def pretty: Pretty[PartyIdConflictWithAdminParty.this.type] = prettyOfClass( param("partyId", _.partyId) ) } - final case class ParticipantIdClashesWithPartyId(participantId: ParticipantId, partyId: PartyId) + final case class ParticipantIdConflictWithPartyId(participantId: ParticipantId, partyId: PartyId) extends TopologyTransactionRejection { - override def asString: String = ??? + override def asString: String = + s"Tried to onboard participant $participantId while party $partyId with the same UID already exists." 
override def toTopologyManagerError(implicit elc: ErrorLoggingContext): TopologyManagerError = - TopologyManagerError.ParticipantIdClashesWithPartyId.Reject(participantId, partyId) + TopologyManagerError.ParticipantIdConflictWithPartyId.Reject(participantId, partyId) - override def pretty: Pretty[ParticipantIdClashesWithPartyId.this.type] = + override def pretty: Pretty[ParticipantIdConflictWithPartyId.this.type] = prettyOfClass( param("participantId", _.participantId), param("partyId", _.partyId), diff --git a/sdk/canton/community/base/src/main/scala/com/digitalasset/canton/topology/transaction/TopologyMapping.scala b/sdk/canton/community/base/src/main/scala/com/digitalasset/canton/topology/transaction/TopologyMapping.scala index 383e95d069e1..4026af4ba817 100644 --- a/sdk/canton/community/base/src/main/scala/com/digitalasset/canton/topology/transaction/TopologyMapping.scala +++ b/sdk/canton/community/base/src/main/scala/com/digitalasset/canton/topology/transaction/TopologyMapping.scala @@ -165,7 +165,7 @@ object TopologyMapping { } // Small wrapper to not have to work with (Set[Namespace], Set[Namespace], Set[Uid]) - final case class RequiredAuthAuthorizations( + final case class ReferencedAuthorizations( namespacesWithRoot: Set[Namespace] = Set.empty, namespaces: Set[Namespace] = Set.empty, uids: Set[UniqueIdentifier] = Set.empty, @@ -174,7 +174,7 @@ object TopologyMapping { def isEmpty: Boolean = namespacesWithRoot.isEmpty && namespaces.isEmpty && uids.isEmpty && extraKeys.isEmpty - override def pretty: Pretty[RequiredAuthAuthorizations.this.type] = prettyOfClass( + override def pretty: Pretty[ReferencedAuthorizations.this.type] = prettyOfClass( paramIfNonEmpty("namespacesWithRoot", _.namespacesWithRoot), paramIfNonEmpty("namespaces", _.namespaces), paramIfNonEmpty("uids", _.uids), @@ -182,19 +182,19 @@ object TopologyMapping { ) } - object RequiredAuthAuthorizations { + object ReferencedAuthorizations { - val empty: RequiredAuthAuthorizations = RequiredAuthAuthorizations() + val empty: ReferencedAuthorizations = ReferencedAuthorizations() - implicit val monoid: Monoid[RequiredAuthAuthorizations] = - new Monoid[RequiredAuthAuthorizations] { - override def empty: RequiredAuthAuthorizations = RequiredAuthAuthorizations.empty + implicit val monoid: Monoid[ReferencedAuthorizations] = + new Monoid[ReferencedAuthorizations] { + override def empty: ReferencedAuthorizations = ReferencedAuthorizations.empty override def combine( - x: RequiredAuthAuthorizations, - y: RequiredAuthAuthorizations, - ): RequiredAuthAuthorizations = - RequiredAuthAuthorizations( + x: ReferencedAuthorizations, + y: ReferencedAuthorizations, + ): ReferencedAuthorizations = + ReferencedAuthorizations( namespacesWithRoot = x.namespacesWithRoot ++ y.namespacesWithRoot, namespaces = x.namespaces ++ y.namespaces, uids = x.uids ++ y.uids, @@ -206,36 +206,28 @@ object TopologyMapping { sealed trait RequiredAuth extends PrettyPrinting { def requireRootDelegation: Boolean = false def satisfiedByActualAuthorizers( - provided: RequiredAuthAuthorizations - ): Either[RequiredAuthAuthorizations, Unit] + provided: ReferencedAuthorizations + ): Either[ReferencedAuthorizations, Unit] final def or(next: RequiredAuth): RequiredAuth = RequiredAuth.Or(this, next) - final def foldMap[T]( - namespaceCheck: RequiredNamespaces => T, - uidCheck: RequiredUids => T, - )(implicit T: Monoid[T]): T = { - def loop(x: RequiredAuth): T = x match { - case ns @ RequiredNamespaces(_, _) => namespaceCheck(ns) - case uids @ RequiredUids(_, _) => 
uidCheck(uids) - case EmptyAuthorization => T.empty - case Or(first, second) => T.combine(loop(first), loop(second)) - } - loop(this) - } - - def authorizations: RequiredAuthAuthorizations + /** Authorizations referenced by this instance. + * Note that the result is not equivalent to this instance, as an "or" gets translated to an "and". + * Instead, the result indicates which authorization keys need to be evaluated in order to check + * if this RequiredAuth is met. + */ + def referenced: ReferencedAuthorizations } object RequiredAuth { private[transaction] case object EmptyAuthorization extends RequiredAuth { override def satisfiedByActualAuthorizers( - provided: RequiredAuthAuthorizations - ): Either[RequiredAuthAuthorizations, Unit] = Either.unit + provided: ReferencedAuthorizations + ): Either[ReferencedAuthorizations, Unit] = Either.unit - override def authorizations: RequiredAuthAuthorizations = RequiredAuthAuthorizations() + override def referenced: ReferencedAuthorizations = ReferencedAuthorizations() override def pretty: Pretty[EmptyAuthorization.this.type] = adHocPrettyInstance } @@ -245,21 +237,21 @@ object TopologyMapping { override val requireRootDelegation: Boolean = false, ) extends RequiredAuth { override def satisfiedByActualAuthorizers( - provided: RequiredAuthAuthorizations - ): Either[RequiredAuthAuthorizations, Unit] = { + provided: ReferencedAuthorizations + ): Either[ReferencedAuthorizations, Unit] = { val filter = if (requireRootDelegation) provided.namespacesWithRoot else provided.namespaces val missing = namespaces.filter(ns => !filter(ns)) Either.cond( missing.isEmpty, (), - RequiredAuthAuthorizations( + ReferencedAuthorizations( namespacesWithRoot = if (requireRootDelegation) missing else Set.empty, namespaces = if (requireRootDelegation) Set.empty else missing, ), ) } - override def authorizations: RequiredAuthAuthorizations = RequiredAuthAuthorizations( + override def referenced: ReferencedAuthorizations = ReferencedAuthorizations( namespacesWithRoot = if (requireRootDelegation) namespaces else Set.empty, namespaces = if (requireRootDelegation) Set.empty else namespaces, ) @@ -275,13 +267,13 @@ object TopologyMapping { extraKeys: Set[Fingerprint] = Set.empty, ) extends RequiredAuth { override def satisfiedByActualAuthorizers( - provided: RequiredAuthAuthorizations - ): Either[RequiredAuthAuthorizations, Unit] = { + provided: ReferencedAuthorizations + ): Either[ReferencedAuthorizations, Unit] = { val missingUids = uids.filter(uid => !provided.uids(uid) && !provided.namespaces(uid.namespace)) val missingExtraKeys = extraKeys -- provided.extraKeys val missingAuth = - RequiredAuthAuthorizations(uids = missingUids, extraKeys = missingExtraKeys) + ReferencedAuthorizations(uids = missingUids, extraKeys = missingExtraKeys) Either.cond( missingAuth.isEmpty, (), @@ -289,7 +281,7 @@ object TopologyMapping { ) } - override def authorizations: RequiredAuthAuthorizations = RequiredAuthAuthorizations( + override def referenced: ReferencedAuthorizations = ReferencedAuthorizations( uids = uids, extraKeys = extraKeys, ) @@ -305,14 +297,14 @@ object TopologyMapping { second: RequiredAuth, ) extends RequiredAuth { override def satisfiedByActualAuthorizers( - provided: RequiredAuthAuthorizations - ): Either[RequiredAuthAuthorizations, Unit] = + provided: ReferencedAuthorizations + ): Either[ReferencedAuthorizations, Unit] = first .satisfiedByActualAuthorizers(provided) .orElse(second.satisfiedByActualAuthorizers(provided)) - override def authorizations: RequiredAuthAuthorizations 
= - RequiredAuthAuthorizations.monoid.combine(first.authorizations, second.authorizations) + override def referenced: ReferencedAuthorizations = + ReferencedAuthorizations.monoid.combine(first.referenced, second.referenced) override def pretty: Pretty[Or.this.type] = prettyOfClass(unnamedParam(_.first), unnamedParam(_.second)) @@ -493,23 +485,18 @@ final case class DecentralizedNamespaceDefinition private ( override def requiredAuth( previous: Option[TopologyTransaction[TopologyChangeOp, TopologyMapping]] ): RequiredAuth = { - previous match { - case None => - RequiredNamespaces(owners.forgetNE) - case Some( - TopologyTransaction( + previous + .collect { + case TopologyTransaction( _op, _serial, DecentralizedNamespaceDefinition(`namespace`, _previousThreshold, previousOwners), - ) - ) => - val added = owners.diff(previousOwners) - // all added owners and the quorum of existing owners MUST sign - RequiredNamespaces(added + namespace) - case Some(_topoTx) => - // TODO(#14048): proper error or ignore - sys.error(s"unexpected transaction data: $previous") - } + ) => + val added = owners.diff(previousOwners) + // all added owners and the quorum of existing owners MUST sign + RequiredNamespaces(added + namespace) + } + .getOrElse(RequiredNamespaces(owners.forgetNE)) } override def uniqueKey: MappingHash = DecentralizedNamespaceDefinition.uniqueKey(namespace) @@ -708,17 +695,12 @@ object OwnerToKeyMapping { final case class DomainTrustCertificate( participantId: ParticipantId, domainId: DomainId, - // TODO(#15399): respect this restriction when reassigning contracts - transferOnlyToGivenTargetDomains: Boolean, - targetDomains: Seq[DomainId], ) extends TopologyMapping { def toProto: v30.DomainTrustCertificate = v30.DomainTrustCertificate( participantUid = participantId.uid.toProtoPrimitive, domain = domainId.toProtoPrimitive, - transferOnlyToGivenTargetDomains = transferOnlyToGivenTargetDomains, - targetDomains = targetDomains.map(_.toProtoPrimitive), ) override def toProtoV30: v30.TopologyMapping = @@ -761,15 +743,9 @@ object DomainTrustCertificate { "participant_uid", ) domainId <- DomainId.fromProtoPrimitive(value.domain, "domain") - transferOnlyToGivenTargetDomains = value.transferOnlyToGivenTargetDomains - targetDomains <- value.targetDomains.traverse( - DomainId.fromProtoPrimitive(_, "target_domains") - ) } yield DomainTrustCertificate( participantId, domainId, - transferOnlyToGivenTargetDomains, - targetDomains, ) } @@ -951,14 +927,12 @@ object ParticipantDomainPermission { final case class PartyHostingLimits( domainId: DomainId, partyId: PartyId, - quota: Int, ) extends TopologyMapping { def toProto: v30.PartyHostingLimits = v30.PartyHostingLimits( domain = domainId.toProtoPrimitive, party = partyId.toProtoPrimitive, - quota = quota, ) override def toProtoV30: v30.TopologyMapping = @@ -998,8 +972,7 @@ object PartyHostingLimits { for { domainId <- DomainId.fromProtoPrimitive(value.domain, "domain") partyId <- PartyId.fromProtoPrimitive(value.party, "party") - quota = value.quota - } yield PartyHostingLimits(domainId, partyId, quota) + } yield PartyHostingLimits(domainId, partyId) } // Package vetting diff --git a/sdk/canton/community/base/src/main/scala/com/digitalasset/canton/topology/transaction/TopologyMappingChecks.scala b/sdk/canton/community/base/src/main/scala/com/digitalasset/canton/topology/transaction/TopologyMappingChecks.scala index 052d2644e80a..777211f9c13a 100644 --- 
a/sdk/canton/community/base/src/main/scala/com/digitalasset/canton/topology/transaction/TopologyMappingChecks.scala +++ b/sdk/canton/community/base/src/main/scala/com/digitalasset/canton/topology/transaction/TopologyMappingChecks.scala @@ -7,6 +7,7 @@ import cats.data.EitherT import cats.instances.future.* import cats.instances.order.* import cats.syntax.semigroup.* +import com.digitalasset.canton.config.RequireTypes.PositiveInt import com.digitalasset.canton.crypto.KeyPurpose import com.digitalasset.canton.logging.{NamedLoggerFactory, NamedLogging} import com.digitalasset.canton.protocol.{DynamicDomainParameters, OnboardingRestriction} @@ -350,11 +351,15 @@ class ValidatingTopologyMappingChecks( .collectOfMapping[PartyToParticipant] .result .headOption - .map(_.mapping.partyId) + .map(_.mapping) _ <- conflictingPartyIdO match { - case Some(partyId) => - EitherT.leftT[Future, Unit][TopologyTransactionRejection]( - TopologyTransactionRejection.ParticipantIdClashesWithPartyId(participantId, partyId) + case Some(ptp) => + isExplicitAdminPartyAllocation( + ptp, + TopologyTransactionRejection.ParticipantIdConflictWithPartyId( + participantId, + ptp.partyId, + ), ) case None => EitherTUtil.unit[TopologyTransactionRejection] } @@ -398,14 +403,17 @@ class ValidatingTopologyMappingChecks( ) ) - // check that the PTP doesn't try to allocate a party that is the same as an already existing admin party + // if we found a DTC with the same uid as the partyId, + // check that the PTP is an explicit admin party allocation, otherwise reject the PTP foundAdminPartyWithSameUID = participantTransactions .collectOfMapping[DomainTrustCertificate] .result - .find(_.mapping.participantId.uid == mapping.partyId.uid) - _ <- EitherTUtil.condUnitET[Future]( - foundAdminPartyWithSameUID.isEmpty, - TopologyTransactionRejection.PartyIdIsAdminParty(mapping.partyId), + .exists(_.mapping.participantId.uid == mapping.partyId.uid) + _ <- EitherTUtil.ifThenET(foundAdminPartyWithSameUID)( + isExplicitAdminPartyAllocation( + mapping, + TopologyTransactionRejection.PartyIdConflictWithAdminParty(mapping.partyId), + ) ) // check that all participants are known on the domain @@ -442,45 +450,8 @@ class ValidatingTopologyMappingChecks( } } - def checkHostingLimits(effective: EffectiveTime) = for { - hostingLimitsCandidates <- loadFromStore( - effective, - code = PartyHostingLimits.code, - filterUid = Some(Seq(toValidate.mapping.partyId.uid)), - ) - hostingLimits = hostingLimitsCandidates.result.view - .flatMap(_.selectMapping[PartyHostingLimits]) - .map(_.mapping.quota) - .toList - partyHostingLimit = hostingLimits match { - case Nil => // No hosting limits found. This is expected if no restrictions are in place - None - case quota :: Nil => Some(quota) - case multiple @ quota :: _ => - logger.error( - s"Multiple PartyHostingLimits at $effective ${multiple.size}. Using first one with quota $quota." - ) - Some(quota) - } - // TODO(#14050) load default party hosting limits from dynamic domain parameters in case the party - // doesn't have a specific PartyHostingLimits mapping issued by the domain. 
- _ <- partyHostingLimit match { - case Some(limit) => - EitherTUtil.condUnitET[Future][TopologyTransactionRejection]( - toValidate.mapping.participants.size <= limit, - TopologyTransactionRejection.PartyExceedsHostingLimit( - toValidate.mapping.partyId, - limit, - toValidate.mapping.participants.size, - ), - ) - case None => EitherTUtil.unit[TopologyTransactionRejection] - } - } yield () - for { _ <- checkParticipants() - _ <- checkHostingLimits(effective) } yield () } @@ -697,4 +668,44 @@ class ValidatingTopologyMappingChecks( checkNoClashWithDecentralizedNamespaces() } + + /** Checks whether the given PTP is considered an explicit admin party allocation. This is true if all following conditions are met: + *
  - threshold == 1 + *   - groupAddressing == false + *   - there is only a single hosting participant + *     - with Submission permission + *     - participantId.adminParty == partyId + */ + private def isExplicitAdminPartyAllocation( + ptp: PartyToParticipant, + rejection:
      TopologyTransactionRejection, + ): EitherT[Future, TopologyTransactionRejection, Unit] = { + // check that the PTP doesn't try to allocate a party that is the same as an already existing admin party. + // we allow an explicit allocation of an admin like party though on the same participant + val singleHostingParticipant = + ptp.participants.sizeCompare(1) == 0 + + val partyIsAdminParty = + ptp.participants.forall(participant => + participant.participantId.adminParty == ptp.partyId && + participant.permission == ParticipantPermission.Submission + ) + + val noGroupAddressing = !ptp.groupAddressing + + // technically we don't need to check for threshold == 1, because we already require that there is only a single participant + // and the threshold may not exceed the number of participants. this is checked in PartyToParticipant.create + val threshold1 = ptp.threshold == PositiveInt.one + + EitherTUtil.condUnitET[Future]( + singleHostingParticipant && partyIsAdminParty && noGroupAddressing && threshold1, + rejection, + ) + } + } diff --git a/sdk/canton/community/base/src/main/scala/com/digitalasset/canton/util/retry/ExceptionRetryPolicy.scala b/sdk/canton/community/base/src/main/scala/com/digitalasset/canton/util/retry/ExceptionRetryPolicy.scala new file mode 100644 index 000000000000..bb11af1a9569 --- /dev/null +++ b/sdk/canton/community/base/src/main/scala/com/digitalasset/canton/util/retry/ExceptionRetryPolicy.scala @@ -0,0 +1,148 @@ +// Copyright (c) 2024 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. +// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.util.retry + +import com.digitalasset.canton.logging.{ErrorLoggingContext, TracedLogger} +import com.digitalasset.canton.tracing.TraceContext +import com.digitalasset.canton.util.LoggerUtil +import com.digitalasset.canton.util.retry.ErrorKind.* +import org.slf4j.event.Level + +import java.sql.SQLException +import scala.util.{Failure, Try} + +/** When using retry code in different contexts, different exceptions should be retried on. This trait provides a + * way to define what exceptions should be retried and which are fatal. + */ +trait ExceptionRetryPolicy { + + /** Classify the error kind for a given exception */ + protected def determineExceptionErrorKind(exception: Throwable, logger: TracedLogger)(implicit + tc: TraceContext + ): ErrorKind + + /** Determines what kind of error resulted in the outcome, + * and gives a recommendation on how many times to retry. + * + * Also logs the embedded exception. 
+ */ + def logAndDetermineErrorKind( + outcome: Try[_], + logger: TracedLogger, + lastErrorKind: Option[ErrorKind], + )(implicit + tc: TraceContext + ): ErrorKind = { + outcome match { + case util.Success(_) => NoSuccessErrorKind + case Failure(exception) => + val errorKind = determineExceptionErrorKind(exception, logger) + // only log the full exception if the error kind changed such that we avoid spamming the logs + if (!lastErrorKind.contains(errorKind)) { + logThrowable(exception, logger) + } else { + logger.debug( + s"Retrying on same error kind ${errorKind} for ${exception.getClass.getSimpleName}/${exception.getMessage}" + ) + } + errorKind + } + } + + protected def logThrowable(e: Throwable, logger: TracedLogger)(implicit + traceContext: TraceContext + ): Unit = { + val level = retryLogLevel(e).getOrElse(Level.INFO) + implicit val errorLoggingContext: ErrorLoggingContext = + ErrorLoggingContext.fromTracedLogger(logger) + e match { + case sqlE: SQLException => + // Unfortunately, the sql state and error code won't get logged automatically. + LoggerUtil.logThrowableAtLevel( + level, + s"Detected an SQLException. SQL state: ${sqlE.getSQLState}, error code: ${sqlE.getErrorCode}", + e, + ) + case _: Throwable => + LoggerUtil.logThrowableAtLevel(level, s"Detected an error.", e) + } + } + + /** Return an optional log level to log an exception with. + * + * This allows to override the log level for particular exceptions on retry globally. + */ + def retryLogLevel(e: Throwable): Option[Level] = None + + def retryLogLevel(outcome: Try[Any]): Option[Level] = outcome match { + case Failure(exception) => retryLogLevel(exception) + case util.Success(_value) => None + } +} + +sealed trait ErrorKind { + def maxRetries: Int +} + +object ErrorKind { + + /** The outcome of the future was success, but the success predicate was false, we retry indefinitely */ + case object NoSuccessErrorKind extends ErrorKind { + override val maxRetries: Int = Int.MaxValue + + override def toString: String = "no success error (request infinite retries)" + } + + /** We don't classify the kind of error, so we default to infinite retries */ + case object UnknownErrorKind extends ErrorKind { + override val maxRetries: Int = Int.MaxValue + + override def toString: String = "unknown error (request infinite retries)" + } + + /** A fatal error that we should not retry on */ + case object FatalErrorKind extends ErrorKind { + override val maxRetries = 0 + + override def toString: String = "fatal error (give up immediately)" + } + + /** Main use case is a network outage. Infinite retries are needed, as we don't know how long the outage takes. + */ + final case class TransientErrorKind(maxRetries: Int = Int.MaxValue) extends ErrorKind { + private lazy val numRetriesString = + if (maxRetries == Int.MaxValue) "infinite" else maxRetries.toString + + override def toString: String = s"transient error (request $numRetriesString retries)" + } + +} + +/** Retry on any exception. + * + * This is a sensible default choice for non-db tasks with a finite maximum number of retries. + */ +case object AllExceptionRetryPolicy extends ExceptionRetryPolicy { + + override protected def determineExceptionErrorKind(exception: Throwable, logger: TracedLogger)( + implicit tc: TraceContext + ): ErrorKind = { + // We don't classify the kind of error, always retry indefinitely + UnknownErrorKind + } + +} + +/** Don't retry on any exception. 
+ */ +case object NoExceptionRetryPolicy extends ExceptionRetryPolicy { + + override protected def determineExceptionErrorKind(exception: Throwable, logger: TracedLogger)( + implicit tc: TraceContext + ): ErrorKind = { + // We treat the exception as fatal, never retry + FatalErrorKind + } + +} diff --git a/sdk/canton/community/base/src/main/scala/com/digitalasset/canton/util/retry/Policy.scala b/sdk/canton/community/base/src/main/scala/com/digitalasset/canton/util/retry/Policy.scala index 3b6cb24c45c8..0eaa9c81f2bc 100644 --- a/sdk/canton/community/base/src/main/scala/com/digitalasset/canton/util/retry/Policy.scala +++ b/sdk/canton/community/base/src/main/scala/com/digitalasset/canton/util/retry/Policy.scala @@ -15,12 +15,6 @@ import com.digitalasset.canton.lifecycle.{ import com.digitalasset.canton.logging.{ErrorLoggingContext, TracedLogger} import com.digitalasset.canton.tracing.TraceContext import com.digitalasset.canton.util.ShowUtil.* -import com.digitalasset.canton.util.retry.RetryUtil.{ - AllExnRetryable, - ErrorKind, - ExceptionRetryable, - NoErrorKind, -} import com.digitalasset.canton.util.retry.RetryWithDelay.{RetryOutcome, RetryTermination} import com.digitalasset.canton.util.{DelayUtil, LoggerUtil} import org.slf4j.event.Level @@ -40,13 +34,13 @@ abstract class Policy(logger: TracedLogger) { protected val directExecutionContext: DirectExecutionContext = DirectExecutionContext(logger) - def apply[T](task: => Future[T], retryOk: ExceptionRetryable)(implicit + def apply[T](task: => Future[T], retryOk: ExceptionRetryPolicy)(implicit success: Success[T], executionContext: ExecutionContext, traceContext: TraceContext, ): Future[T] - def unlessShutdown[T](task: => FutureUnlessShutdown[T], retryOk: ExceptionRetryable)(implicit + def unlessShutdown[T](task: => FutureUnlessShutdown[T], retryOk: ExceptionRetryPolicy)(implicit success: Success[T], executionContext: ExecutionContext, traceContext: TraceContext, @@ -93,7 +87,7 @@ object Policy { retryInterval, operationName = operationName, actionable = Some(actionable), - ).unlessShutdown(task, AllExnRetryable)( + ).unlessShutdown(task, AllExceptionRetryPolicy)( Success.always, executionContext, loggingContext.traceContext, @@ -125,7 +119,7 @@ abstract class RetryWithDelay( */ override def apply[T]( task: => Future[T], - retryable: ExceptionRetryable, + retryable: ExceptionRetryPolicy, )(implicit success: Success[T], executionContext: ExecutionContext, @@ -150,7 +144,7 @@ abstract class RetryWithDelay( */ override def unlessShutdown[T]( task: => FutureUnlessShutdown[T], - retryable: ExceptionRetryable, + retryable: ExceptionRetryPolicy, )(implicit success: Success[T], executionContext: ExecutionContext, @@ -169,7 +163,7 @@ abstract class RetryWithDelay( private def retryWithDelay[T]( task: => Future[T], - retryable: ExceptionRetryable, + retryable: ExceptionRetryPolicy, executionContext: ExecutionContext, )(implicit success: Success[T], traceContext: TraceContext): Future[RetryOutcome[T]] = { implicit val loggingContext: ErrorLoggingContext = ErrorLoggingContext.fromTracedLogger(logger) @@ -181,7 +175,7 @@ abstract class RetryWithDelay( def run( previousResult: Future[T], totalRetries: Int, - lastErrorKind: ErrorKind, + lastErrorKind: Option[ErrorKind], retriesOfLastErrorKind: Int, delay: FiniteDuration, ): Future[RetryOutcome[T]] = logOnThrow { @@ -216,8 +210,9 @@ abstract class RetryWithDelay( case outcome => // this will also log the exception in outcome - val errorKind = retryable.retryOK(outcome, logger, Some(lastErrorKind)) - val 
retriesOfErrorKind = if (errorKind == lastErrorKind) retriesOfLastErrorKind else 0 + val errorKind = retryable.logAndDetermineErrorKind(outcome, logger, lastErrorKind) + val retriesOfErrorKind = + if (lastErrorKind.contains(errorKind)) retriesOfLastErrorKind else 0 if ( errorKind.maxRetries == Int.MaxValue || retriesOfErrorKind < errorKind.maxRetries ) { @@ -229,7 +224,7 @@ abstract class RetryWithDelay( DelayUtil .delayIfNotClosing(operationName, suspendDuration, performUnlessClosing) .onShutdown(())(directExecutionContext) - .flatMap(_ => run(previousResult, 0, errorKind, 0, initialDelay))( + .flatMap(_ => run(previousResult, 0, Some(errorKind), 0, initialDelay))( directExecutionContext ) } else { @@ -239,7 +234,7 @@ abstract class RetryWithDelay( retryable.retryLogLevel(outcome).getOrElse(Level.INFO) } else Level.WARN } - val change = if (errorKind == lastErrorKind) { + val change = if (lastErrorKind.contains(errorKind)) { "" } else { s"New kind of error: $errorKind. " @@ -283,7 +278,7 @@ abstract class RetryWithDelay( run( nextRunF, totalRetries + 1, - errorKind, + Some(errorKind), retriesOfErrorKind + 1, nextDelay(totalRetries + 1, delay), ) @@ -327,7 +322,13 @@ abstract class RetryWithDelay( // Run 1 onwards: Only run this if `flagCloseable` is not closing. // (The check is performed at the recursive call.) // Checking at the client would be very difficult, because the client would have to deal with a closed EC. - run(runTask(), 0, NoErrorKind, 0, initialDelay) + run( + runTask(), + totalRetries = 0, + lastErrorKind = None, + retriesOfLastErrorKind = 0, + delay = initialDelay, + ) } private def messageOfOutcome( @@ -505,7 +506,7 @@ final case class When( depends: PartialFunction[Any, Policy], ) extends Policy(logger) { - override def apply[T](task: => Future[T], retryable: ExceptionRetryable)(implicit + override def apply[T](task: => Future[T], retryable: ExceptionRetryPolicy)(implicit success: Success[T], executionContext: ExecutionContext, traceContext: TraceContext, @@ -517,13 +518,18 @@ final case class When( else depends(res)(task, retryable) }(directExecutionContext) .recoverWith { case NonFatal(e) => - if (depends.isDefinedAt(e) && retryable.retryOK(Failure(e), logger, None).maxRetries > 0) + if ( + depends + .isDefinedAt(e) && retryable + .logAndDetermineErrorKind(Failure(e), logger, None) + .maxRetries > 0 + ) depends(e)(task, retryable) else fut }(directExecutionContext) } - override def unlessShutdown[T](task: => FutureUnlessShutdown[T], retryOk: ExceptionRetryable)( + override def unlessShutdown[T](task: => FutureUnlessShutdown[T], retryOk: ExceptionRetryPolicy)( implicit success: Success[T], executionContext: ExecutionContext, diff --git a/sdk/canton/community/base/src/main/scala/com/digitalasset/canton/util/retry/RetryUtil.scala b/sdk/canton/community/base/src/main/scala/com/digitalasset/canton/util/retry/RetryUtil.scala deleted file mode 100644 index cb60e7f26c30..000000000000 --- a/sdk/canton/community/base/src/main/scala/com/digitalasset/canton/util/retry/RetryUtil.scala +++ /dev/null @@ -1,306 +0,0 @@ -// Copyright (c) 2024 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
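Not part of this diff: a minimal sketch of how a caller might implement the new ExceptionRetryPolicy trait introduced in ExceptionRetryPolicy.scala above. The policy name IoExceptionRetryPolicy and the retry budget of 10 are illustrative assumptions; only the trait, the ErrorKind variants, and the retry.Pause call shape are taken from code shown in this diff.

import java.io.IOException
import com.digitalasset.canton.logging.TracedLogger
import com.digitalasset.canton.tracing.TraceContext
import com.digitalasset.canton.util.retry.ErrorKind.{FatalErrorKind, TransientErrorKind}
import com.digitalasset.canton.util.retry.{ErrorKind, ExceptionRetryPolicy}

// Hypothetical policy: treat I/O problems as transient with a bounded retry budget,
// and give up immediately on everything else.
case object IoExceptionRetryPolicy extends ExceptionRetryPolicy {
  override protected def determineExceptionErrorKind(exception: Throwable, logger: TracedLogger)(
      implicit tc: TraceContext
  ): ErrorKind =
    exception match {
      case _: IOException => TransientErrorKind(maxRetries = 10)
      case _ => FatalErrorKind
    }
}

Such a policy would be passed where the diff passes AllExceptionRetryPolicy or NoExceptionRetryPolicy, e.g. retry.Pause(logger, this, maxRetries, interval, "verify active").apply(verifyActive(), IoExceptionRetryPolicy).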
-// SPDX-License-Identifier: Apache-2.0 - -package com.digitalasset.canton.util.retry - -import com.digitalasset.canton.discard.Implicits.DiscardOps -import com.digitalasset.canton.logging.{ErrorLoggingContext, TracedLogger} -import com.digitalasset.canton.resource.DatabaseStorageError.DatabaseStorageDegradation.DatabaseTaskRejected -import com.digitalasset.canton.resource.DbStorage.NoConnectionAvailable -import com.digitalasset.canton.tracing.TraceContext -import com.digitalasset.canton.util.LoggerUtil -import com.digitalasset.canton.util.TryUtil.ForFailedOps -import org.postgresql.util.PSQLException -import org.slf4j.event.Level - -import java.sql.* -import scala.annotation.tailrec -import scala.util.{Failure, Try} - -object RetryUtil { - - /** When using retry code in different contexts, different exceptions should be retried on. This trait provides a - * way to define what exceptions should be retried and which are fatal. - */ - trait ExceptionRetryable { - - /** Determines what kind of error (if any) resulted in the outcome, - * and gives a recommendation on how many times to retry. - * - * Also logs the embedded exception. - */ - def retryOK(outcome: Try[_], logger: TracedLogger, lastErrorKind: Option[ErrorKind])(implicit - tc: TraceContext - ): ErrorKind - - protected def logThrowable(e: Throwable, logger: TracedLogger)(implicit - traceContext: TraceContext - ): Unit = { - val level = retryLogLevel(e).getOrElse(Level.INFO) - implicit val errorLoggingContext: ErrorLoggingContext = - ErrorLoggingContext.fromTracedLogger(logger) - e match { - case sqlE: SQLException => - // Unfortunately, the sql state and error code won't get logged automatically. - LoggerUtil.logThrowableAtLevel( - level, - s"Detected an SQLException. SQL state: ${sqlE.getSQLState}, error code: ${sqlE.getErrorCode}", - e, - ) - case _: Throwable => - LoggerUtil.logThrowableAtLevel(level, s"Detected an error.", e) - } - } - - /** Return an optional log level to log an exception with. - * - * This allows to override the log level for particular exceptions on retry globally. - */ - def retryLogLevel(e: Throwable): Option[Level] = None - - def retryLogLevel(outcome: Try[Any]): Option[Level] = outcome match { - case Failure(exception) => retryLogLevel(exception) - case util.Success(_value) => None - } - } - - sealed trait ErrorKind { - def maxRetries: Int - } - - case object NoErrorKind extends ErrorKind { - override val maxRetries: Int = Int.MaxValue - - override def toString: String = "no error (request infinite retries)" - } - - case object FatalErrorKind extends ErrorKind { - override val maxRetries = 0 - - override def toString: String = "fatal error (give up immediately)" - } - - /** Main use case is a network outage. Infinite retries are needed, as we don't know how long the outage takes. - */ - case object TransientErrorKind extends ErrorKind { - override val maxRetries: Int = Int.MaxValue - - override def toString: String = "transient error (request infinite retries)" - } - - /** Main use case is a transient unique constraint violation due to racy merge statements. - * Should go away after a very limited amount of retries. - */ - case object SpuriousTransientErrorKind extends ErrorKind { - // Value determined empirically in UpsertTestOracle. - // For single row inserts, 1 is sufficient. - // For batched inserts, 3 was more than sufficient in the test. 
- override val maxRetries = 10 - - override def toString: String = - s"possibly spurious transient error (request up to $maxRetries retries)" - } - - /** Defines which should be retryable when thrown by the database. - */ - case object DbExceptionRetryable extends ExceptionRetryable { - - def retryOKForever(error: Throwable, logger: TracedLogger)(implicit - tc: TraceContext - ): Boolean = { - // Don't retry forever on "contention" errors, as these may not actually be due to contention and get stuck - // forever. Eg unique constraint violation exceptions can be caused by contention in H2 leading to data anomalies. - DbExceptionRetryable.retryOK(Failure(error), logger, None).maxRetries == Int.MaxValue - } - - override def retryOK( - outcome: Try[_], - logger: TracedLogger, - lastErrorKind: Option[ErrorKind], - )(implicit - tc: TraceContext - ): ErrorKind = { - outcome match { - case util.Success(_) => NoErrorKind - case ff @ Failure(exception) => - val errorKind = retryOKInternal(ff, logger) - // only log the full exception if the error kind changed such that we avoid spamming the logs - if (!lastErrorKind.contains(errorKind)) { - logThrowable(exception, logger) - } else { - logger.debug( - s"Retrying on same error kind ${errorKind} for ${exception.getClass.getSimpleName}/${exception.getMessage}" - ) - } - errorKind - } - } - - private def retryOKInternal( - outcome: Failure[_], - logger: TracedLogger, - )(implicit - tc: TraceContext - ): ErrorKind = { - outcome.exception match { - case exn: java.util.concurrent.RejectedExecutionException => - // This occurs when slick's task queue is full - - // Create a CantonError so that the error code gets logged. - DatabaseTaskRejected(exn.toString)( - ErrorLoggingContext.fromTracedLogger(logger) - ).discard - - TransientErrorKind - case other => determineErrorKind(other, logger) - } - } - - @tailrec def determineErrorKind( - exception: Throwable, - logger: TracedLogger, - )(implicit - tc: TraceContext - ): ErrorKind = exception match { - case exception: PSQLException => - // Error codes documented here: https://www.postgresql.org/docs/9.6/errcodes-appendix.html - val error = exception.getSQLState - - if (error.startsWith("08")) { - // Class 08 — Connection Exception - TransientErrorKind - } else if (error == "40001") { - // Class 40 — Transaction Rollback: 40001 serialization_failure - // Failure to serialize db accesses, happens due to contention - TransientErrorKind - } else if (error == "40P01") { - // Deadlock - // See DatabaseDeadlockTestPostgres - // This also covers deadlocks reported as BatchUpdateExceptions, - // because they refer to a PSQLException has cause. - TransientErrorKind - } else if (error == "25006") { - // Retry on read only transaction, which can occur on Azure - TransientErrorKind - } else if (error.startsWith("57P") && error != "57P014" && error != "57P04") { - // Retry on operator invention errors, otherwise Canton components crash in an uncontrolled manner when - // the exception bubbles up (don't retry on `query_canceled` and `database_dropped`) - TransientErrorKind - } else if ( - error == "53000" || error == "53100" || error == "53200" || error == "53300" || error == "53400" - ) { - // Retry insufficient db resource errors - TransientErrorKind - } else { - // Don't retry on other exceptions. These other exceptions should be those for which retrying typically won't - // help, for example a unique constraint violation. 
- logger.info(s"Fatal sql exception has error code: $error") - FatalErrorKind - } - - case _: SQLIntegrityConstraintViolationException => - // Both H2 and Oracle may fail with spurious constraint violations, due to racy implementation of the MERGE statements. - // In H2, this may also occur because it does not properly implement the serializable isolation level. - // See UpsertTestOracle - // See https://github.com/h2database/h2database/issues/2167 - SpuriousTransientErrorKind - - case _: SQLRecoverableException | _: SQLTransientException | - _: SQLNonTransientConnectionException => - TransientErrorKind - - // Handle SQLException and all classes that derive from it (e.g. java.sql.BatchUpdateException) - // Note that if the exception is not known but has a cause, we'll base the retry on the cause - case ex: SQLException => - val code = ex.getErrorCode - if (ex.getErrorCode == 1) { - // Retry on ORA-00001: unique constraint violated exception - SpuriousTransientErrorKind - } else if (ex.getMessage == "Connection is closed") { - // May fail with a "Connection is closed" message if the db has gone down - TransientErrorKind - } else if (ex.getErrorCode == 4021) { - // ORA timeout occurred while waiting to lock object - TransientErrorKind - } else if (ex.getErrorCode == 54) { - // ORA timeout occurred while waiting to lock object or because NOWAIT has been set - // e.g. as part of truncate table - TransientErrorKind - } else if (ex.getErrorCode == 60) { - // Deadlock - // See DatabaseDeadlockTestOracle - TransientErrorKind - } else if ( - ex.getErrorCode == 604 && - List("ORA-08176", "ORA-08177").exists(ex.getMessage.contains) - ) { - // Oracle failure in a batch operation - // For Oracle, the `cause` is not always set properly for exceptions. This is a problem for batched queries. - // So, look through an exception's `message` to see if it contains a retryable problem. - TransientErrorKind - } else if (ex.getErrorCode == 8176) { - // consistent read failure; rollback data not available - // Cause: Encountered data changed by an operation that does not generate rollback data - // Action: In read/write transactions, retry the intended operation. - TransientErrorKind - } else if (ex.getErrorCode == 8177) { - // failure to serialize transaction with serializable isolation level - TransientErrorKind - } else if (ex.getErrorCode == 17410) { - // No more data to read from socket, can be caused by network problems - SpuriousTransientErrorKind - } else if (code == 17002) { - // This has been observed as either IO Error: Connection reset by peer or IO Error: Broken pipe - // when straight-up killing an Oracle database server (`kill -9 `) - TransientErrorKind - } else if (code == 1088 || code == 1089 || code == 1090 || code == 1092) { - // Often observed for orderly Oracle shutdowns - // https://docs.oracle.com/en/database/oracle/oracle-database/19/errmg/ORA-00910.html#GUID-D9EBDFFA-88C6-4185-BD2C-E1B959A97274 - TransientErrorKind - } else if (ex.getCause != null) { - logger.info("Unable to retry on exception, checking cause.") - determineErrorKind(ex.getCause, logger) - } else { - FatalErrorKind - } - - case _ => FatalErrorKind - } - - override def retryLogLevel(e: Throwable): Option[Level] = e match { - case _: NoConnectionAvailable => - // Avoid log noise if no connection is available either due to contention or a temporary network problem - Some(Level.DEBUG) - case _ => None - } - } - - /** Retry on any exception. 
- * - * This is a sensible default choice for non-db tasks with a finite maximum number of retries. - */ - case object AllExnRetryable extends ExceptionRetryable { - - override def retryOK(outcome: Try[_], logger: TracedLogger, lastErrorKind: Option[ErrorKind])( - implicit tc: TraceContext - ): ErrorKind = { - outcome.forFailed(t => logThrowable(t, logger)) - NoErrorKind - } - - } - - /** Don't retry on any exception. - */ - case object NoExnRetryable extends ExceptionRetryable { - - override def retryOK(outcome: Try[_], logger: TracedLogger, lastErrorKind: Option[ErrorKind])( - implicit tc: TraceContext - ): ErrorKind = outcome match { - case Failure(ex) => - logThrowable(ex, logger) - FatalErrorKind - case util.Success(_) => NoErrorKind - } - } -} diff --git a/sdk/canton/community/common/src/main/daml/CantonExamples/daml.yaml b/sdk/canton/community/common/src/main/daml/CantonExamples/daml.yaml index 5cc53ecce0b3..06d970dcd86b 100644 --- a/sdk/canton/community/common/src/main/daml/CantonExamples/daml.yaml +++ b/sdk/canton/community/common/src/main/daml/CantonExamples/daml.yaml @@ -1,4 +1,4 @@ -sdk-version: 3.2.0-snapshot.20240722.13195.0.vd24c87ab +sdk-version: 3.2.0-snapshot.20240724.13201.0.v2357fdb4 build-options: - --target=2.1 name: CantonExamples diff --git a/sdk/canton/community/common/src/main/scala/com/digitalasset/canton/common/domain/SequencerBasedRegisterTopologyTransactionHandle.scala b/sdk/canton/community/common/src/main/scala/com/digitalasset/canton/common/domain/SequencerBasedRegisterTopologyTransactionHandle.scala index ada6cb0072eb..a262dba9f18a 100644 --- a/sdk/canton/community/common/src/main/scala/com/digitalasset/canton/common/domain/SequencerBasedRegisterTopologyTransactionHandle.scala +++ b/sdk/canton/community/common/src/main/scala/com/digitalasset/canton/common/domain/SequencerBasedRegisterTopologyTransactionHandle.scala @@ -4,6 +4,7 @@ package com.digitalasset.canton.common.domain import cats.data.EitherT +import com.daml.metrics.api.MetricsContext import com.daml.nameof.NameOf.functionFullName import com.digitalasset.canton.config.CantonRequireTypes.String255 import com.digitalasset.canton.config.{ProcessingTimeout, TopologyConfig} @@ -131,6 +132,9 @@ class DomainTopologyService( )(implicit traceContext: TraceContext ): EitherT[FutureUnlessShutdown, SendAsyncClientError, Unit] = { + implicit val metricsContext: MetricsContext = MetricsContext( + "type" -> "send-topology" + ) logger.debug(s"Broadcasting topology transaction: ${request.broadcasts}") EitherTUtil.logOnErrorU( sequencerClient.sendAsync( diff --git a/sdk/canton/community/common/src/main/scala/com/digitalasset/canton/common/domain/grpc/GrpcSequencerConnectClient.scala b/sdk/canton/community/common/src/main/scala/com/digitalasset/canton/common/domain/grpc/GrpcSequencerConnectClient.scala index d6d7618d49a2..4a7a6be616b5 100644 --- a/sdk/canton/community/common/src/main/scala/com/digitalasset/canton/common/domain/grpc/GrpcSequencerConnectClient.scala +++ b/sdk/canton/community/common/src/main/scala/com/digitalasset/canton/common/domain/grpc/GrpcSequencerConnectClient.scala @@ -31,8 +31,7 @@ import com.digitalasset.canton.topology.{ UniqueIdentifier, } import com.digitalasset.canton.tracing.{TraceContext, TracingConfig} -import com.digitalasset.canton.util.retry.RetryUtil.AllExnRetryable -import com.digitalasset.canton.util.retry.Success +import com.digitalasset.canton.util.retry.{AllExceptionRetryPolicy, Success} import com.digitalasset.canton.util.{Thereafter, retry} import 
com.digitalasset.canton.version.HandshakeErrors.DeprecatedProtocolVersion import com.digitalasset.canton.{DomainAlias, ProtoDeserializationError} @@ -205,7 +204,7 @@ class GrpcSequencerConnectClient( EitherT( retry .Pause(logger, this, maxRetries, interval, "verify active") - .apply(verifyActive(), AllExnRetryable) + .apply(verifyActive(), AllExceptionRetryPolicy) ).thereafter(_ => closeableChannel.close()) } diff --git a/sdk/canton/community/common/src/main/scala/com/digitalasset/canton/common/domain/grpc/SequencerInfoLoader.scala b/sdk/canton/community/common/src/main/scala/com/digitalasset/canton/common/domain/grpc/SequencerInfoLoader.scala index f00a696b3a57..898002783d1d 100644 --- a/sdk/canton/community/common/src/main/scala/com/digitalasset/canton/common/domain/grpc/SequencerInfoLoader.scala +++ b/sdk/canton/community/common/src/main/scala/com/digitalasset/canton/common/domain/grpc/SequencerInfoLoader.scala @@ -34,7 +34,7 @@ import com.digitalasset.canton.topology.* import com.digitalasset.canton.tracing.{TraceContext, TracingConfig} import com.digitalasset.canton.util.ShowUtil.* import com.digitalasset.canton.util.Thereafter.syntax.* -import com.digitalasset.canton.util.retry.RetryUtil.NoExnRetryable +import com.digitalasset.canton.util.retry.NoExceptionRetryPolicy import com.digitalasset.canton.util.{FutureUtil, LoggerUtil, retry} import com.digitalasset.canton.version.ProtocolVersion import com.google.common.annotations.VisibleForTesting @@ -120,7 +120,7 @@ class SequencerInfoLoader( ) .apply( getBootstrapInfoDomainParameters(domainAlias, sequencerAlias, client).value, - NoExnRetryable, + NoExceptionRetryPolicy, ) ) } diff --git a/sdk/canton/community/common/src/main/scala/com/digitalasset/canton/config/CryptoConfig.scala b/sdk/canton/community/common/src/main/scala/com/digitalasset/canton/config/CryptoConfig.scala index 7a1e032657fe..0f687826579d 100644 --- a/sdk/canton/community/common/src/main/scala/com/digitalasset/canton/config/CryptoConfig.scala +++ b/sdk/canton/community/common/src/main/scala/com/digitalasset/canton/config/CryptoConfig.scala @@ -27,6 +27,16 @@ final case class CryptoSchemeConfig[S]( allowed: Option[NonEmpty[Set[S]]] = None, ) +/** Stores the configuration of the encryption scheme. + * + * @param algorithms the algorithm specifications + * @param keys the key specifications + */ +final case class EncryptionSchemeConfig( + algorithms: CryptoSchemeConfig[EncryptionAlgorithmSpec] = CryptoSchemeConfig(), + keys: CryptoSchemeConfig[EncryptionKeySpec] = CryptoSchemeConfig(), +) + /** Cryptography configuration. 
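Not part of this diff: a small sketch of how the regrouped encryption settings introduced by EncryptionSchemeConfig above might be populated from Scala. All values are left at their defaults; the val name cryptoConfig is illustrative.

import com.digitalasset.canton.config.{CommunityCryptoConfig, CryptoSchemeConfig, EncryptionSchemeConfig}

// The former top-level encryptionAlgorithms / encryptionKeys fields are now grouped under `encryption`.
val cryptoConfig = CommunityCryptoConfig(
  encryption = EncryptionSchemeConfig(
    algorithms = CryptoSchemeConfig(), // default/allowed EncryptionAlgorithmSpec values
    keys = CryptoSchemeConfig(), // default/allowed EncryptionKeySpec values
  )
)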
*/ trait CryptoConfig { @@ -36,11 +46,8 @@ trait CryptoConfig { /** the signing key scheme configuration */ def signing: CryptoSchemeConfig[SigningKeyScheme] - /** the encryption algorithm configuration */ - def encryptionAlgorithms: CryptoSchemeConfig[EncryptionAlgorithmSpec] - - /** the encryption key configuration */ - def encryptionKeys: CryptoSchemeConfig[EncryptionKeySpec] + /** the encryption scheme configuration */ + def encryption: EncryptionSchemeConfig /** the symmetric key scheme configuration */ def symmetric: CryptoSchemeConfig[SymmetricKeyScheme] @@ -55,8 +62,7 @@ trait CryptoConfig { final case class CommunityCryptoConfig( provider: CommunityCryptoProvider = CommunityCryptoProvider.Jce, signing: CryptoSchemeConfig[SigningKeyScheme] = CryptoSchemeConfig(), - encryptionAlgorithms: CryptoSchemeConfig[EncryptionAlgorithmSpec] = CryptoSchemeConfig(), - encryptionKeys: CryptoSchemeConfig[EncryptionKeySpec] = CryptoSchemeConfig(), + encryption: EncryptionSchemeConfig = EncryptionSchemeConfig(), symmetric: CryptoSchemeConfig[SymmetricKeyScheme] = CryptoSchemeConfig(), hash: CryptoSchemeConfig[HashAlgorithm] = CryptoSchemeConfig(), pbkdf: CryptoSchemeConfig[PbkdfScheme] = CryptoSchemeConfig(), diff --git a/sdk/canton/community/common/src/main/scala/com/digitalasset/canton/crypto/CryptoFactory.scala b/sdk/canton/community/common/src/main/scala/com/digitalasset/canton/crypto/CryptoFactory.scala index 45b1e0d4a3a0..e59d18d9a178 100644 --- a/sdk/canton/community/common/src/main/scala/com/digitalasset/canton/crypto/CryptoFactory.scala +++ b/sdk/canton/community/common/src/main/scala/com/digitalasset/canton/crypto/CryptoFactory.scala @@ -50,7 +50,7 @@ trait CryptoFactory { symmetricKeyScheme <- selectSchemes(config.symmetric, config.provider.symmetric) .map(_.default) encryptionAlgorithmSpec <- selectSchemes( - config.encryptionAlgorithms, + config.encryption.algorithms, config.provider.encryptionAlgorithms, ) supportedEncryptionAlgorithmSpecs <- selectAllowedEncryptionAlgorithmSpecs(config) @@ -104,12 +104,12 @@ trait CryptoFactory { .map(_.default) .toEitherT[FutureUnlessShutdown] encryptionCryptoAlgorithmSpec <- selectSchemes( - config.encryptionAlgorithms, + config.encryption.algorithms, config.provider.encryptionAlgorithms, ) .map(_.default) .toEitherT[FutureUnlessShutdown] - encryptionKeySpec <- selectSchemes(config.encryptionKeys, config.provider.encryptionKeys) + encryptionKeySpec <- selectSchemes(config.encryption.keys, config.provider.encryptionKeys) .map(_.default) .toEitherT[FutureUnlessShutdown] // TODO(#18934): Ensure required/allowed schemes are enforced by private/pure crypto classes @@ -183,12 +183,12 @@ object CryptoFactory { def selectAllowedEncryptionAlgorithmSpecs( config: CryptoConfig ): Either[String, NonEmpty[Set[EncryptionAlgorithmSpec]]] = - selectSchemes(config.encryptionAlgorithms, config.provider.encryptionAlgorithms).map(_.allowed) + selectSchemes(config.encryption.algorithms, config.provider.encryptionAlgorithms).map(_.allowed) def selectAllowedEncryptionKeySpecs( config: CryptoConfig ): Either[String, NonEmpty[Set[EncryptionKeySpec]]] = - selectSchemes(config.encryptionKeys, config.provider.encryptionKeys).map(_.allowed) + selectSchemes(config.encryption.keys, config.provider.encryptionKeys).map(_.allowed) } diff --git a/sdk/canton/community/common/src/main/scala/com/digitalasset/canton/crypto/CryptoHandshakeValidator.scala b/sdk/canton/community/common/src/main/scala/com/digitalasset/canton/crypto/CryptoHandshakeValidator.scala index 
480e69f148af..288875984536 100644 --- a/sdk/canton/community/common/src/main/scala/com/digitalasset/canton/crypto/CryptoHandshakeValidator.scala +++ b/sdk/canton/community/common/src/main/scala/com/digitalasset/canton/crypto/CryptoHandshakeValidator.scala @@ -46,12 +46,12 @@ object CryptoHandshakeValidator { ) _ <- validateScheme( parameters.requiredEncryptionSpecs.algorithms, - selectSchemes(config.encryptionAlgorithms, config.provider.encryptionAlgorithms) + selectSchemes(config.encryption.algorithms, config.provider.encryptionAlgorithms) .map(cs => CryptoScheme(cs.default, cs.allowed)), ) _ <- validateScheme( parameters.requiredEncryptionSpecs.keys, - selectSchemes(config.encryptionKeys, config.provider.encryptionKeys) + selectSchemes(config.encryption.keys, config.provider.encryptionKeys) .map(cs => CryptoScheme(cs.default, cs.allowed)), ) _ <- validateScheme( diff --git a/sdk/canton/community/common/src/main/scala/com/digitalasset/canton/environment/BootstrapStage.scala b/sdk/canton/community/common/src/main/scala/com/digitalasset/canton/environment/BootstrapStage.scala index 7246d18b9304..81ba812f6486 100644 --- a/sdk/canton/community/common/src/main/scala/com/digitalasset/canton/environment/BootstrapStage.scala +++ b/sdk/canton/community/common/src/main/scala/com/digitalasset/canton/environment/BootstrapStage.scala @@ -21,8 +21,7 @@ import com.digitalasset.canton.resource.DbStorage.PassiveInstanceException import com.digitalasset.canton.resource.Storage import com.digitalasset.canton.tracing.TraceContext import com.digitalasset.canton.util.Thereafter.syntax.* -import com.digitalasset.canton.util.retry.RetryUtil.NoExnRetryable -import com.digitalasset.canton.util.retry.Success +import com.digitalasset.canton.util.retry.{NoExceptionRetryPolicy, Success} import com.digitalasset.canton.util.{EitherTUtil, SimpleExecutionQueue, retry} import java.util.concurrent.atomic.{AtomicBoolean, AtomicReference} @@ -204,7 +203,7 @@ abstract class BootstrapStageWithStorage[ ) // on shutdown, the retry loop will return the last value so if // we get None back, we know that the retry loop was aborted due to a shutdown - .unlessShutdown(attemptAndStore().value, NoExnRetryable)( + .unlessShutdown(attemptAndStore().value, NoExceptionRetryPolicy)( success, executionContext, traceContext, diff --git a/sdk/canton/community/common/src/main/scala/com/digitalasset/canton/topology/DomainOutboxDispatchHelper.scala b/sdk/canton/community/common/src/main/scala/com/digitalasset/canton/topology/DomainOutboxDispatchHelper.scala index 3993b9fecc99..42f198d9169b 100644 --- a/sdk/canton/community/common/src/main/scala/com/digitalasset/canton/topology/DomainOutboxDispatchHelper.scala +++ b/sdk/canton/community/common/src/main/scala/com/digitalasset/canton/topology/DomainOutboxDispatchHelper.scala @@ -23,7 +23,7 @@ import com.digitalasset.canton.topology.transaction.SignedTopologyTransaction import com.digitalasset.canton.topology.transaction.SignedTopologyTransaction.GenericSignedTopologyTransaction import com.digitalasset.canton.tracing.TraceContext import com.digitalasset.canton.util.FutureInstances.* -import com.digitalasset.canton.util.retry.RetryUtil.AllExnRetryable +import com.digitalasset.canton.util.retry.AllExceptionRetryPolicy import com.digitalasset.canton.util.{FutureUtil, retry} import com.digitalasset.canton.version.ProtocolVersion @@ -59,19 +59,8 @@ trait DomainOutboxDispatchHelper extends NamedLogging { protected def onlyApplicable( transactions: Seq[GenericSignedTopologyTransaction] ): 
Future[Seq[GenericSignedTopologyTransaction]] = { - def notAlien(tx: GenericSignedTopologyTransaction): Boolean = { - val mapping = tx.mapping - mapping match { - // TODO(#14048) add filter criteria here - case _ => true - } - } - - def domainRestriction(tx: GenericSignedTopologyTransaction): Boolean = - tx.mapping.restrictedToDomain.forall(_ == domainId) - Future.successful( - transactions.filter(x => notAlien(x) && domainRestriction(x)) + transactions.filter(x => x.mapping.restrictedToDomain.forall(_ == domainId)) ) } @@ -198,7 +187,7 @@ trait DomainOutboxDispatch extends NamedLogging with FlagCloseable { s"Pushing topology transactions to $domain", ) }, - AllExnRetryable, + AllExceptionRetryPolicy, ) .map { responses => if (responses.length != transactions.length) { diff --git a/sdk/canton/community/common/src/main/scala/com/digitalasset/canton/topology/QueueBasedDomainOutbox.scala b/sdk/canton/community/common/src/main/scala/com/digitalasset/canton/topology/QueueBasedDomainOutbox.scala index 8ffa39d47931..754736e80e38 100644 --- a/sdk/canton/community/common/src/main/scala/com/digitalasset/canton/topology/QueueBasedDomainOutbox.scala +++ b/sdk/canton/community/common/src/main/scala/com/digitalasset/canton/topology/QueueBasedDomainOutbox.scala @@ -21,7 +21,7 @@ import com.digitalasset.canton.topology.client.DomainTopologyClientWithInit import com.digitalasset.canton.topology.store.{TopologyStore, TopologyStoreId} import com.digitalasset.canton.topology.transaction.SignedTopologyTransaction.GenericSignedTopologyTransaction import com.digitalasset.canton.tracing.TraceContext -import com.digitalasset.canton.util.retry.RetryUtil.AllExnRetryable +import com.digitalasset.canton.util.retry.AllExceptionRetryPolicy import com.digitalasset.canton.util.{DelayUtil, EitherTUtil, FutureUtil, retry} import com.digitalasset.canton.version.ProtocolVersion @@ -314,7 +314,7 @@ class QueueBasedDomainOutbox( s"Pushing topology transactions to $domain", ) }, - AllExnRetryable, + AllExceptionRetryPolicy, ) .map { responses => if (responses.length != transactions.length) { diff --git a/sdk/canton/community/common/src/main/scala/com/digitalasset/canton/topology/admin/grpc/GrpcIdentityInitializationService.scala b/sdk/canton/community/common/src/main/scala/com/digitalasset/canton/topology/admin/grpc/GrpcIdentityInitializationService.scala index 16c4f3d5788a..6b795edb4826 100644 --- a/sdk/canton/community/common/src/main/scala/com/digitalasset/canton/topology/admin/grpc/GrpcIdentityInitializationService.scala +++ b/sdk/canton/community/common/src/main/scala/com/digitalasset/canton/topology/admin/grpc/GrpcIdentityInitializationService.scala @@ -25,7 +25,6 @@ class GrpcIdentityInitializationService( with NamedLogging { override def initId(request: adminProto.InitIdRequest): Future[adminProto.InitIdResponse] = { - // TODO(#14048) propagate trace context implicit val traceContext: TraceContext = TraceContextGrpc.fromGrpcContext val adminProto.InitIdRequest(uidP) = request // TODO(#14048) proper error reporting diff --git a/sdk/canton/community/common/src/main/scala/com/digitalasset/canton/topology/admin/grpc/GrpcTopologyManagerReadService.scala b/sdk/canton/community/common/src/main/scala/com/digitalasset/canton/topology/admin/grpc/GrpcTopologyManagerReadService.scala index 24800dad9683..ba3c16ed2edd 100644 --- a/sdk/canton/community/common/src/main/scala/com/digitalasset/canton/topology/admin/grpc/GrpcTopologyManagerReadService.scala +++ 
b/sdk/canton/community/common/src/main/scala/com/digitalasset/canton/topology/admin/grpc/GrpcTopologyManagerReadService.scala @@ -594,14 +594,16 @@ class GrpcTopologyManagerReadService( namespaceFilter = Some(namespaceFilter), ) } yield { + def partyPredicate(x: PartyToParticipant) = + x.partyId.toProtoPrimitive.startsWith(request.filterParty) + def participantPredicate(x: PartyToParticipant) = + request.filterParticipant.isEmpty || x.participantIds.exists( + _.toProtoPrimitive.contains(request.filterParticipant) + ) + val results = res .collect { - case (result, x: PartyToParticipant) - if x.partyId.toProtoPrimitive.startsWith( - request.filterParty - ) && (request.filterParticipant.isEmpty || x.participantIds.exists( - _.toProtoPrimitive.contains(request.filterParticipant) - )) => + case (result, x: PartyToParticipant) if partyPredicate(x) && participantPredicate(x) => (result, x) } .map { case (context, elem) => diff --git a/sdk/canton/community/common/src/test/scala/com/digitalasset/canton/metrics/MetricsUtils.scala b/sdk/canton/community/common/src/test/scala/com/digitalasset/canton/metrics/MetricsUtils.scala index 496c039322e3..7c227e773fbd 100644 --- a/sdk/canton/community/common/src/test/scala/com/digitalasset/canton/metrics/MetricsUtils.scala +++ b/sdk/canton/community/common/src/test/scala/com/digitalasset/canton/metrics/MetricsUtils.scala @@ -21,7 +21,7 @@ trait MetricsUtils { this: BaseTest => .fromMetricData( onDemandMetricsReader .read() - .find(_.getName.contains(name)) + .find(_.getName.endsWith(name)) .value ) .flatMap { metricData => @@ -32,8 +32,10 @@ trait MetricsUtils { this: BaseTest => def assertInContext(name: String, key: String, value: String)(implicit onDemandMetricsReader: OnDemandMetricsReader ): Assertion = { - getMetricValues[MetricValue.LongPoint](name).headOption - .flatMap(_.attributes.get(key)) shouldBe Some(value) + clue(s"metric $name has value $value for key $key in context") { + getMetricValues[MetricValue.LongPoint](name).headOption + .flatMap(_.attributes.get(key)) shouldBe Some(value) + } } def assertSenderIsInContext(name: String, sender: Member)(implicit @@ -45,15 +47,18 @@ trait MetricsUtils { this: BaseTest => def assertLongValue(name: String, expected: Long)(implicit onDemandMetricsReader: OnDemandMetricsReader ): Assertion = { - getMetricValues[MetricValue.LongPoint](name).loneElement.value shouldBe expected + clue(s"metric $name has value $expected") { + getMetricValues[MetricValue.LongPoint](name).loneElement.value shouldBe expected + } } def assertNoValue(name: String)(implicit onDemandMetricsReader: OnDemandMetricsReader ): Assertion = { - onDemandMetricsReader - .read() - .exists(_.getName.contains(name)) shouldBe false + clue(s"metric $name has no value") { + onDemandMetricsReader + .read() + .exists(_.getName.contains(name)) shouldBe false + } } - } diff --git a/sdk/canton/community/common/src/test/scala/com/digitalasset/canton/sequencing/client/SequencerClientTest.scala b/sdk/canton/community/common/src/test/scala/com/digitalasset/canton/sequencing/client/SequencerClientTest.scala index 0b2e1b4fa654..b490378a6a80 100644 --- a/sdk/canton/community/common/src/test/scala/com/digitalasset/canton/sequencing/client/SequencerClientTest.scala +++ b/sdk/canton/community/common/src/test/scala/com/digitalasset/canton/sequencing/client/SequencerClientTest.scala @@ -6,6 +6,7 @@ package com.digitalasset.canton.sequencing.client import cats.data.EitherT import cats.syntax.either.* import cats.syntax.foldable.* +import 
com.daml.metrics.api.MetricsContext import com.digitalasset.canton.* import com.digitalasset.canton.concurrent.{FutureSupervisor, Threading} import com.digitalasset.canton.config.RequireTypes.{NonNegativeLong, PositiveInt} @@ -1082,8 +1083,10 @@ class SequencerClientTest messageId: MessageId = client.generateMessageId, )(implicit traceContext: TraceContext - ): EitherT[Future, SendAsyncClientError, Unit] = + ): EitherT[Future, SendAsyncClientError, Unit] = { + implicit val metricsContext: MetricsContext = MetricsContext.Empty client.sendAsync(batch, messageId = messageId).onShutdown(fail()) + } } private class MockSubscription[E] extends SequencerSubscription[E] { diff --git a/sdk/canton/community/common/src/test/scala/com/digitalasset/canton/sequencing/client/TestSequencerClientSend.scala b/sdk/canton/community/common/src/test/scala/com/digitalasset/canton/sequencing/client/TestSequencerClientSend.scala index 02f0f431c776..0f9bdb2a1d0f 100644 --- a/sdk/canton/community/common/src/test/scala/com/digitalasset/canton/sequencing/client/TestSequencerClientSend.scala +++ b/sdk/canton/community/common/src/test/scala/com/digitalasset/canton/sequencing/client/TestSequencerClientSend.scala @@ -4,6 +4,7 @@ package com.digitalasset.canton.sequencing.client import cats.data.EitherT +import com.daml.metrics.api.MetricsContext import com.digitalasset.canton.data.CantonTimestamp import com.digitalasset.canton.lifecycle.FutureUnlessShutdown import com.digitalasset.canton.protocol.messages.DefaultOpenEnvelope @@ -37,7 +38,8 @@ class TestSequencerClientSend extends SequencerClientSend { callback: SendCallback, amplify: Boolean, )(implicit - traceContext: TraceContext + traceContext: TraceContext, + metricsContext: MetricsContext, ): EitherT[FutureUnlessShutdown, SendAsyncClientError, Unit] = { requestsQueue.add( Request(batch, topologyTimestamp, maxSequencingTime, messageId, aggregationRule, None) diff --git a/sdk/canton/community/common/src/test/scala/com/digitalasset/canton/store/db/DatabaseLimitNbParamTest.scala b/sdk/canton/community/common/src/test/scala/com/digitalasset/canton/store/db/DatabaseLimitNbParamTest.scala index cbd0aa4a7f82..bdd2d0b526e3 100644 --- a/sdk/canton/community/common/src/test/scala/com/digitalasset/canton/store/db/DatabaseLimitNbParamTest.scala +++ b/sdk/canton/community/common/src/test/scala/com/digitalasset/canton/store/db/DatabaseLimitNbParamTest.scala @@ -4,8 +4,8 @@ package com.digitalasset.canton.store.db import com.daml.nameof.NameOf.functionFullName -import com.digitalasset.canton.resource.DbStorage -import com.digitalasset.canton.util.retry.RetryUtil.{DbExceptionRetryable, FatalErrorKind} +import com.digitalasset.canton.resource.{DbExceptionRetryPolicy, DbStorage} +import com.digitalasset.canton.util.retry.ErrorKind.FatalErrorKind import com.digitalasset.canton.{BaseTestWordSpec, HasExecutionContext} import org.scalatest.BeforeAndAfterAll import slick.sql.SqlAction @@ -60,7 +60,7 @@ trait DatabaseLimitNbParamTest rawStorage .update(query.asUpdate, "parameter limit query", maxRetries = 1) .transformWith { outcome => - val errorKind = DbExceptionRetryable.retryOK(outcome, logger, None) + val errorKind = DbExceptionRetryPolicy.logAndDetermineErrorKind(outcome, logger, None) errorKind match { case FatalErrorKind => case _ => fail("Database error kind should be fatal") diff --git a/sdk/canton/community/common/src/test/scala/com/digitalasset/canton/topology/processing/AuthorizationGraphTest.scala 
b/sdk/canton/community/common/src/test/scala/com/digitalasset/canton/topology/processing/AuthorizationGraphTest.scala index ea8c0465c8c5..5e1cee478293 100644 --- a/sdk/canton/community/common/src/test/scala/com/digitalasset/canton/topology/processing/AuthorizationGraphTest.scala +++ b/sdk/canton/community/common/src/test/scala/com/digitalasset/canton/topology/processing/AuthorizationGraphTest.scala @@ -93,33 +93,33 @@ class AuthorizationGraphTest "under normal conditions" should { "add simple" in { val graph = mkGraph - graph.add(nsk1k1) + graph.replace(nsk1k1) check(graph, key1, requireRoot = true, valid = true) check(graph, key1, requireRoot = false, valid = true) check(graph, key2, requireRoot = false, valid = false) } "support longer chains" in { val graph = mkGraph - graph.add(nsk1k1) - graph.add(nsk2k1) - graph.add(nsk3k2) + graph.replace(nsk1k1) + graph.replace(nsk2k1) + graph.replace(nsk3k2) check(graph, key2, requireRoot = false, valid = true) check(graph, key3, requireRoot = false, valid = true) } "support removal" in { val graph = mkGraph - graph.add(nsk1k1) - graph.add(nsk2k1) + graph.replace(nsk1k1) + graph.replace(nsk2k1) graph.remove(nsk2k1_remove) check(graph, key2, requireRoot = false, valid = false) check(graph, key1, requireRoot = false, valid = true) } "support breaking and re-creating chains" in { val graph = mkGraph - graph.add(nsk1k1) - graph.add(nsk2k1) - graph.add(nsk3k2) + graph.replace(nsk1k1) + graph.replace(nsk2k1) + graph.replace(nsk3k2) check(graph, key2, requireRoot = false, valid = true) check(graph, key3, requireRoot = false, valid = true) loggerFactory.assertLogs( @@ -128,16 +128,16 @@ class AuthorizationGraphTest ) check(graph, key2, requireRoot = false, valid = false) check(graph, key3, requireRoot = false, valid = false) - graph.add(nsk2k1) + graph.replace(nsk2k1) check(graph, key3, requireRoot = false, valid = true) } "not support several chains" in { val graph = mkGraph - graph.add(nsk1k1) - graph.add(nsk2k1) - graph.add(nsk3k2) + graph.replace(nsk1k1) + graph.replace(nsk2k1) + graph.replace(nsk3k2) check(graph, key3, requireRoot = false, valid = true) - graph.add(nsk3k1_nonRoot) + graph.replace(nsk3k1_nonRoot) check(graph, key3, requireRoot = false, valid = true) graph.remove(nsk3k1_nonRoot_remove) check(graph, key3, requireRoot = false, valid = false) @@ -145,14 +145,14 @@ class AuthorizationGraphTest "deal with cycles" in { val graph = mkGraph - graph.add(nsk1k1) - graph.add(nsk2k1) - graph.add(nsk3k2) + graph.replace(nsk1k1) + graph.replace(nsk2k1) + graph.replace(nsk3k2) val danglingKeys = List(key2, key3).map(_.fingerprint).sorted.mkString(", ") loggerFactory.assertLogs( // this overwrites nsk2k1, leading to a break in the authorization chain for the now dangling k2 and k3 - graph.add(nsk2k3), + graph.replace(nsk2k3), _.warningMessage should (include regex s"dangling.*$danglingKeys"), ) check(graph, key1, requireRoot = false, valid = true) @@ -162,9 +162,9 @@ class AuthorizationGraphTest "deal with root revocations" in { val graph = mkGraph - graph.add(nsk1k1) - graph.add(nsk2k1) - graph.add(nsk3k2) + graph.replace(nsk1k1) + graph.replace(nsk2k1) + graph.replace(nsk3k2) val danglingKeys = List(key1, key2, key3).map(_.fingerprint).sorted.mkString(", ") loggerFactory.assertLogs( @@ -178,8 +178,8 @@ class AuthorizationGraphTest "correctly distinguish on root delegations" in { val graph = mkGraph - graph.add(nsk1k1) - graph.add(nsk3k1_nonRoot) + graph.replace(nsk1k1) + graph.replace(nsk3k1_nonRoot) check(graph, key1, requireRoot = true, valid = 
true) check(graph, key3, requireRoot = true, valid = false) check(graph, key3, requireRoot = false, valid = true) @@ -187,8 +187,8 @@ class AuthorizationGraphTest "deal with same mappings used twice" in { val graph = mkGraph - graph.add(nsk1k1) - graph.add(nsk2k1) + graph.replace(nsk1k1) + graph.replace(nsk2k1) check(graph, key2, requireRoot = true, valid = true) // test that random key is not authorized check(graph, key3, requireRoot = false, valid = false) @@ -196,7 +196,7 @@ class AuthorizationGraphTest graph.remove(nsk2k1_remove) check(graph, key2, requireRoot = true, valid = false) // add other certificate (we don't remember removes, so we can do that in this test) - graph.add(nsk2k1) + graph.replace(nsk2k1) check(graph, key2, requireRoot = true, valid = true) } @@ -205,7 +205,7 @@ class AuthorizationGraphTest val fakeNs = Namespace(key8.fingerprint) val nsk1k1 = mkAdd(mkNs(fakeNs, key1, isRootDelegation = true), key1) loggerFactory.assertThrowsAndLogs[IllegalArgumentException]( - graph.add(nsk1k1), + graph.replace(nsk1k1), _.errorMessage should include("internal error"), ) } @@ -213,9 +213,9 @@ class AuthorizationGraphTest "test removal of transactions authorized with different keys" in { // can actually do it (add k2 with one key, remove k2 permission with another, but fail to remove it with the other is not valid) val graph = mkGraph - graph.add(nsk1k1) - graph.add(nsk2k1) - graph.add(nsk3k2) + graph.replace(nsk1k1) + graph.replace(nsk2k1) + graph.replace(nsk3k2) check(graph, key3, requireRoot = true, valid = true) graph.remove(replaceSignature(nsk3k2_remove, key1)) @@ -225,63 +225,46 @@ class AuthorizationGraphTest // tested elsewhere: an authorized transaction is rejected if the signature does not match the content or key "under adverse conditions" should { - "prevent an unauthorized key to authorize an addition" in { + "ensure that an unauthorized addition has no effect" in { val graph = mkGraph - graph.add(nsk1k1) - graph.add(nsk3k2) shouldBe false + graph.replace(nsk1k1) + check(graph, key2, requireRoot = false, valid = false) + + loggerFactory.assertLogs( + graph.replace(nsk3k2), + _.warningMessage should (include regex s"${namespace} are dangling: .*${key3.fingerprint}"), + ) check(graph, key3, requireRoot = false, valid = false) } - "prevent an unauthorized key to authorize a removal" in { + + "process an unauthorized removal" in { val graph = mkGraph - graph.add(nsk1k1) - graph.add(nsk2k1) - check(graph, key2, requireRoot = false, valid = true) + graph.replace(nsk1k1) + graph.replace(nsk2k1) + check(graph, key2, requireRoot = true, valid = true) val fakeRemove = replaceSignature(nsk2k1_remove, key6) - graph.remove(fakeRemove) shouldBe false - check(graph, key2, requireRoot = false, valid = true) - graph.remove(nsk2k1_remove) + check(graph, key6, requireRoot = false, valid = false) + graph.remove(fakeRemove) check(graph, key2, requireRoot = false, valid = false) } - "prevent a non-root authorization to authorize other namespace delegations" in { - val graph = mkGraph - graph.add(nsk1k1) - graph.add(nsk3k1_nonRoot) - check(graph, key3, requireRoot = false, valid = true) - graph.add(nsk4k3) shouldBe false - check(graph, key4, requireRoot = false, valid = false) - } - "prevent a non-root authorization to authorize other authorization when adding via unauthorizedAdd (restart scenario)" in { - /* This could happen in the following scenario: - 1. root -k1-> NSD(k2,root=true) -k2-> NSD(k3,root=true) - 2. downgrade to NSD(k3,root=false) - 3. downgrade to NSD(k2,root=false) - 4. 
restart the node - 5. upon restart, the NSDs from the topology store are added via unauthorizedAdd, which does not go through - a check that the NSDs were authorized by a root delegation - */ + "ensure that a non-root authorization does not authorize other namespace delegations" in { val graph = mkGraph - graph.add(nsk1k1) - graph.add(nsk3k1_nonRoot) + graph.replace(nsk1k1) + graph.replace(nsk3k1_nonRoot) check(graph, key3, requireRoot = false, valid = true) check(graph, key3, requireRoot = true, valid = false) // add a root delegation signed by k3 via unauthorized add loggerFactory.assertLogs( - graph.unauthorizedAdd(Seq(nsk4k3)), - _.warningMessage should (include regex s"${namespace} are dangling: .*${key4.fingerprint}"), + graph.replace(Seq(nsk4k3, nsk5k3_nonRoot)), + _.warningMessage should ((include regex s"${namespace} are dangling: .*${key4.fingerprint}") and (include regex s"${namespace} are dangling: .*${key5.fingerprint}")), ) check(graph, key4, requireRoot = false, valid = false) - - // add a non-root delegation signed by k3 via unauthorized add - loggerFactory.assertLogs( - graph.unauthorizedAdd(Seq(nsk5k3_nonRoot)), - _.warningMessage should (include regex s"${namespace} are dangling: .*${key5.fingerprint}"), - ) check(graph, key5, requireRoot = false, valid = false) } - "prevent downgrading to a non-root delegation to leak previous authorization" in { + "update authorizations when downgrading to non-root delegations" in { /* This could happen in the following scenario: 1. root -k1-> NSD(k2,root=true) -k2-> NSD(k3,root=true) 2. downgrade to NSD(k3,root=false) @@ -289,15 +272,15 @@ class AuthorizationGraphTest */ val graph = mkGraph - graph.add(nsk1k1) + graph.replace(nsk1k1) // first set up the root delegations and verify that they work - graph.add(nsk2k1) - graph.add(nsk3k2) + graph.replace(nsk2k1) + graph.replace(nsk3k2) check(graph, key2, requireRoot = true, valid = true) check(graph, key3, requireRoot = true, valid = true) // now downgrade in reverse order - graph.add(nsk3k2_nonRoot) + graph.replace(nsk3k2_nonRoot) check(graph, key2, requireRoot = true, valid = true) // key3 still has a non-root delegation check(graph, key3, requireRoot = false, valid = true) @@ -306,7 +289,7 @@ class AuthorizationGraphTest loggerFactory.assertLogs( // downgrading key2 to a non-root delegation breaks the authorization chain for key3 - graph.add(nsk2k1_nonRoot), + graph.replace(nsk2k1_nonRoot), _.warningMessage should (include regex s"${namespace} are dangling: .*${key3.fingerprint}"), ) // key2 only has a non-root delegation @@ -318,27 +301,16 @@ class AuthorizationGraphTest check(graph, key3, requireRoot = false, valid = false) } - "prevent a non-root authorization to authorize a removal" in { - val graph = mkGraph - graph.add(nsk1k1) - graph.add(nsk3k1_nonRoot) - graph.add(nsk2k1) - check(graph, key3, requireRoot = false, valid = true) - check(graph, key2, requireRoot = true, valid = true) - graph.remove(replaceSignature(nsk2k1_remove, key3)) shouldBe false - check(graph, key2, requireRoot = true, valid = true) - } - "ensure once a delegation is revoked, all depending authorizations will become unauthorized" in { val graph = mkGraph val nsk4k3 = mkAdd(mkNs(namespace, key4, isRootDelegation = true), key3) - val nsk5k2 = mkAdd(mkNs(namespace, key5, isRootDelegation = true), key3) - graph.add(nsk1k1) - graph.add(nsk2k1) - graph.add(nsk3k2) - graph.add(nsk4k3) - graph.add(nsk5k2) - Seq(key3, key4, key5).foreach(check(graph, _, requireRoot = false, valid = true)) + val nsk5k3 = 
mkAdd(mkNs(namespace, key5, isRootDelegation = true), key3) + graph.replace(nsk1k1) + graph.replace(nsk2k1) + graph.replace(nsk3k2) + graph.replace(nsk4k3) + graph.replace(nsk5k3) + Seq(key3, key4, key5).foreach(check(graph, _, requireRoot = true, valid = true)) loggerFactory.assertLogs( { graph.remove(nsk2k1_remove) diff --git a/sdk/canton/community/common/src/test/scala/com/digitalasset/canton/topology/processing/DecentralizedNamespaceAuthorizationGraphTest.scala b/sdk/canton/community/common/src/test/scala/com/digitalasset/canton/topology/processing/DecentralizedNamespaceAuthorizationGraphTest.scala index 41b26a007bb9..c88dd9c6b8bf 100644 --- a/sdk/canton/community/common/src/test/scala/com/digitalasset/canton/topology/processing/DecentralizedNamespaceAuthorizationGraphTest.scala +++ b/sdk/canton/community/common/src/test/scala/com/digitalasset/canton/topology/processing/DecentralizedNamespaceAuthorizationGraphTest.scala @@ -53,16 +53,15 @@ class DecentralizedNamespaceAuthorizationGraphTest implicit class DecentralizedNamespaceAuthorizationGraphExtension( dns: DecentralizedNamespaceAuthorizationGraph ) { - def addAuth(authorizedNSD: AuthorizedNamespaceDelegation) = { - val found = dns.ownerGraphs.find(_.namespace == authorizedNSD.mapping.namespace) - found.exists(_.add(authorizedNSD)) - } - - def removeAuth(authorizedNSD: AuthorizedNamespaceDelegation) = + def addAuth(authorizedNSD: AuthorizedNamespaceDelegation): Unit = dns.ownerGraphs .find(_.namespace == authorizedNSD.mapping.namespace) - .exists(_.remove(authorizedNSD)) + .foreach(_.replace(authorizedNSD)) + def removeAuth(authorizedNSD: AuthorizedNamespaceDelegation): Unit = + dns.ownerGraphs + .find(_.namespace == authorizedNSD.mapping.namespace) + .foreach(_.remove(authorizedNSD)) } def mkAdd( @@ -136,9 +135,9 @@ class DecentralizedNamespaceAuthorizationGraphTest import fixture.factory.SigningKeys.* "work for a simple quorum" in { val graph = mkGraph - graph.addAuth(ns1k1k1) shouldBe true - graph.addAuth(ns2k2k2) shouldBe true - graph.addAuth(ns3k3k3) shouldBe true + graph.addAuth(ns1k1k1) + graph.addAuth(ns2k2k2) + graph.addAuth(ns3k3k3) // Individual keys are not enough for { diff --git a/sdk/canton/community/common/src/test/scala/com/digitalasset/canton/topology/processing/IncomingTopologyTransactionAuthorizationValidatorTest.scala b/sdk/canton/community/common/src/test/scala/com/digitalasset/canton/topology/processing/IncomingTopologyTransactionAuthorizationValidatorTest.scala index b58e6244f93b..1d41a535789f 100644 --- a/sdk/canton/community/common/src/test/scala/com/digitalasset/canton/topology/processing/IncomingTopologyTransactionAuthorizationValidatorTest.scala +++ b/sdk/canton/community/common/src/test/scala/com/digitalasset/canton/topology/processing/IncomingTopologyTransactionAuthorizationValidatorTest.scala @@ -5,7 +5,6 @@ package com.digitalasset.canton.topology.processing import cats.Apply import cats.instances.list.* -import cats.syntax.foldable.* import com.daml.nonempty.NonEmpty import com.digitalasset.canton.config.RequireTypes.PositiveInt import com.digitalasset.canton.crypto.SignatureCheckError.InvalidSignature @@ -17,6 +16,7 @@ import com.digitalasset.canton.topology.store.TopologyTransactionRejection.{ NoDelegationFoundForKeys, NotAuthorized, } +import com.digitalasset.canton.topology.store.ValidatedTopologyTransaction.GenericValidatedTopologyTransaction import com.digitalasset.canton.topology.store.* import com.digitalasset.canton.topology.store.memory.InMemoryTopologyStore import 
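
The renames above replace AuthorizationGraph.add and unauthorizedAdd with a single replace that returns Unit: an unauthorized delegation is no longer rejected via a Boolean result, it is stored and the affected keys are simply reported as dangling. A toy sketch of that shape, deliberately simplified to plain key-to-key edges (illustrative names only, not Canton's AuthorizationGraph):

// target key -> (signing key, isRootDelegation); replace/remove return Unit and dangling
// keys are only reported, never rejected up front.
final class ToyAuthorizationGraph(rootKey: String) {
  private var delegations = Map.empty[String, (String, Boolean)]

  def replace(target: String, signedBy: String, isRoot: Boolean): Unit = {
    delegations = delegations.updated(target, (signedBy, isRoot))
    val dangling = delegations.keys.filterNot(k => isAuthorized(k, requireRoot = false))
    if (dangling.nonEmpty) println(s"warning: keys are dangling: ${dangling.mkString(", ")}")
  }

  def remove(target: String): Unit = {
    delegations = delegations - target
  }

  // A key is authorized if it chains back to the root namespace key; every hop except
  // possibly the last must be a root delegation.
  def isAuthorized(key: String, requireRoot: Boolean, visited: Set[String] = Set.empty): Boolean =
    if (key == rootKey) true
    else if (visited.contains(key)) false // break delegation cycles
    else
      delegations.get(key) match {
        case Some((signer, isRoot)) if isRoot || !requireRoot =>
          isAuthorized(signer, requireRoot = true, visited + key)
        case _ => false
      }
}

object ToyAuthorizationGraphDemo extends App {
  val graph = new ToyAuthorizationGraph("k1")
  graph.replace("k2", signedBy = "k1", isRoot = true)
  graph.replace("k3", signedBy = "k2", isRoot = false)
  assert(graph.isAuthorized("k3", requireRoot = false))
  assert(!graph.isAuthorized("k3", requireRoot = true))
  graph.remove("k2") // breaks the chain, so k3 becomes unauthorized
  assert(!graph.isAuthorized("k3", requireRoot = false))
}
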
com.digitalasset.canton.topology.transaction.SignedTopologyTransaction.GenericSignedTopologyTransaction @@ -28,6 +28,8 @@ import com.digitalasset.canton.{BaseTest, HasExecutionContext, ProtocolVersionCh import com.google.protobuf.ByteString import org.scalatest.wordspec.AsyncWordSpec +import scala.concurrent.Future + class IncomingTopologyTransactionAuthorizationValidatorTest extends AsyncWordSpec with BaseTest @@ -75,7 +77,7 @@ class IncomingTopologyTransactionAuthorizationValidatorTest toValidate: Seq[GenericSignedTopologyTransaction], inStore: Map[MappingHash, GenericSignedTopologyTransaction], expectFullAuthorization: Boolean, - )(implicit traceContext: TraceContext) = { + )(implicit traceContext: TraceContext): Future[Seq[GenericValidatedTopologyTransaction]] = MonadUtil .sequentialTraverse(toValidate)(tx => validator.validateAndUpdateHeadAuthState( @@ -85,11 +87,6 @@ class IncomingTopologyTransactionAuthorizationValidatorTest expectFullAuthorization, ) ) - .map { results => - val (aggregations, transactions) = results.unzip - (aggregations.combineAll, transactions) - } - } "receiving transactions with signatures" should { "succeed to add if the signature is valid" in { @@ -104,7 +101,7 @@ class IncomingTopologyTransactionAuthorizationValidatorTest expectFullAuthorization = true, ) } yield { - check(res._2, Seq(None, None)) + check(res, Seq(None, None)) } } "fail to add if the signature is invalid" in { @@ -112,7 +109,7 @@ class IncomingTopologyTransactionAuthorizationValidatorTest import Factory.* val invalid = ns1k2_k1.copy(signatures = ns1k1_k1.signatures) for { - (_, validatedTopologyTransactions) <- validate( + validatedTopologyTransactions <- validate( validator, ts(0), List(ns1k1_k1, invalid), @@ -140,7 +137,7 @@ class IncomingTopologyTransactionAuthorizationValidatorTest val okmS1k7_k1_missing_k7 = okmS1k7_k1.removeSignatures(Set(SigningKeys.key7.fingerprint)).value for { - (_, validatedTopologyTransactions) <- validate( + validatedTopologyTransactions <- validate( validator, ts(0), List(ns1k1_k1, okmS1k7_k1_missing_k7), @@ -167,8 +164,6 @@ class IncomingTopologyTransactionAuthorizationValidatorTest DomainTrustCertificate( pid, wrongDomain, - false, - Seq.empty, ), Factory.SigningKeys.key1, ) @@ -182,7 +177,7 @@ class IncomingTopologyTransactionAuthorizationValidatorTest ) } yield { check( - res._2, + res, Seq( None, Some({ @@ -256,7 +251,7 @@ class IncomingTopologyTransactionAuthorizationValidatorTest expectFullAuthorization = false, ) } yield { - result._2.loneElement.rejectionReason shouldBe None + result.loneElement.rejectionReason shouldBe None } } } @@ -274,7 +269,7 @@ class IncomingTopologyTransactionAuthorizationValidatorTest expectFullAuthorization = true, ) } yield { - check(res._2, Seq(None, None, None)) + check(res, Seq(None, None, None)) } } "fail if the signature of a root certificate is not valid" in { @@ -297,7 +292,7 @@ class IncomingTopologyTransactionAuthorizationValidatorTest ) } yield { check( - res._2, + res, Seq( Some({ case TopologyTransactionRejection.SignatureCheckFailed( @@ -324,7 +319,7 @@ class IncomingTopologyTransactionAuthorizationValidatorTest ) } yield { check( - res._2, + res, Seq( None, Some(_ == NoDelegationFoundForKeys(Set(SigningKeys.key6.fingerprint))), @@ -356,7 +351,7 @@ class IncomingTopologyTransactionAuthorizationValidatorTest expectFullAuthorization = true, ) } yield { - check(res._2, Seq(None, None)) + check(res, Seq(None, None)) } } @@ -374,7 +369,7 @@ class IncomingTopologyTransactionAuthorizationValidatorTest } yield { check( - 
res._2, + res, Seq( None, Some(_ == NoDelegationFoundForKeys(Set(SigningKeys.key2.fingerprint))), @@ -402,7 +397,7 @@ class IncomingTopologyTransactionAuthorizationValidatorTest expectFullAuthorization = true, ) } yield { - check(res._2, Seq(None, None, None, None)) + check(res, Seq(None, None, None, None)) } } "fail if transaction is not properly authorized" in { @@ -418,7 +413,7 @@ class IncomingTopologyTransactionAuthorizationValidatorTest ) } yield { check( - res._2, + res, Seq( Some(_ == NoDelegationFoundForKeys(Set(SigningKeys.key1.fingerprint))), None, @@ -444,7 +439,7 @@ class IncomingTopologyTransactionAuthorizationValidatorTest expectFullAuthorization = true, ) } yield { - check(res._2, Seq(None, None, None, None, None, None, None)) + check(res, Seq(None, None, None, None, None, None, None)) } } "fail if transaction is not properly authorized" in { @@ -469,7 +464,7 @@ class IncomingTopologyTransactionAuthorizationValidatorTest } yield { check( - resultExpectFullAuthorization._2, + resultExpectFullAuthorization, Seq( None, Some(_ == NotAuthorized), @@ -478,7 +473,7 @@ class IncomingTopologyTransactionAuthorizationValidatorTest ) check( - resultDontExpectFullAuthorization._2, + resultDontExpectFullAuthorization, Seq( None, Some(_ == NotAuthorized), @@ -509,7 +504,7 @@ class IncomingTopologyTransactionAuthorizationValidatorTest expectFullAuthorization = true, ) } yield { - check(res._2, Seq(None, None, None)) + check(res, Seq(None, None, None)) } } } @@ -529,7 +524,7 @@ class IncomingTopologyTransactionAuthorizationValidatorTest expectFullAuthorization = true, ) } yield { - check(res._2, Seq(None, None, None, None, None)) + check(res, Seq(None, None, None, None, None)) } } @@ -548,7 +543,7 @@ class IncomingTopologyTransactionAuthorizationValidatorTest ) } yield { check( - res._2, + res, Seq( None, None, @@ -564,141 +559,6 @@ class IncomingTopologyTransactionAuthorizationValidatorTest } - "correctly determine cascading update for" should { - "namespace additions" in { - val store = - new InMemoryTopologyStore(TopologyStoreId.AuthorizedStore, loggerFactory, timeouts) - val validator = mk(store) - import Factory.* - for { - _ <- store.update( - SequencedTime(ts(0)), - EffectiveTime(ts(0)), - removeMapping = Map.empty, - removeTxs = Set.empty, - additions = List(ns6k6_k6).map(ValidatedTopologyTransaction(_)), - ) - res <- validate( - validator, - ts(1), - List(ns1k1_k1, okm1bk5k1E_k1, p1p6_k6), - Map.empty, - expectFullAuthorization = true, - ) - } yield { - res._1.cascadingNamespaces shouldBe Set(ns1) - } - } - - "namespace removals" in { - val store = - new InMemoryTopologyStore(TopologyStoreId.AuthorizedStore, loggerFactory, timeouts) - val validator = mk(store) - import Factory.* - val Rns1k1_k1 = mkTrans(ns1k1_k1.transaction.reverse) - for { - _ <- store.update( - SequencedTime(ts(0)), - EffectiveTime(ts(0)), - removeMapping = Map.empty, - removeTxs = Set.empty, - additions = List(ns1k1_k1).map(ValidatedTopologyTransaction(_)), - ) - res <- validate( - validator, - ts(1), - List(Rns1k1_k1, okm1bk5k1E_k1), - Map(Rns1k1_k1.mapping.uniqueKey -> ns1k1_k1), - expectFullAuthorization = true, - ) - } yield { - check( - res._2, - Seq(None, Some(_ == NotAuthorized)), - ) - res._1.cascadingNamespaces shouldBe Set(ns1) - } - } - - "identifier additions and removals" in { - val store = - new InMemoryTopologyStore(TopologyStoreId.AuthorizedStore, loggerFactory, timeouts) - val validator = mk(store) - import Factory.* - val Rid1ak4_k1 = mkTrans(id1ak4_k1.transaction.reverse) - for { - _ <- 
store.update( - SequencedTime(ts(0)), - EffectiveTime(ts(0)), - removeMapping = Map.empty, - removeTxs = Set.empty, - additions = List(ns1k1_k1).map(ValidatedTopologyTransaction(_)), - ) - res <- validate( - validator, - ts(1), - List(id1ak4_k1), - Map.empty, - expectFullAuthorization = true, - ) - res2 <- validate( - validator, - ts(2), - List(Rid1ak4_k1), - Map.empty, - expectFullAuthorization = true, - ) - } yield { - res._1.cascadingNamespaces shouldBe Set() - res._1.cascadingUids shouldBe Set(uid1a) - res2._1.cascadingUids shouldBe Set(uid1a) - } - } - - "cascading invalidation pre-existing identifier uids" in { - val store = - new InMemoryTopologyStore(TopologyStoreId.AuthorizedStore, loggerFactory, timeouts) - val validator = mk(store) - import Factory.* - import Factory.SigningKeys.{ec as _, *} - // scenario: we have id1ak4_k2 previously loaded. now we get a removal on k2. we need to ensure that - // nothing can be added by k4 - val Rns1k2_k1 = mkTrans(ns1k2_k1.transaction.reverse) - val id6ak7_k6 = mkAdd(IdentifierDelegation(uid6, key7), key6) - for { - _ <- store.update( - SequencedTime(ts(0)), - EffectiveTime(ts(0)), - removeMapping = Map.empty, - removeTxs = Set.empty, - additions = - List(ns1k1_k1, ns1k2_k1, id1ak4_k2, ns6k6_k6).map(ValidatedTopologyTransaction(_)), - ) - res <- validate( - validator, - ts(1), - List(p1p6_k2k6, Rns1k2_k1, id6ak7_k6, p1p6_k2), - Map( - ns1k2_k1.mapping.uniqueKey -> ns1k2_k1 - ), - expectFullAuthorization = true, - ) - } yield { - check( - res._2, - Seq( - None, - None, - None, - Some(_ == NoDelegationFoundForKeys(Set(SigningKeys.key2.fingerprint))), - ), - ) - res._1.cascadingNamespaces shouldBe Set(ns1) - res._1.filteredCascadingUids shouldBe Set(uid6) - } - } - } - "observing PartyToParticipant mappings" should { "allow participants to unilaterally disassociate themselves from parties" in { val store = @@ -810,11 +670,11 @@ class IncomingTopologyTransactionAuthorizationValidatorTest expectFullAuthorization = false, ) } yield { - check(hostingResult._2, Seq(None)) - check(unhostingResult._2, Seq(None)) - check(unhostingMixedResult._2, Seq(None)) + check(hostingResult, Seq(None)) + check(unhostingResult, Seq(None)) + check(unhostingMixedResult, Seq(None)) check( - unhostingAndThresholdChangeResult._2, + unhostingAndThresholdChangeResult, Seq(Some(_ == NoDelegationFoundForKeys(Set(SigningKeys.key2.fingerprint)))), ) } @@ -847,7 +707,7 @@ class IncomingTopologyTransactionAuthorizationValidatorTest expectFullAuthorization = false, ) } yield { - check(res._2, Seq(None)) + check(res, Seq(None)) } } @@ -890,7 +750,7 @@ class IncomingTopologyTransactionAuthorizationValidatorTest expectFullAuthorization = true, ) } yield { - check(res._2, Seq(None)) + check(res, Seq(None)) } } @@ -908,13 +768,13 @@ class IncomingTopologyTransactionAuthorizationValidatorTest Map.empty, expectFullAuthorization = true, ) - _ = resultAddOwners._2.foreach(_.rejectionReason shouldBe None) + _ = resultAddOwners.foreach(_.rejectionReason shouldBe None) _ <- store.update( SequencedTime(ts(0)), EffectiveTime(ts(0)), removeMapping = Map.empty, removeTxs = Set.empty, - additions = resultAddOwners._2, + additions = resultAddOwners, ) // 2. 
validate and store the decentralized namespace definition @@ -926,13 +786,13 @@ class IncomingTopologyTransactionAuthorizationValidatorTest Map.empty, expectFullAuthorization = true, ) - _ = resultAddDND._2.foreach(_.rejectionReason shouldBe None) + _ = resultAddDND.foreach(_.rejectionReason shouldBe None) _ <- store.update( SequencedTime(ts(1)), EffectiveTime(ts(1)), removeMapping = Map.empty, removeTxs = Set.empty, - additions = resultAddDND._2, + additions = resultAddDND, ) // 3. now process the removal of the decentralized namespace definition @@ -944,13 +804,13 @@ class IncomingTopologyTransactionAuthorizationValidatorTest Map(dns1.mapping.uniqueKey -> dns1), expectFullAuthorization = true, ) - _ = resRemoveDND._2.foreach(_.rejectionReason shouldBe None) + _ = resRemoveDND.foreach(_.rejectionReason shouldBe None) _ <- store.update( SequencedTime(ts(2)), EffectiveTime(ts(2)), removeMapping = Map(dns1Removal.mapping.uniqueKey -> dns1Removal.serial), removeTxs = Set.empty, - additions = resRemoveDND._2, + additions = resRemoveDND, ) // 4. Now to the actual test: try to authorize something for the decentralized namespace. @@ -965,7 +825,7 @@ class IncomingTopologyTransactionAuthorizationValidatorTest ) } yield { check( - resultUnauthorizedIDD._2, + resultUnauthorizedIDD, Seq( Some( _ == NoDelegationFoundForKeys( @@ -1031,7 +891,7 @@ class IncomingTopologyTransactionAuthorizationValidatorTest ) } yield { - val validatedPkgTx = result_packageVetting._2.loneElement + val validatedPkgTx = result_packageVetting.loneElement validatedPkgTx.rejectionReason shouldBe None withClue("package transaction is proposal")( @@ -1111,7 +971,7 @@ class IncomingTopologyTransactionAuthorizationValidatorTest ) } yield { - val validatedPkgTx = resultPackageVetting._2.loneElement + val validatedPkgTx = resultPackageVetting.loneElement val signatures = validatedPkgTx.transaction.signatures validatedPkgTx.rejectionReason shouldBe None @@ -1119,7 +979,7 @@ class IncomingTopologyTransactionAuthorizationValidatorTest _.id )) - resultOnlySuperfluousSignatures._2.loneElement.rejectionReason shouldBe Some( + resultOnlySuperfluousSignatures.loneElement.rejectionReason shouldBe Some( TopologyTransactionRejection.NoDelegationFoundForKeys(Set(key3.id, key5.id)) ) } @@ -1159,21 +1019,22 @@ class IncomingTopologyTransactionAuthorizationValidatorTest isProposal: Boolean, expectFullAuthorization: Boolean, signingKeys: SigningPublicKey* - ) = TraceContext.withNewTraceContext { freshTraceContext => - validate( - validator, - ts(1), - toValidate = List( - mkTrans( - pkgTx, - isProposal = isProposal, - signingKeys = NonEmpty.from(signingKeys.toSet).value, - ) - ), - inStore = Map.empty, - expectFullAuthorization = expectFullAuthorization, - )(freshTraceContext) - .map(_._2.loneElement) + ): Future[GenericValidatedTopologyTransaction] = TraceContext.withNewTraceContext { + freshTraceContext => + validate( + validator, + ts(1), + toValidate = List( + mkTrans( + pkgTx, + isProposal = isProposal, + signingKeys = NonEmpty.from(signingKeys.toSet).value, + ) + ), + inStore = Map.empty, + expectFullAuthorization = expectFullAuthorization, + )(freshTraceContext) + .map(_.loneElement) } for { @@ -1214,7 +1075,7 @@ class IncomingTopologyTransactionAuthorizationValidatorTest ) // try with 2/3 signatures - key1_key8_notAuthorized <- MonadUtil.sequentialTraverse(combinationsThatAreNotAuthorized) { + _ <- MonadUtil.sequentialTraverse(combinationsThatAreNotAuthorized) { case (isProposal, expectFullAuthorization) => clueF( s"key1, key8: 
isProposal=$isProposal, expectFullAuthorization=$expectFullAuthorization" diff --git a/sdk/canton/community/common/src/test/scala/com/digitalasset/canton/topology/processing/TopologyTransactionTestFactory.scala b/sdk/canton/community/common/src/test/scala/com/digitalasset/canton/topology/processing/TopologyTransactionTestFactory.scala index 7a2d7053ad35..b722253ef159 100644 --- a/sdk/canton/community/common/src/test/scala/com/digitalasset/canton/topology/processing/TopologyTransactionTestFactory.scala +++ b/sdk/canton/community/common/src/test/scala/com/digitalasset/canton/topology/processing/TopologyTransactionTestFactory.scala @@ -94,7 +94,7 @@ class TopologyTransactionTestFactory(loggerFactory: NamedLoggerFactory, initEc: } val dtcp1_k1 = - mkAdd(DomainTrustCertificate(participant1, DomainId(uid1a), false, Seq.empty), key1) + mkAdd(DomainTrustCertificate(participant1, DomainId(uid1a)), key1) val defaultDomainParameters = TestDomainParameters.defaultDynamic @@ -227,7 +227,7 @@ class TopologyTransactionTestFactory(loggerFactory: NamedLoggerFactory, initEc: ) val decentralizedNamespaceOwners = List(ns1k1_k1, ns8k8_k8, ns9k9_k9) val decentralizedNamespaceWithMultipleOwnerThreshold = - List(ns1k1_k1, ns8k8_k8, ns9k9_k9, ns7k7_k7, dns1) + List(ns1k1_k1, ns8k8_k8, ns9k9_k9, dns1) private val dndOwners = NonEmpty(Set, key1.fingerprint, key2.fingerprint, key3.fingerprint).map(Namespace(_)) diff --git a/sdk/canton/community/common/src/test/scala/com/digitalasset/canton/topology/store/TopologyStoreTestData.scala b/sdk/canton/community/common/src/test/scala/com/digitalasset/canton/topology/store/TopologyStoreTestData.scala index 33f91f4baccf..eb7b6b280939 100644 --- a/sdk/canton/community/common/src/test/scala/com/digitalasset/canton/topology/store/TopologyStoreTestData.scala +++ b/sdk/canton/community/common/src/test/scala/com/digitalasset/canton/topology/store/TopologyStoreTestData.scala @@ -128,16 +128,12 @@ class TopologyStoreTestData( DomainTrustCertificate( participantId2, domainId1, - transferOnlyToGivenTargetDomains = false, - targetDomains = Seq.empty, ) ) val tx6_DTC_Update = makeSignedTx( DomainTrustCertificate( participantId2, domainId1, - transferOnlyToGivenTargetDomains = true, - targetDomains = Seq(domainId1), ), serial = PositiveInt.tryCreate(2), ) diff --git a/sdk/canton/community/common/src/test/scala/com/digitalasset/canton/topology/transaction/ValidatingTopologyMappingChecksTest.scala b/sdk/canton/community/common/src/test/scala/com/digitalasset/canton/topology/transaction/ValidatingTopologyMappingChecksTest.scala index e97fbc034fb9..c69d8c907a9f 100644 --- a/sdk/canton/community/common/src/test/scala/com/digitalasset/canton/topology/transaction/ValidatingTopologyMappingChecksTest.scala +++ b/sdk/canton/community/common/src/test/scala/com/digitalasset/canton/topology/transaction/ValidatingTopologyMappingChecksTest.scala @@ -11,10 +11,7 @@ import com.digitalasset.canton.topology.DefaultTestIdentities.{mediatorId, seque import com.digitalasset.canton.topology.* import com.digitalasset.canton.topology.processing.{EffectiveTime, SequencedTime} import com.digitalasset.canton.topology.store.TopologyStoreId.AuthorizedStore -import com.digitalasset.canton.topology.store.TopologyTransactionRejection.{ - InvalidTopologyMapping, - PartyExceedsHostingLimit, -} +import com.digitalasset.canton.topology.store.TopologyTransactionRejection.InvalidTopologyMapping import com.digitalasset.canton.topology.store.memory.InMemoryTopologyStore import com.digitalasset.canton.topology.store.{ 
StoredTopologyTransaction, @@ -288,38 +285,63 @@ class ValidatingTopologyMappingChecksTest } } - "reject when the party exceeds the explicitly issued PartyHostingLimits" in { - def mkPTP(numParticipants: Int) = { - val hostingParticipants = Seq[HostingParticipant]( - participant1 -> Observation, - participant2 -> Submission, - participant3 -> Submission, - ) - factory.mkAdd( - PartyToParticipant.tryCreate( - partyId = party1, + "handle conflicts between partyId and existing admin parties from domain trust certificates" in { + // the defaults below are a valid explicit admin party allocation for participant1.adminParty + def mkPTP( + partyId: PartyId = participant1.adminParty, + participants: Seq[HostingParticipant] = + Seq(HostingParticipant(participant1, Submission)), + groupdAddressing: Boolean = false, + ) = factory.mkAdd( + PartyToParticipant + .create( + partyId = partyId, domainId = None, threshold = PositiveInt.one, - participants = hostingParticipants.take(numParticipants), - groupAddressing = false, + participants = participants, + groupAddressing = groupdAddressing, ) - ) - } + .value + ) val (checks, store) = mk() - val limits = factory.mkAdd(PartyHostingLimits(domainId, party1, 2)) - addToStore(store, p1_otk, p1_dtc, p2_otk, p2_dtc, p3_otk, p3_dtc, limits) + addToStore(store, p1_otk, p1_dtc, p2_otk, p2_dtc) + + // handle the happy case + checkTransaction(checks, mkPTP()) shouldBe Right(()) - // 2 participants are at the limit - val twoParticipants = mkPTP(numParticipants = 2) - checkTransaction(checks, twoParticipants) shouldBe Right(()) + // unhappy scenarios + val invalidParticipantPermission = Seq( + mkPTP(participants = Seq(HostingParticipant(participant1, Confirmation))), + mkPTP(participants = Seq(HostingParticipant(participant1, Observation))), + ) + + val invalidNumberOfHostingParticipants = mkPTP(participants = + Seq( + HostingParticipant(participant1, Submission), + HostingParticipant(participant2, Submission), + ) + ) + + val foreignParticipant = + mkPTP(participants = Seq(HostingParticipant(participant2, Submission))) + + val invalidGroupAddressing = mkPTP(groupdAddressing = true) + + // we don't need to explicitly check threshold > 1, because we already reject the PTP if participants.size > 1 + // and the threshold can never be higher than the number of participants - // 3 participants exceed the limit imposed by the domain - val threeParticipants = mkPTP(numParticipants = 3) - checkTransaction(checks, threeParticipants) shouldBe Left( - PartyExceedsHostingLimit(party1, 2, 3) + val unhappyCases = invalidParticipantPermission ++ Seq( + foreignParticipant, + invalidNumberOfHostingParticipants, + invalidGroupAddressing, ) + forAll(unhappyCases)(ptp => + checkTransaction(checks, ptp) shouldBe Left( + TopologyTransactionRejection.PartyIdConflictWithAdminParty(ptp.mapping.partyId) + ) + ) } "report no errors for valid mappings" in { @@ -370,22 +392,74 @@ class ValidatingTopologyMappingChecksTest store, ptp, ) - val prior = factory.mkAdd(DomainTrustCertificate(participant1, domainId, false, Seq.empty)) + val prior = factory.mkAdd(DomainTrustCertificate(participant1, domainId)) val dtc = - factory.mkRemove(DomainTrustCertificate(participant1, domainId, false, Seq.empty)) + factory.mkRemove(DomainTrustCertificate(participant1, domainId)) checkTransaction(checks, dtc, Some(prior)) shouldBe Left( TopologyTransactionRejection.ParticipantStillHostsParties(participant1, Seq(party1)) ) + } + + "handle conflicts with existing party allocations" in { + val 
explicitAdminPartyParticipant1 = factory.mkAdd( + PartyToParticipant + .create( + partyId = participant1.adminParty, + domainId = None, + threshold = PositiveInt.one, + participants = Seq(HostingParticipant(participant1, Submission)), + groupAddressing = false, + ) + .value + ) + + // we allocate a party with participant2's UID on participant1. + // this is not an explicit admin party allocation, the party just so happens to use the same UID as participant2. + val partyWithParticipant2Uid = factory.mkAdd( + PartyToParticipant + .create( + partyId = participant2.adminParty, + domainId = None, + threshold = PositiveInt.one, + participants = Seq(HostingParticipant(participant1, Submission)), + groupAddressing = false, + ) + .value + ) + + val dop = factory.mkAdd( + DomainParametersState( + domainId, + DynamicDomainParameters.defaultValues(testedProtocolVersion), + ) + ) + + val (checks, store) = mk() + + // normally it's not possible to have a valid PTP without an already existing DTC of the hosting participants. + // but let's pretend for this check. + addToStore(store, dop, explicitAdminPartyParticipant1, partyWithParticipant2Uid) + // happy case: we allow the DTC (either a creation or modifying an existing one) + // if there is a valid explicit admin party allocation + checkTransaction(checks, p1_dtc, None) shouldBe Right(()) + + // unhappy case: there already exists a normal party allocation with the same UID + checkTransaction(checks, p2_dtc, None) shouldBe Left( + TopologyTransactionRejection.ParticipantIdConflictWithPartyId( + participant2, + partyWithParticipant2Uid.mapping.partyId, + ) + ) } "reject the addition if the domain is locked" in { Seq(OnboardingRestriction.RestrictedLocked, OnboardingRestriction.UnrestrictedLocked) .foreach { restriction => val (checks, store) = mk() - val ptp = factory.mkAdd( + val dop = factory.mkAdd( DomainParametersState( domainId, DynamicDomainParameters @@ -393,10 +467,10 @@ class ValidatingTopologyMappingChecksTest .tryUpdate(onboardingRestriction = restriction), ) ) - addToStore(store, ptp) + addToStore(store, dop) val dtc = - factory.mkAdd(DomainTrustCertificate(participant1, domainId, false, Seq.empty)) + factory.mkAdd(DomainTrustCertificate(participant1, domainId)) checkTransaction(checks, dtc) shouldBe Left( TopologyTransactionRejection.OnboardingRestrictionInPlace( @@ -410,7 +484,7 @@ class ValidatingTopologyMappingChecksTest "reject the addition if the domain is restricted" in { val (checks, store) = mk() - val ptp = factory.mkAdd( + val dop = factory.mkAdd( DomainParametersState( domainId, DynamicDomainParameters @@ -420,7 +494,7 @@ class ValidatingTopologyMappingChecksTest ) addToStore( store, - ptp, + dop, factory.mkAdd( ParticipantDomainPermission( domainId, @@ -435,7 +509,7 @@ class ValidatingTopologyMappingChecksTest // participant2 does not have permission from the domain to join checkTransaction( checks, - factory.mkAdd(DomainTrustCertificate(participant2, domainId, false, Seq.empty)), + factory.mkAdd(DomainTrustCertificate(participant2, domainId)), ) shouldBe Left( TopologyTransactionRejection.OnboardingRestrictionInPlace( participant2, @@ -447,7 +521,7 @@ class ValidatingTopologyMappingChecksTest // participant1 has been permissioned by the domain checkTransaction( checks, - factory.mkAdd(DomainTrustCertificate(participant1, domainId, false, Seq.empty)), + factory.mkAdd(DomainTrustCertificate(participant1, domainId)), None, ) shouldBe Right(()) } diff --git 
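
The reworked tests above exercise two complementary conflict rules: a PartyToParticipant mapping that reuses a participant's admin-party UID is only accepted when it is effectively that admin party (hosted solely on that participant, with Submission permission and no group addressing), and conversely a DomainTrustCertificate is rejected when an ordinary party already occupies the participant's UID. A rough sketch of the first rule, with made-up names standing in for the real ValidatingTopologyMappingChecks types:

object AdminPartyConflictSketch {
  final case class Hosting(participantUid: String, permission: String)
  final case class PartyToParticipantToy(
      partyUid: String,
      participants: Seq[Hosting],
      groupAddressing: Boolean,
  )

  // Accept the mapping unless it collides with a participant UID and is not a valid
  // explicit admin-party allocation for exactly that participant.
  def check(ptp: PartyToParticipantToy, knownParticipantUids: Set[String]): Either[String, Unit] =
    if (!knownParticipantUids.contains(ptp.partyUid)) Right(()) // no UID clash with a participant
    else
      ptp.participants match {
        case Seq(Hosting(uid, "Submission")) if uid == ptp.partyUid && !ptp.groupAddressing =>
          Right(()) // a valid explicit admin-party allocation
        case _ =>
          Left(s"PartyIdConflictWithAdminParty(${ptp.partyUid})")
      }
}
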
a/sdk/canton/community/common/src/test/scala/com/digitalasset/canton/traffic/TrafficPurchasedSubmissionHandlerTest.scala b/sdk/canton/community/common/src/test/scala/com/digitalasset/canton/traffic/TrafficPurchasedSubmissionHandlerTest.scala index ef34768ef1f5..41569b20b3e2 100644 --- a/sdk/canton/community/common/src/test/scala/com/digitalasset/canton/traffic/TrafficPurchasedSubmissionHandlerTest.scala +++ b/sdk/canton/community/common/src/test/scala/com/digitalasset/canton/traffic/TrafficPurchasedSubmissionHandlerTest.scala @@ -4,6 +4,7 @@ package com.digitalasset.canton.traffic import cats.data.EitherT +import com.daml.metrics.api.MetricsContext import com.daml.nonempty.NonEmpty import com.digitalasset.canton.config.RequireTypes.{NonNegativeLong, PositiveInt} import com.digitalasset.canton.data.CantonTimestamp @@ -95,7 +96,7 @@ class TrafficPurchasedSubmissionHandlerTest aggregationRuleCapture.capture(), callbackCapture.capture(), any[Boolean], - )(any[TraceContext]) + )(any[TraceContext], any[MetricsContext]) ).thenReturn(EitherT.pure(())) val resultF = handler @@ -179,7 +180,7 @@ class TrafficPurchasedSubmissionHandlerTest any[Option[AggregationRule]], callbackCapture.capture(), any[Boolean], - )(any[TraceContext]) + )(any[TraceContext], any[MetricsContext]) ).thenReturn(EitherT.pure(())) val resultF = handler @@ -234,7 +235,7 @@ class TrafficPurchasedSubmissionHandlerTest any[Option[AggregationRule]], any[SendCallback], any[Boolean], - )(any[TraceContext]) + )(any[TraceContext], any[MetricsContext]) ) .thenReturn(EitherT.leftT(SendAsyncClientError.RequestFailed("failed"))) @@ -270,7 +271,7 @@ class TrafficPurchasedSubmissionHandlerTest any[Option[AggregationRule]], callbackCapture.capture(), any[Boolean], - )(any[TraceContext]) + )(any[TraceContext], any[MetricsContext]) ) .thenReturn(EitherT.pure(())) @@ -323,7 +324,7 @@ class TrafficPurchasedSubmissionHandlerTest any[Option[AggregationRule]], callbackCapture.capture(), any[Boolean], - )(any[TraceContext]) + )(any[TraceContext], any[MetricsContext]) ) .thenReturn(EitherT.pure(())) clearInvocations(domainTimeTracker) diff --git a/sdk/canton/community/common/src/test/scala/com/digitalasset/canton/util/retry/PolicyTest.scala b/sdk/canton/community/common/src/test/scala/com/digitalasset/canton/util/retry/PolicyTest.scala index 92695073632b..c38551de9aae 100644 --- a/sdk/canton/community/common/src/test/scala/com/digitalasset/canton/util/retry/PolicyTest.scala +++ b/sdk/canton/community/common/src/test/scala/com/digitalasset/canton/util/retry/PolicyTest.scala @@ -17,13 +17,8 @@ import com.digitalasset.canton.lifecycle.{ import com.digitalasset.canton.logging.{SuppressionRule, TracedLogger} import com.digitalasset.canton.tracing.TraceContext import com.digitalasset.canton.util.Thereafter.syntax.* +import com.digitalasset.canton.util.retry.ErrorKind.TransientErrorKind import com.digitalasset.canton.util.retry.Jitter.RandomSource -import com.digitalasset.canton.util.retry.RetryUtil.{ - AllExnRetryable, - DbExceptionRetryable, - ExceptionRetryable, - NoExnRetryable, -} import com.digitalasset.canton.util.{DelayUtil, FutureUtil} import com.digitalasset.canton.{BaseTest, HasExecutorService} import org.scalatest.funspec.AsyncFunSpec @@ -33,7 +28,7 @@ import java.util.Random import java.util.concurrent.atomic.{AtomicBoolean, AtomicInteger, AtomicLong, AtomicReference} import scala.concurrent.duration.* import scala.concurrent.{Await, ExecutionContext, Future} -import scala.util.{Failure, Success as TrySuccess, Try} +import scala.util.{Failure, 
Success as TrySuccess} class PolicyTest extends AsyncFunSpec with BaseTest with HasExecutorService { @@ -62,7 +57,7 @@ class PolicyTest extends AsyncFunSpec with BaseTest with HasExecutorService { it("should retry a future for a specified number of times") { implicit val success: Success[Int] = Success(_ == 3) val tries = forwardCountingFutureStream().iterator - Directly(logger, flagCloseable, 3, "op")(tries.next(), AllExnRetryable).map(result => + Directly(logger, flagCloseable, 3, "op")(tries.next(), AllExceptionRetryPolicy).map(result => assert(success.predicate(result) === true) ) } @@ -70,7 +65,7 @@ class PolicyTest extends AsyncFunSpec with BaseTest with HasExecutorService { it("should fail when expected") { val success = implicitly[Success[Option[Int]]] val tries = Future(None: Option[Int]) - Directly(logger, flagCloseable, 2, "op")(tries, AllExnRetryable).map(result => + Directly(logger, flagCloseable, 2, "op")(tries, AllExceptionRetryPolicy).map(result => assert(success.predicate(result) === false) ) } @@ -84,7 +79,7 @@ class PolicyTest extends AsyncFunSpec with BaseTest with HasExecutorService { counter.incrementAndGet() Future.failed(new RuntimeException("always failing")) }, - AllExnRetryable, + AllExceptionRetryPolicy, ) // expect failure after 1+3 tries future.failed.map { t => @@ -102,7 +97,7 @@ class PolicyTest extends AsyncFunSpec with BaseTest with HasExecutorService { counter.incrementAndGet() Future.failed(new RuntimeException("always failing")) }, - AllExnRetryable, + AllExceptionRetryPolicy, ) future.failed.map(t => assert(counter.get() === 2 && t.getMessage === "always failing")) } @@ -117,7 +112,7 @@ class PolicyTest extends AsyncFunSpec with BaseTest with HasExecutorService { case _ => Future.failed(new RuntimeException("failed")) } }, - AllExnRetryable, + AllExceptionRetryPolicy, ) future.map(result => assert(counter.get() === 2 && result === "yay!")) } @@ -136,7 +131,7 @@ class PolicyTest extends AsyncFunSpec with BaseTest with HasExecutorService { } val policy = Directly(logger, flagCloseable, Forever, "op") - policy(run(), AllExnRetryable).map { result => + policy(run(), AllExceptionRetryPolicy).map { result => assert(result === true) assert(retried.get() == retriedUntilSuccess) } @@ -170,7 +165,7 @@ class PolicyTest extends AsyncFunSpec with BaseTest with HasExecutorService { { marker.set(System.currentTimeMillis); tries.next() }, - AllExnRetryable, + AllExceptionRetryPolicy, ) runF.map { result => val delta = marker.get() - marker_base @@ -195,7 +190,7 @@ class PolicyTest extends AsyncFunSpec with BaseTest with HasExecutorService { } val policy = Pause(logger, flagCloseable, Forever, 1.millis, "op") - policy(run(), AllExnRetryable).map { result => + policy(run(), AllExceptionRetryPolicy).map { result => assert(result === true) assert(retried.get() == retriedUntilSuccess) } @@ -237,7 +232,7 @@ class PolicyTest extends AsyncFunSpec with BaseTest with HasExecutorService { { marker.set(System.currentTimeMillis); tries.next() }, - AllExnRetryable, + AllExceptionRetryPolicy, ) runF.map { result => val delta = marker.get() - marker_base @@ -262,7 +257,7 @@ class PolicyTest extends AsyncFunSpec with BaseTest with HasExecutorService { } val policy = Backoff(logger, flagCloseable, Forever, 1.millis, Duration.Inf, "op") - policy(run(), AllExnRetryable).map { result => + policy(run(), AllExceptionRetryPolicy).map { result => assert(result === true) assert(retried.get() == 5) } @@ -309,7 +304,7 @@ class PolicyTest extends AsyncFunSpec with BaseTest with HasExecutorService 
{ implicit val algo: Jitter = algoCreator(10.millis) val tries = forwardCountingFutureStream().iterator val policy = Backoff(logger, flagCloseable, 3, 1.milli, Duration.Inf, "op") - policy(tries.next(), AllExnRetryable).map(result => + policy(tries.next(), AllExceptionRetryPolicy).map(result => assert(success.predicate(result) === true) ) } @@ -319,7 +314,9 @@ class PolicyTest extends AsyncFunSpec with BaseTest with HasExecutorService { val success = implicitly[Success[Option[Int]]] val tries = Future(None: Option[Int]) val policy = Backoff(logger, flagCloseable, 3, 1.milli, Duration.Inf, "op") - policy(tries, AllExnRetryable).map(result => assert(success.predicate(result) === false)) + policy(tries, AllExceptionRetryPolicy).map(result => + assert(success.predicate(result) === false) + ) } it("should deal with future failures") { @@ -332,7 +329,7 @@ class PolicyTest extends AsyncFunSpec with BaseTest with HasExecutorService { counter.incrementAndGet() Future.failed(new RuntimeException("always failing")) }, - AllExnRetryable, + AllExceptionRetryPolicy, ) future.failed.map(t => assert(counter.get() === 4 && t.getMessage === "always failing")) } @@ -349,7 +346,7 @@ class PolicyTest extends AsyncFunSpec with BaseTest with HasExecutorService { case _ => Future.failed(new RuntimeException("failed")) } }, - AllExnRetryable, + AllExceptionRetryPolicy, ) future.map(result => assert(counter.get() == 2 && result === "yay!")) } @@ -366,7 +363,7 @@ class PolicyTest extends AsyncFunSpec with BaseTest with HasExecutorService { { marker.set(System.currentTimeMillis); tries.next() }, - AllExnRetryable, + AllExceptionRetryPolicy, ).map { result => val delta = marker.get() - marker_base assert( @@ -388,7 +385,7 @@ class PolicyTest extends AsyncFunSpec with BaseTest with HasExecutorService { { marker.set(System.currentTimeMillis); tries.next() }, - AllExnRetryable, + AllExceptionRetryPolicy, ).map { result => val delta = marker.get() - marker_base assert( @@ -413,7 +410,7 @@ class PolicyTest extends AsyncFunSpec with BaseTest with HasExecutorService { } val policy = Backoff(logger, flagCloseable, Forever, 1.millis, Duration.Inf, "op") - policy(run(), AllExnRetryable).map { result => + policy(run(), AllExceptionRetryPolicy).map { result => assert(result === true) assert(retried.get() == retriedUntilSuccess) } @@ -450,7 +447,7 @@ class PolicyTest extends AsyncFunSpec with BaseTest with HasExecutorService { ) }, ) - val future = policy(tries.next(), AllExnRetryable) + val future = policy(tries.next(), AllExceptionRetryPolicy) future.map(result => assert(success.predicate(result) === true)) } @@ -466,7 +463,7 @@ class PolicyTest extends AsyncFunSpec with BaseTest with HasExecutorService { }, ) - val future = policy(tries.next(), AllExnRetryable) + val future = policy(tries.next(), AllExceptionRetryPolicy) future.map(result => assert(success.predicate(result) === false)) } @@ -489,7 +486,7 @@ class PolicyTest extends AsyncFunSpec with BaseTest with HasExecutorService { case RetryAfter(duration) => Pause(logger, flagCloseable, 4, delay = duration, "op") }, ) - policy(run(), AllExnRetryable).map(result => assert(result === true)) + policy(run(), AllExceptionRetryPolicy).map(result => assert(result === true)) } it("should handle synchronous failures") { @@ -511,7 +508,7 @@ class PolicyTest extends AsyncFunSpec with BaseTest with HasExecutorService { case RetryAfter(duration) => Pause(logger, flagCloseable, 4, delay = duration, "op") }, ) - policy(run(), AllExnRetryable).map(result => assert(result === true)) + 
policy(run(), AllExceptionRetryPolicy).map(result => assert(result === true)) } it("should repeat on failure until success") { @@ -535,7 +532,7 @@ class PolicyTest extends AsyncFunSpec with BaseTest with HasExecutorService { case _: MyException => Directly(logger, flagCloseable, Forever, "op") }, ) - policy(run(), AllExnRetryable).map { result => + policy(run(), AllExceptionRetryPolicy).map { result => assert(result === true) assert(retried.get() == retriedUntilSuccess) } @@ -562,7 +559,7 @@ class PolicyTest extends AsyncFunSpec with BaseTest with HasExecutorService { case _: MyException => Pause(logger, flagCloseable, Forever, 1.millis, "op") }, ) - policy(run(), AllExnRetryable).map { result => + policy(run(), AllExceptionRetryPolicy).map { result => assert(result === true) assert(retried.get() == retriedUntilSuccess) } @@ -591,7 +588,7 @@ class PolicyTest extends AsyncFunSpec with BaseTest with HasExecutorService { Backoff(logger, flagCloseable, Forever, 1.millis, Duration.Inf, "op") }, ) - policy(run(), AllExnRetryable).map { result => + policy(run(), AllExceptionRetryPolicy).map { result => assert(result === true) assert(retried.get() == 5) } @@ -619,7 +616,7 @@ class PolicyTest extends AsyncFunSpec with BaseTest with HasExecutorService { Backoff(logger, flagCloseable, Forever, 1.millis, Duration.Inf, "op") }, ) - policy(run(), AllExnRetryable).map { result => + policy(run(), AllExceptionRetryPolicy).map { result => assert(result === true) assert(retried.get() == 10) } @@ -640,7 +637,7 @@ class PolicyTest extends AsyncFunSpec with BaseTest with HasExecutorService { counter.incrementAndGet() Future.failed(new RuntimeException(s"unexpected problem")) }, - DbExceptionRetryable, + NoExceptionRetryPolicy, ) future.failed.map(t => assert(counter.get() === 1 && t.getMessage === "unexpected problem")) } @@ -655,7 +652,7 @@ class PolicyTest extends AsyncFunSpec with BaseTest with HasExecutorService { counter.incrementAndGet() throw new RuntimeException("always failing") }, - AllExnRetryable, + AllExceptionRetryPolicy, ) // expect failure after 1+maxRetries tries future.failed.map { t => @@ -686,7 +683,7 @@ class PolicyTest extends AsyncFunSpec with BaseTest with HasExecutorService { Future.successful(incr) } - val result = policy(closeable)(run(), AllExnRetryable)( + val result = policy(closeable)(run(), AllExceptionRetryPolicy)( success, executorService, traceContext, @@ -711,7 +708,11 @@ class PolicyTest extends AsyncFunSpec with BaseTest with HasExecutorService { val retryF = { implicit val executionContext: ExecutionContext = executorService - policy(closeable)(run(), AllExnRetryable)(Success.never, executorService, traceContext) + policy(closeable)(run(), AllExceptionRetryPolicy)( + Success.never, + executorService, + traceContext, + ) .thereafter { count => logger.debug(s"Stopped retry after $count") } @@ -751,7 +752,7 @@ class PolicyTest extends AsyncFunSpec with BaseTest with HasExecutorService { try { FutureUtil.doNotAwait( // This future probably never completes because we are likely to close the execution context during a `Delay` - policy(closeable)(run(), AllExnRetryable)(Success.never, closeableEc, implicitly), + policy(closeable)(run(), AllExceptionRetryPolicy)(Success.never, closeableEc, implicitly), "retrying forever until the execution context closes", ) @@ -784,7 +785,7 @@ class PolicyTest extends AsyncFunSpec with BaseTest with HasExecutorService { } } - policy(flagCloseable1).unlessShutdown(run(), AllExnRetryable).unwrap.map { result => + 
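
The PolicyTest changes in this hunk swap the old retryOK-style classifiers (AllExnRetryable, NoExnRetryable, DbExceptionRetryable) for ExceptionRetryPolicy implementations that map a thrown exception to an error kind, with only transient kinds triggering another attempt. A compact stand-alone sketch of that idea, with simplified names and signatures rather than the Canton retry API:

import scala.annotation.tailrec
import scala.util.{Failure, Success, Try}

sealed trait ErrorKind
case object TransientErrorKind extends ErrorKind
case object FatalErrorKind extends ErrorKind

// The policy only classifies the exception; the retry loop decides what to do with it.
trait ExceptionRetryPolicy {
  def determineExceptionErrorKind(exception: Throwable): ErrorKind
}

object AllExceptionRetryPolicy extends ExceptionRetryPolicy {
  def determineExceptionErrorKind(exception: Throwable): ErrorKind = TransientErrorKind
}

object NoExceptionRetryPolicy extends ExceptionRetryPolicy {
  def determineExceptionErrorKind(exception: Throwable): ErrorKind = FatalErrorKind
}

object Retry {
  // Retry synchronously and immediately, up to maxRetries times, but only for transient errors.
  @tailrec
  def directly[A](maxRetries: Int, policy: ExceptionRetryPolicy)(task: () => A): Try[A] =
    Try(task()) match {
      case Success(a) => Success(a)
      case Failure(e)
          if maxRetries > 0 && policy.determineExceptionErrorKind(e) == TransientErrorKind =>
        directly(maxRetries - 1, policy)(task)
      case failure => failure
    }
}
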
policy(flagCloseable1).unlessShutdown(run(), AllExceptionRetryPolicy).unwrap.map { result => result shouldBe AbortedDueToShutdown retried.get() shouldBe retriedUntilShutdown } @@ -814,7 +815,7 @@ class PolicyTest extends AsyncFunSpec with BaseTest with HasExecutorService { } val policy = mkPolicy(maxRetries)(Eval.always(suspend.get())) - policy.apply(run(), NoExnRetryable).map { _ => + policy.apply(run(), NoExceptionRetryPolicy).map { _ => retried.get() shouldBe maxRetries + 3 } } @@ -827,17 +828,21 @@ class PolicyTest extends AsyncFunSpec with BaseTest with HasExecutorService { case class TestException() extends RuntimeException("test exception") - val retryable = new ExceptionRetryable() { - override def retryOK( - outcome: Try[_], + val retryable = new ExceptionRetryPolicy() { + + override protected def determineExceptionErrorKind( + exception: Throwable, logger: TracedLogger, - lastErrorKind: Option[RetryUtil.ErrorKind], - )(implicit tc: TraceContext): RetryUtil.ErrorKind = RetryUtil.TransientErrorKind + )(implicit + tc: TraceContext + ): ErrorKind = + TransientErrorKind() override def retryLogLevel(e: Throwable): Option[Level] = e match { case TestException() => Some(Level.WARN) case _ => None } + } loggerFactory diff --git a/sdk/canton/community/demo/src/main/daml/ai-analysis/daml.yaml b/sdk/canton/community/demo/src/main/daml/ai-analysis/daml.yaml index 9bdde4318697..b2048b1d81af 100644 --- a/sdk/canton/community/demo/src/main/daml/ai-analysis/daml.yaml +++ b/sdk/canton/community/demo/src/main/daml/ai-analysis/daml.yaml @@ -1,4 +1,4 @@ -sdk-version: 3.2.0-snapshot.20240722.13195.0.vd24c87ab +sdk-version: 3.2.0-snapshot.20240724.13201.0.v2357fdb4 build-options: - --target=2.1 name: ai-analysis diff --git a/sdk/canton/community/demo/src/main/daml/bank/daml.yaml b/sdk/canton/community/demo/src/main/daml/bank/daml.yaml index 3600d7aa88ae..2478cbc4ae1b 100644 --- a/sdk/canton/community/demo/src/main/daml/bank/daml.yaml +++ b/sdk/canton/community/demo/src/main/daml/bank/daml.yaml @@ -1,4 +1,4 @@ -sdk-version: 3.2.0-snapshot.20240722.13195.0.vd24c87ab +sdk-version: 3.2.0-snapshot.20240724.13201.0.v2357fdb4 build-options: - --target=2.1 name: bank diff --git a/sdk/canton/community/demo/src/main/daml/doctor/daml.yaml b/sdk/canton/community/demo/src/main/daml/doctor/daml.yaml index a09641d67353..7d052c3f692d 100644 --- a/sdk/canton/community/demo/src/main/daml/doctor/daml.yaml +++ b/sdk/canton/community/demo/src/main/daml/doctor/daml.yaml @@ -1,4 +1,4 @@ -sdk-version: 3.2.0-snapshot.20240722.13195.0.vd24c87ab +sdk-version: 3.2.0-snapshot.20240724.13201.0.v2357fdb4 build-options: - --target=2.1 name: doctor diff --git a/sdk/canton/community/demo/src/main/daml/health-insurance/daml.yaml b/sdk/canton/community/demo/src/main/daml/health-insurance/daml.yaml index 507e8cb1b6b7..c7445d5ec7dd 100644 --- a/sdk/canton/community/demo/src/main/daml/health-insurance/daml.yaml +++ b/sdk/canton/community/demo/src/main/daml/health-insurance/daml.yaml @@ -1,4 +1,4 @@ -sdk-version: 3.2.0-snapshot.20240722.13195.0.vd24c87ab +sdk-version: 3.2.0-snapshot.20240724.13201.0.v2357fdb4 build-options: - --target=2.1 name: health-insurance diff --git a/sdk/canton/community/demo/src/main/daml/medical-records/daml.yaml b/sdk/canton/community/demo/src/main/daml/medical-records/daml.yaml index 3568f68060a2..8902150b74c5 100644 --- a/sdk/canton/community/demo/src/main/daml/medical-records/daml.yaml +++ b/sdk/canton/community/demo/src/main/daml/medical-records/daml.yaml @@ -1,4 +1,4 @@ -sdk-version: 
3.2.0-snapshot.20240722.13195.0.vd24c87ab +sdk-version: 3.2.0-snapshot.20240724.13201.0.v2357fdb4 build-options: - --target=2.1 name: medical-records diff --git a/sdk/canton/community/domain/src/main/scala/com/digitalasset/canton/domain/block/update/TrafficControlValidator.scala b/sdk/canton/community/domain/src/main/scala/com/digitalasset/canton/domain/block/update/TrafficControlValidator.scala index bb550912d3a6..a0f2811b3cc9 100644 --- a/sdk/canton/community/domain/src/main/scala/com/digitalasset/canton/domain/block/update/TrafficControlValidator.scala +++ b/sdk/canton/community/domain/src/main/scala/com/digitalasset/canton/domain/block/update/TrafficControlValidator.scala @@ -233,7 +233,9 @@ private[update] class TrafficControlValidator( metricsContext: MetricsContext, ): Unit = { receipt.map(_.consumedCost.value).foreach { cost => - metrics.trafficControl.eventDelivered.mark(cost)(metricsContext) + metrics.trafficControl.trafficConsumption.trafficCostOfDeliveredSequencedEvent + .mark(cost)(metricsContext) + metrics.trafficControl.trafficConsumption.deliveredEventCounter.inc()(metricsContext) } } @@ -245,17 +247,18 @@ private[update] class TrafficControlValidator( )(implicit traceContext: TraceContext): Unit = { val costO = receipt.map(_.consumedCost.value) val messageId = signedOrderingRequest.submissionRequest.messageId - val sequencerFingerprint = signedOrderingRequest.signature.signedBy + val sequencerId = signedOrderingRequest.content.sequencerId.member val sender = signedOrderingRequest.submissionRequest.sender // Note that the fingerprint of the submitting sequencer is not validated yet by the driver layer // So it does not protect against malicious sequencers, only notifies when honest sequencers let requests to be sequenced // which end up being invalidated on the read path logger.debug( - s"Wasted traffic cost${costO.map(c => s" (cost = $c)").getOrElse("")} for messageId $messageId accepted by sequencer $sequencerFingerprint from sender $sender." + s"Wasted traffic cost${costO.map(c => s" (cost = $c)").getOrElse("")} for messageId $messageId accepted by sequencer $sequencerId from sender $sender." ) costO.foreach { cost => metrics.trafficControl.wastedTraffic.mark(cost)(metricsContext) + metrics.trafficControl.wastedTrafficCounter.inc()(metricsContext) } } @@ -278,6 +281,7 @@ private[update] class TrafficControlValidator( s"Wasted sequencing of event with raw byte size $byteSize for messageId $messageId accepted by sequencer $sequencerId from sender $sender." 
) metrics.trafficControl.wastedSequencing.mark(byteSize)(metricsContext) + metrics.trafficControl.wastedSequencingCounter.inc()(metricsContext) } } diff --git a/sdk/canton/community/domain/src/main/scala/com/digitalasset/canton/domain/mediator/VerdictSender.scala b/sdk/canton/community/domain/src/main/scala/com/digitalasset/canton/domain/mediator/VerdictSender.scala index ea5de0797192..364d18970fc3 100644 --- a/sdk/canton/community/domain/src/main/scala/com/digitalasset/canton/domain/mediator/VerdictSender.scala +++ b/sdk/canton/community/domain/src/main/scala/com/digitalasset/canton/domain/mediator/VerdictSender.scala @@ -8,6 +8,7 @@ import cats.implicits.toFunctorFilterOps import cats.syntax.foldable.* import cats.syntax.functor.* import cats.syntax.parallel.* +import com.daml.metrics.api.MetricsContext import com.daml.nonempty.NonEmpty import com.digitalasset.canton.LfPartyId import com.digitalasset.canton.crypto.{DomainSyncCryptoClient, SyncCryptoError} @@ -152,6 +153,7 @@ private[mediator] class DefaultVerdictSender( } val sendET = if (sendVerdict) { + implicit val metricsContext: MetricsContext = MetricsContext("type" -> "send-verdict") // the result of send request will be logged within the returned future however any error is effectively // discarded. Any error logged by the eventual callback will most likely occur after the returned future has // completed. diff --git a/sdk/canton/community/domain/src/main/scala/com/digitalasset/canton/domain/metrics/BftOrderingMetrics.scala b/sdk/canton/community/domain/src/main/scala/com/digitalasset/canton/domain/metrics/BftOrderingMetrics.scala index 238d8c622c58..71338488e311 100644 --- a/sdk/canton/community/domain/src/main/scala/com/digitalasset/canton/domain/metrics/BftOrderingMetrics.scala +++ b/sdk/canton/community/domain/src/main/scala/com/digitalasset/canton/domain/metrics/BftOrderingMetrics.scala @@ -185,7 +185,17 @@ class BftOrderingMetrics( MetricInfo( prefix :+ "requests-queued", summary = "Requests queued", - description = "Measures the size of the mempool.", + description = "Measures the size of the mempool in requests.", + qualification = MetricQualification.Saturation, + ), + 0, + ) + + val bytesQueued: Gauge[Int] = openTelemetryMetricsFactory.gauge( + MetricInfo( + prefix :+ "bytes-queued", + summary = "Bytes queued", + description = "Measures the size of the mempool in bytes.", qualification = MetricQualification.Saturation, ), 0, @@ -204,6 +214,78 @@ class BftOrderingMetrics( openTelemetryMetricsFactory.histogram(histograms.ingress.requestsSize.info) } + object mempool { + private val prefix = BftOrderingMetrics.this.prefix :+ "mempool" + + val requestedBatches: Gauge[Int] = openTelemetryMetricsFactory.gauge( + MetricInfo( + prefix :+ "requested-batches", + summary = "Requested batches", + description = "Number of batches requested from the mempool by the availability module.", + qualification = MetricQualification.Saturation, + ), + 0, + ) + } + + object availability { + private val prefix = BftOrderingMetrics.this.prefix :+ "availability" + + val requestedProposals: Gauge[Int] = openTelemetryMetricsFactory.gauge( + MetricInfo( + prefix :+ "requested-proposals", + summary = "Requested proposals", + description = "Number of proposals requested from availability by the consensus module.", + qualification = MetricQualification.Saturation, + ), + 0, + ) + + val requestedBatches: Gauge[Int] = openTelemetryMetricsFactory.gauge( + MetricInfo( + prefix :+ "requested-batches", + summary = "Requested batches", + description = + 
"Maximum number of batches requested from availability by the consensus module.", + qualification = MetricQualification.Saturation, + ), + 0, + ) + + val readyBytes: Gauge[Int] = openTelemetryMetricsFactory.gauge( + MetricInfo( + prefix :+ "ready-bytes", + summary = "Bytes ready for consensus", + description = + "Number of bytes disseminated, provably highly available and ready for consensus.", + qualification = MetricQualification.Saturation, + ), + 0, + ) + + val readyRequests: Gauge[Int] = openTelemetryMetricsFactory.gauge( + MetricInfo( + prefix :+ "ready-requests", + summary = "Requests ready for consensus", + description = + "Number of requests disseminated, provably highly available and ready for consensus.", + qualification = MetricQualification.Saturation, + ), + 0, + ) + + val readyBatches: Gauge[Int] = openTelemetryMetricsFactory.gauge( + MetricInfo( + prefix :+ "ready-batches", + summary = "Batches ready for consensus", + description = + "Number of batches disseminated, provably highly available and ready for consensus.", + qualification = MetricQualification.Saturation, + ), + 0, + ) + } + object security { private val prefix = BftOrderingMetrics.this.prefix :+ "security" diff --git a/sdk/canton/community/domain/src/main/scala/com/digitalasset/canton/domain/metrics/BlockMetrics.scala b/sdk/canton/community/domain/src/main/scala/com/digitalasset/canton/domain/metrics/BlockMetrics.scala index 9f2332944b54..5ed7a1d92707 100644 --- a/sdk/canton/community/domain/src/main/scala/com/digitalasset/canton/domain/metrics/BlockMetrics.scala +++ b/sdk/canton/community/domain/src/main/scala/com/digitalasset/canton/domain/metrics/BlockMetrics.scala @@ -30,6 +30,23 @@ class BlockMetrics( 0L, )(MetricsContext.Empty) + val delay: Gauge[Long] = openTelemetryMetricsFactory.gauge( + MetricInfo( + prefix :+ "delay", + summary = "The block processing delay in milliseconds, relative to wall clock", + description = + """Every block carries a timestamp that was assigned by the ordering service when it ordered the block. + |This metric shows the difference between the wall clock of the sequencer node and the timestamp + |of the last processed block. The difference will include the clock-skew and the processing latency + |of the ordering service. If the delay is large compared to the usual latencies, clock skew can be ruled out, + |and enough sequencers are not slow, then it means that the node is still trying to catch up reading blocks + |from the ordering service. This can happen after having been offline for a while or if the node is + |too slow to keep up with the block processing load.""", + qualification = MetricQualification.Latency, + ), + 0L, + )(MetricsContext.Empty) + private val labels = Map("sender" -> "The sender of the submission request", "type" -> "Type of request") val blockEvents: Meter = diff --git a/sdk/canton/community/domain/src/main/scala/com/digitalasset/canton/domain/metrics/DatabaseSequencerMetrics.scala b/sdk/canton/community/domain/src/main/scala/com/digitalasset/canton/domain/metrics/DatabaseSequencerMetrics.scala new file mode 100644 index 000000000000..140d4d722664 --- /dev/null +++ b/sdk/canton/community/domain/src/main/scala/com/digitalasset/canton/domain/metrics/DatabaseSequencerMetrics.scala @@ -0,0 +1,32 @@ +// Copyright (c) 2024 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
+// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.domain.metrics + +import com.daml.metrics.api.MetricHandle.Gauge +import com.daml.metrics.api.* + +class DatabaseSequencerMetrics( + parent: MetricName, + metricsFactory: MetricHandle.LabeledMetricsFactory, +) { + private val prefix: MetricName = parent :+ "db" + + val watermarkDelay: Gauge[Long] = metricsFactory.gauge( + MetricInfo( + prefix :+ "watermark_delay", + summary = "The event processing delay in milliseconds, relative to wall clock", + description = """The sequencer writes events in parallel using a watermark. + |This metric shows the difference between the wall clock of the sequencer node and the current watermark + |of the last written events. The difference will include the clock-skew and the processing latency + |of the sequencer database write. + |For block sequencers, if the delay is large compared to the usual latencies, clock skew can be ruled out, + |and enough sequencers are not slow, then it means that the node is still trying to catch up reading blocks + |from the ordering service. This can happen after having been offline for a while or if the node is + |too slow to keep up with the block processing load. + |For database sequencers it means that the database system is not able to keep up with the write load.""", + qualification = MetricQualification.Latency, + ), + 0L, + )(MetricsContext.Empty) +} diff --git a/sdk/canton/community/domain/src/main/scala/com/digitalasset/canton/domain/metrics/DomainMetrics.scala b/sdk/canton/community/domain/src/main/scala/com/digitalasset/canton/domain/metrics/DomainMetrics.scala index 01807be31b6e..95c1e93dc344 100644 --- a/sdk/canton/community/domain/src/main/scala/com/digitalasset/canton/domain/metrics/DomainMetrics.scala +++ b/sdk/canton/community/domain/src/main/scala/com/digitalasset/canton/domain/metrics/DomainMetrics.scala @@ -62,6 +62,12 @@ class SequencerMetrics( new HealthMetrics(openTelemetryMetricsFactory), ) + lazy val dbSequencer: DatabaseSequencerMetrics = + new DatabaseSequencerMetrics( + prefix, + openTelemetryMetricsFactory, + ) + override def storageMetrics: DbStorageMetrics = dbStorage object block extends BlockMetrics(prefix, openTelemetryMetricsFactory) @@ -161,6 +167,16 @@ class SequencerMetrics( ) ) + val wastedSequencingCounter: Counter = openTelemetryMetricsFactory.counter( + MetricInfo( + prefix :+ "wasted-sequencing-counter", + summary = + "Number of events that failed traffic validation and were not delivered because of it.", + description = """Counter for wasted-sequencing.""", + qualification = MetricQualification.Traffic, + ) + ) + val wastedTraffic: Meter = openTelemetryMetricsFactory.meter( MetricInfo( + prefix :+ "wasted-traffic", @@ -172,11 +188,11 @@ class SequencerMetrics( ) ) - val eventDelivered: Meter = openTelemetryMetricsFactory.meter( + val wastedTrafficCounter: Counter = openTelemetryMetricsFactory.counter( MetricInfo( - prefix :+ "event-delivered-cost", - summary = "Cost of delivered event.", - description = """Cost of an event that was delivered.""", + prefix :+ "wasted-traffic-counter", + summary = "Number of events that cost traffic but were not delivered.", + description = """Counter for wasted-traffic.""", qualification = MetricQualification.Traffic, ) ) @@ -348,18 +364,4 @@ class MediatorMetrics( )( MetricsContext.Empty ) - - // TODO(i14580): add testing - object trafficControl { - val eventRejected: Meter = openTelemetryMetricsFactory.meter( - MetricInfo( - prefix :+ "event-rejected", - summary = "Event rejected
because of traffic limit exceeded", - description = - """This metric is being incremented every time a sequencer rejects an event because - the sender does not have enough credit.""", - qualification = MetricQualification.Traffic, - ) - ) - } } diff --git a/sdk/canton/community/domain/src/main/scala/com/digitalasset/canton/domain/sequencing/sequencer/DatabaseSequencer.scala b/sdk/canton/community/domain/src/main/scala/com/digitalasset/canton/domain/sequencing/sequencer/DatabaseSequencer.scala index b06593e71bd2..018cffe40df2 100644 --- a/sdk/canton/community/domain/src/main/scala/com/digitalasset/canton/domain/sequencing/sequencer/DatabaseSequencer.scala +++ b/sdk/canton/community/domain/src/main/scala/com/digitalasset/canton/domain/sequencing/sequencer/DatabaseSequencer.scala @@ -160,6 +160,7 @@ class DatabaseSequencer( loggerFactory, topologyClientMember, unifiedSequencer = unifiedSequencer, + metrics = metrics, ) private lazy val storageForAdminChanges: Storage = exclusiveStorage.getOrElse( diff --git a/sdk/canton/community/domain/src/main/scala/com/digitalasset/canton/domain/sequencing/sequencer/DatabaseSequencerIntegration.scala b/sdk/canton/community/domain/src/main/scala/com/digitalasset/canton/domain/sequencing/sequencer/DatabaseSequencerIntegration.scala index 2f5fe13f32d9..7a89ad5c39ad 100644 --- a/sdk/canton/community/domain/src/main/scala/com/digitalasset/canton/domain/sequencing/sequencer/DatabaseSequencerIntegration.scala +++ b/sdk/canton/community/domain/src/main/scala/com/digitalasset/canton/domain/sequencing/sequencer/DatabaseSequencerIntegration.scala @@ -13,7 +13,7 @@ import com.digitalasset.canton.sequencing.protocol.SendAsyncError import com.digitalasset.canton.topology.Member import com.digitalasset.canton.tracing.TraceContext import com.digitalasset.canton.util.FutureInstances.parallelFuture -import com.digitalasset.canton.util.retry.RetryUtil.NoExnRetryable +import com.digitalasset.canton.util.retry.NoExceptionRetryPolicy import com.digitalasset.canton.util.{MonadUtil, retry} import scala.concurrent.ExecutionContext @@ -105,7 +105,7 @@ trait DatabaseSequencerIntegration extends SequencerIntegration { .blockSequencerWriteInternal(outcome)(outcome.submissionTraceContext) .value .unwrap, - NoExnRetryable, + NoExceptionRetryPolicy, ) ) ) diff --git a/sdk/canton/community/domain/src/main/scala/com/digitalasset/canton/domain/sequencing/sequencer/SequencerWriter.scala b/sdk/canton/community/domain/src/main/scala/com/digitalasset/canton/domain/sequencing/sequencer/SequencerWriter.scala index e991fe1aa7f4..7823b9bb441c 100644 --- a/sdk/canton/community/domain/src/main/scala/com/digitalasset/canton/domain/sequencing/sequencer/SequencerWriter.scala +++ b/sdk/canton/community/domain/src/main/scala/com/digitalasset/canton/domain/sequencing/sequencer/SequencerWriter.scala @@ -14,6 +14,7 @@ import com.digitalasset.canton.config import com.digitalasset.canton.config.ProcessingTimeout import com.digitalasset.canton.config.RequireTypes.{PositiveInt, PositiveNumeric} import com.digitalasset.canton.data.CantonTimestamp +import com.digitalasset.canton.domain.metrics.SequencerMetrics import com.digitalasset.canton.domain.sequencing.sequencer.SequencerWriter.ResetWatermark import com.digitalasset.canton.domain.sequencing.sequencer.WriterStartupError.FailedToInitializeFromSnapshot import com.digitalasset.canton.domain.sequencing.sequencer.store.* @@ -28,8 +29,7 @@ import com.digitalasset.canton.tracing.TraceContext import com.digitalasset.canton.tracing.TraceContext.withNewTraceContext 
import com.digitalasset.canton.util.FutureInstances.* import com.digitalasset.canton.util.Thereafter.syntax.* -import com.digitalasset.canton.util.retry.Pause -import com.digitalasset.canton.util.retry.RetryUtil.AllExnRetryable +import com.digitalasset.canton.util.retry.{AllExceptionRetryPolicy, Pause} import com.digitalasset.canton.util.{EitherTUtil, FutureUtil, PekkoUtil, retry} import com.digitalasset.canton.version.ProtocolVersion import com.google.common.annotations.VisibleForTesting @@ -310,7 +310,7 @@ class SequencerWriter( .mapK(FutureUnlessShutdown.outcomeK) } yield writerStore }.value, - AllExnRetryable, + AllExceptionRetryPolicy, ) } } @@ -498,6 +498,7 @@ object SequencerWriter { loggerFactory: NamedLoggerFactory, sequencerMember: Member, unifiedSequencer: Boolean, + metrics: SequencerMetrics, )(implicit materializer: Materializer, executionContext: ExecutionContext): SequencerWriter = { val logger = TracedLogger(SequencerWriter.getClass, loggerFactory) @@ -515,6 +516,7 @@ object SequencerWriter { eventSignaller, loggerFactory, protocolVersion, + metrics, ) .toMat(Sink.ignore)(Keep.both) .mapMaterializedValue(m => new RunningSequencerWriterFlow(m._1, m._2.void)), diff --git a/sdk/canton/community/domain/src/main/scala/com/digitalasset/canton/domain/sequencing/sequencer/SequencerWriterSource.scala b/sdk/canton/community/domain/src/main/scala/com/digitalasset/canton/domain/sequencing/sequencer/SequencerWriterSource.scala index 20dc4514145e..3e0b4ee41c0b 100644 --- a/sdk/canton/community/domain/src/main/scala/com/digitalasset/canton/domain/sequencing/sequencer/SequencerWriterSource.scala +++ b/sdk/canton/community/domain/src/main/scala/com/digitalasset/canton/domain/sequencing/sequencer/SequencerWriterSource.scala @@ -13,6 +13,7 @@ import com.daml.nonempty.NonEmpty import com.daml.nonempty.catsinstances.* import com.digitalasset.canton.config.RequireTypes.PositiveInt import com.digitalasset.canton.data.CantonTimestamp +import com.digitalasset.canton.domain.metrics.SequencerMetrics import com.digitalasset.canton.domain.sequencing.sequencer.errors.SequencerError.{ ExceededMaxSequencingTime, PayloadToEventTimeBoundExceeded, @@ -25,6 +26,7 @@ import com.digitalasset.canton.logging.{ NamedLogging, TracedLogger, } +import com.digitalasset.canton.resource.DbExceptionRetryPolicy import com.digitalasset.canton.sequencing.protocol.{ Batch, ClosedEnvelope, @@ -38,7 +40,6 @@ import com.digitalasset.canton.tracing.BatchTracing.withTracedBatch import com.digitalasset.canton.tracing.{HasTraceContext, TraceContext, Traced} import com.digitalasset.canton.util.EitherTUtil import com.digitalasset.canton.util.FutureInstances.* -import com.digitalasset.canton.util.retry.RetryUtil.DbExceptionRetryable import com.digitalasset.canton.version.ProtocolVersion import com.google.common.annotations.VisibleForTesting import org.apache.pekko.NotUsed @@ -48,6 +49,7 @@ import org.apache.pekko.stream.scaladsl.{Flow, GraphDSL, Keep, Merge, Source} import java.util.UUID import java.util.concurrent.atomic.AtomicBoolean import scala.concurrent.{ExecutionContext, Future} +import scala.util.Failure /** A write we want to make to the db */ sealed trait Write @@ -186,6 +188,7 @@ object SequencerWriterSource { eventSignaller: EventSignaller, loggerFactory: NamedLoggerFactory, protocolVersion: ProtocolVersion, + metrics: SequencerMetrics, )(implicit executionContext: ExecutionContext, traceContext: TraceContext, @@ -270,6 +273,7 @@ object SequencerWriterSource { } } .via(UpdateWatermarkFlow(store, logger)) + 
.via(RecordWatermarkDelayMetricFlow(clock, metrics)) .via(NotifyEventSignallerFlow(eventSignaller)) } } @@ -675,6 +679,15 @@ object WritePayloadsFlow { } object UpdateWatermarkFlow { + + private def retryDbException(error: Throwable, logger: TracedLogger)(implicit + tc: TraceContext + ): Boolean = { + DbExceptionRetryPolicy + .logAndDetermineErrorKind(Failure(error), logger, None) + .maxRetries == Int.MaxValue + } + def apply(store: SequencerWriterStore, logger: TracedLogger)(implicit executionContext: ExecutionContext ): Flow[Traced[BatchWritten], Traced[BatchWritten], NotUsed] = { @@ -694,8 +707,8 @@ object UpdateWatermarkFlow { // This is a workaround to avoid failing during shutdown that can be removed once saveWatermark returns // a FutureUnlessShutdown .recover { - case exception if DbExceptionRetryable.retryOKForever(exception, logger) => - // The exception itself is already being logged above by retryOKForever. Only logging here additional + case exception if retryDbException(exception, logger) => + // The exception itself is already being logged above by retryDbException. Only logging here additional // context logger.info( "Saving watermark failed with a retryable error. This can happen during shutdown." + @@ -719,3 +732,15 @@ object NotifyEventSignallerFlow { } }) } + +object RecordWatermarkDelayMetricFlow { + def apply( + clock: Clock, + metrics: SequencerMetrics, + ): Flow[Traced[BatchWritten], Traced[BatchWritten], NotUsed] = + Flow[Traced[BatchWritten]].wireTap { batchWritten => + metrics.dbSequencer.watermarkDelay.updateValue( + (clock.now - batchWritten.value.latestTimestamp).toMillis + ) + } +} diff --git a/sdk/canton/community/domain/src/main/scala/com/digitalasset/canton/domain/sequencing/sequencer/block/BlockSequencer.scala b/sdk/canton/community/domain/src/main/scala/com/digitalasset/canton/domain/sequencing/sequencer/block/BlockSequencer.scala index 9c62b633706c..52098bd8d261 100644 --- a/sdk/canton/community/domain/src/main/scala/com/digitalasset/canton/domain/sequencing/sequencer/block/BlockSequencer.scala +++ b/sdk/canton/community/domain/src/main/scala/com/digitalasset/canton/domain/sequencing/sequencer/block/BlockSequencer.scala @@ -115,7 +115,7 @@ class BlockSequencer( Some(blockRateLimitManager.trafficConsumedStore), protocolVersion, cryptoApi, - SequencerMetrics.noop("TODO"), // TODO(#18406) + metrics, loggerFactory, unifiedSequencer, runtimeReady, @@ -189,8 +189,8 @@ class BlockSequencer( } val combinedSourceWithBlockHandling = combinedSource.async .via(stateManager.applyBlockUpdate(sequencerIntegration)) - .map { case Traced(lastTs) => - metrics.sequencerClient.handler.delay.updateValue((clock.now - lastTs).toMillis) + .wireTap { lastTs => + metrics.block.delay.updateValue((clock.now - lastTs.value).toMillis) } val ((killSwitchF, localEventsQueue), done) = PekkoUtil.runSupervised( ex => logger.error("Fatally failed to handle state changes", ex)(TraceContext.empty), { @@ -643,11 +643,7 @@ class BlockSequencer( /** Compute traffic states for the specified members at the provided timestamp. * @param requestedMembers members for which to compute traffic states - * @param approximateUsingWallClock if true, the max between wall clock time and last sequenced event timestamp - * will be used when computing the traffic states. This is useful mostly for testing - * when using SimClock to get updates on the state when domain time does not advance. - * When set to true, the result may NOT be the correct one by the time the domain - * reaches the wall clock timestamp. 
+ * @param selector timestamp selector determining at what time the traffic states will be computed */ private def trafficStatesForMembers( requestedMembers: Set[Member], @@ -655,25 +651,31 @@ class BlockSequencer( )(implicit traceContext: TraceContext ): FutureUnlessShutdown[Map[Member, Either[String, TrafficState]]] = { - val timestamp = selector match { - case ExactTimestamp(timestamp) => Some(timestamp) - case LastUpdatePerMember => None - // For the latest safe timestamp, we use the last timestamp of the latest processed block. - // Even though it may be more recent than the TrafficConsumed timestamp of individual members, - // we are sure that nothing has been consumed since then, because by the time we update getHeadState.block.lastTs - // all traffic has been consumed for that block. This means we can use this timestamp to compute an updated - // base traffic that will be correct. - case LatestSafe => Some(stateManager.getHeadState.block.lastTs) - case LatestApproximate => Some(clock.now.max(stateManager.getHeadState.block.lastTs)) - } + if (requestedMembers.isEmpty) { + // getStates interprets an empty list of members as "return all members" + // so we handle it here. + FutureUnlessShutdown.pure(Map.empty) + } else { + val timestamp = selector match { + case ExactTimestamp(timestamp) => Some(timestamp) + case LastUpdatePerMember => None + // For the latest safe timestamp, we use the last timestamp of the latest processed block. + // Even though it may be more recent than the TrafficConsumed timestamp of individual members, + // we are sure that nothing has been consumed since then, because by the time we update getHeadState.block.lastTs + // all traffic has been consumed for that block. This means we can use this timestamp to compute an updated + // base traffic that will be correct. 
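// Editor's note: illustrative restatement, not part of this patch. The timestamp selection in the
// match above can be summarised as a small pure function. The types below are simplified stand-ins
// for the real selector cases; only the mapping itself mirrors BlockSequencer.trafficStatesForMembers.
object TimestampSelectorIllustration {
  sealed trait Selector
  final case class Exact(micros: Long) extends Selector // use exactly this timestamp
  case object LastUpdatePerMember extends Selector // no single timestamp: use each member's last update
  case object LatestSafe extends Selector // timestamp of the last fully processed block
  case object LatestApproximate extends Selector // may run ahead of domain time

  def timestampFor(selector: Selector, wallClockMicros: Long, lastBlockMicros: Long): Option[Long] =
    selector match {
      case Exact(ts) => Some(ts)
      case LastUpdatePerMember => None
      case LatestSafe => Some(lastBlockMicros)
      case LatestApproximate => Some(math.max(wallClockMicros, lastBlockMicros))
    }
}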
+ case LatestSafe => Some(stateManager.getHeadState.block.lastTs) + case LatestApproximate => Some(clock.now.max(stateManager.getHeadState.block.lastTs)) + } - blockRateLimitManager.getStates( - requestedMembers, - timestamp, - stateManager.getHeadState.block.latestSequencerEventTimestamp, - // Warn on approximate topology or traffic purchased when getting exact traffic states only (so when selector is not LatestApproximate) - warnIfApproximate = selector != LatestApproximate, - ) + blockRateLimitManager.getStates( + requestedMembers, + timestamp, + stateManager.getHeadState.block.latestSequencerEventTimestamp, + // Warn on approximate topology or traffic purchased when getting exact traffic states only (so when selector is not LatestApproximate) + warnIfApproximate = selector != LatestApproximate, + ) + } } override def setTrafficPurchased( @@ -714,17 +716,17 @@ class BlockSequencer( members <- if (requestedMembers.isEmpty) { // If requestedMembers is not set get the traffic states of all known members - if (unifiedSequencer) { - FutureUnlessShutdown.outcomeF( - cryptoApi.currentSnapshotApproximation.ipsSnapshot.allMembers() - ) - } else { - FutureUnlessShutdown.pure( - stateManager.getHeadState.chunk.ephemeral.status.membersMap.keySet - ) - } + FutureUnlessShutdown.outcomeF( + cryptoApi.currentSnapshotApproximation.ipsSnapshot.allMembers() + ) } else { - FutureUnlessShutdown.pure(requestedMembers.toSet) + FutureUnlessShutdown.outcomeF( + cryptoApi.currentSnapshotApproximation.ipsSnapshot + .allMembers() + .map { registered => + requestedMembers.toSet.intersect(registered) + } + ) } trafficState <- trafficStatesForMembers( members, diff --git a/sdk/canton/community/domain/src/main/scala/com/digitalasset/canton/domain/service/GrpcSequencerConnectionService.scala b/sdk/canton/community/domain/src/main/scala/com/digitalasset/canton/domain/service/GrpcSequencerConnectionService.scala index 1c73f464055d..3f7b82bfefb9 100644 --- a/sdk/canton/community/domain/src/main/scala/com/digitalasset/canton/domain/service/GrpcSequencerConnectionService.scala +++ b/sdk/canton/community/domain/src/main/scala/com/digitalasset/canton/domain/service/GrpcSequencerConnectionService.scala @@ -29,7 +29,7 @@ import com.digitalasset.canton.sequencing.{ import com.digitalasset.canton.serialization.ProtoConverter import com.digitalasset.canton.topology.{DomainId, Member} import com.digitalasset.canton.tracing.TraceContext -import com.digitalasset.canton.util.retry.RetryUtil.NoExnRetryable +import com.digitalasset.canton.util.retry.NoExceptionRetryPolicy import com.digitalasset.canton.util.{EitherTUtil, retry} import io.grpc.{Status, StatusException} import monocle.Lens @@ -253,7 +253,7 @@ object GrpcSequencerConnectionService { ) .apply( tryNewConfig.value, - NoExnRetryable, + NoExceptionRetryPolicy, ) ) } diff --git a/sdk/canton/community/domain/src/test/scala/com/digitalasset/canton/domain/sequencing/sequencer/SequencerWriterSourceTest.scala b/sdk/canton/community/domain/src/test/scala/com/digitalasset/canton/domain/sequencing/sequencer/SequencerWriterSourceTest.scala index b7412dc2c1d1..67959c3ce699 100644 --- a/sdk/canton/community/domain/src/test/scala/com/digitalasset/canton/domain/sequencing/sequencer/SequencerWriterSourceTest.scala +++ b/sdk/canton/community/domain/src/test/scala/com/digitalasset/canton/domain/sequencing/sequencer/SequencerWriterSourceTest.scala @@ -9,6 +9,7 @@ import com.daml.nonempty.{NonEmpty, NonEmptyUtil} import com.digitalasset.canton.config.ProcessingTimeout import 
com.digitalasset.canton.config.RequireTypes.PositiveInt import com.digitalasset.canton.data.CantonTimestamp +import com.digitalasset.canton.domain.metrics.SequencerMetrics import com.digitalasset.canton.domain.sequencing.sequencer.errors.SequencerError.PayloadToEventTimeBoundExceeded import com.digitalasset.canton.domain.sequencing.sequencer.store.* import com.digitalasset.canton.lifecycle.{ @@ -128,6 +129,7 @@ class SequencerWriterSourceTest extends AsyncWordSpec with BaseTest with HasExec eventSignaller, loggerFactory, testedProtocolVersion, + SequencerMetrics.noop(suiteName), )(executorService, implicitly[TraceContext]) .toMat(Sink.ignore)(Keep.both) }, diff --git a/sdk/canton/community/domain/src/test/scala/com/digitalasset/canton/domain/sequencing/service/GrpcSequencerIntegrationTest.scala b/sdk/canton/community/domain/src/test/scala/com/digitalasset/canton/domain/sequencing/service/GrpcSequencerIntegrationTest.scala index f7022bcdafd9..63554cf238e8 100644 --- a/sdk/canton/community/domain/src/test/scala/com/digitalasset/canton/domain/sequencing/service/GrpcSequencerIntegrationTest.scala +++ b/sdk/canton/community/domain/src/test/scala/com/digitalasset/canton/domain/sequencing/service/GrpcSequencerIntegrationTest.scala @@ -5,6 +5,7 @@ package com.digitalasset.canton.domain.sequencing.service import cats.data.EitherT import com.daml.grpc.adapter.ExecutionSequencerFactory +import com.daml.metrics.api.MetricsContext import com.daml.nonempty.NonEmpty import com.digitalasset.canton.* import com.digitalasset.canton.concurrent.FutureSupervisor @@ -391,7 +392,7 @@ class GrpcSequencerIntegrationTest .thenReturn(EitherT.pure[FutureUnlessShutdown, SendAsyncError](())) when(env.sequencer.sendAsyncSigned(any[SignedContent[SubmissionRequest]])(anyTraceContext)) .thenReturn(EitherT.pure[FutureUnlessShutdown, SendAsyncError](())) - + implicit val metricsContext: MetricsContext = MetricsContext.Empty val result = for { response <- env.client .sendAsync( diff --git a/sdk/canton/community/drivers/reference/src/main/scala/com/digitalasset/canton/domain/sequencing/sequencer/reference/store/DbReferenceBlockOrderingStore.scala b/sdk/canton/community/drivers/reference/src/main/scala/com/digitalasset/canton/domain/sequencing/sequencer/reference/store/DbReferenceBlockOrderingStore.scala index e29100b5e090..debe17f7144c 100644 --- a/sdk/canton/community/drivers/reference/src/main/scala/com/digitalasset/canton/domain/sequencing/sequencer/reference/store/DbReferenceBlockOrderingStore.scala +++ b/sdk/canton/community/drivers/reference/src/main/scala/com/digitalasset/canton/domain/sequencing/sequencer/reference/store/DbReferenceBlockOrderingStore.scala @@ -20,13 +20,8 @@ import com.digitalasset.canton.store.db.DbDeserializationException import com.digitalasset.canton.topology.transaction.SignedTopologyTransaction import com.digitalasset.canton.tracing.{TraceContext, Traced, W3CTraceContext} import com.digitalasset.canton.util.retry -import com.digitalasset.canton.util.retry.RetryUtil -import com.digitalasset.canton.util.retry.RetryUtil.{ - ErrorKind, - ExceptionRetryable, - FatalErrorKind, - TransientErrorKind, -} +import com.digitalasset.canton.util.retry.ErrorKind.{FatalErrorKind, TransientErrorKind} +import com.digitalasset.canton.util.retry.{ErrorKind, ExceptionRetryPolicy} import org.postgresql.util.PSQLException import slick.jdbc.{GetResult, SetParameter, TransactionIsolation} @@ -34,7 +29,6 @@ import java.util.UUID import scala.annotation.unused import scala.concurrent.duration.* import 
scala.concurrent.{ExecutionContext, Future} -import scala.util.Try class DbReferenceBlockOrderingStore( override protected val storage: DbStorage, @@ -145,29 +139,22 @@ class DbReferenceBlockOrderingStore( ) .apply( insertBlock(), - new ExceptionRetryable { - override def retryOK( - outcome: Try[?], + new ExceptionRetryPolicy { + override protected def determineExceptionErrorKind( + exception: Throwable, logger: TracedLogger, - lastErrorKind: Option[ErrorKind], )(implicit tc: TraceContext - ): RetryUtil.ErrorKind = { - outcome match { - case util.Success(_) => RetryUtil.NoErrorKind - case util.Failure(exception) => - exception match { - // We want to retry on duplicate key violation because multiple sequencers may try to assign - // the same block id (height) to a new block, in which case they should retry with a different - // value. - // This retry should not be hit very often due to us using serializable isolation level on this operation. - // Error codes documented here: https://www.postgresql.org/docs/9.6/errcodes-appendix.html - case exception: PSQLException if exception.getSQLState == "23505" => - logger.debug("Retrying block insert because of key collision")(tc) - TransientErrorKind - case _ => FatalErrorKind - } - } + ): ErrorKind = exception match { + // We want to retry on duplicate key violation because multiple sequencers may try to assign + // the same block id (height) to a new block, in which case they should retry with a different + // value. + // This retry should not be hit very often due to us using serializable isolation level on this operation. + // Error codes documented here: https://www.postgresql.org/docs/9.6/errcodes-appendix.html + case exception: PSQLException if exception.getSQLState == "23505" => + logger.debug("Retrying block insert because of key collision")(tc) + TransientErrorKind() + case _ => FatalErrorKind } }, ) diff --git a/sdk/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/ledger/client/ResilientLedgerSubscription.scala b/sdk/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/ledger/client/ResilientLedgerSubscription.scala index 4b3ea8e9ef2e..3322b2fb0f81 100644 --- a/sdk/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/ledger/client/ResilientLedgerSubscription.scala +++ b/sdk/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/ledger/client/ResilientLedgerSubscription.scala @@ -14,7 +14,7 @@ import com.digitalasset.canton.logging.{NamedLoggerFactory, NamedLogging} import com.digitalasset.canton.tracing.{NoTracing, Spanning} import com.digitalasset.canton.util.Thereafter.syntax.ThereafterOps import com.digitalasset.canton.util.TryUtil.ForFailedOps -import com.digitalasset.canton.util.retry.RetryUtil.AllExnRetryable +import com.digitalasset.canton.util.retry.AllExceptionRetryPolicy import com.digitalasset.canton.util.{FutureUtil, retry} import io.grpc.StatusRuntimeException import org.apache.pekko.NotUsed @@ -62,7 +62,7 @@ class ResilientLedgerSubscription[S, T]( maxDelay = 5.seconds, operationName = s"restartable-$subscriptionName", ) - .apply(resilientSubscription(), AllExnRetryable) + .apply(resilientSubscription(), AllExceptionRetryPolicy) runOnShutdown_(new RunOnShutdown { override def name: String = s"$subscriptionName-shutdown" diff --git a/sdk/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/index/IndexServiceImpl.scala 
b/sdk/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/index/IndexServiceImpl.scala index 9283bd8769ae..08cc462255ff 100644 --- a/sdk/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/index/IndexServiceImpl.scala +++ b/sdk/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/index/IndexServiceImpl.scala @@ -6,7 +6,6 @@ package com.digitalasset.canton.platform.index import com.daml.error.{ContextualizedErrorLogger, DamlErrorWithDefiniteAnswer} import com.daml.ledger.api.v2.command_completion_service.CompletionStreamResponse import com.daml.ledger.api.v2.event_query_service.GetEventsByContractIdResponse -import com.daml.ledger.api.v2.offset_checkpoint.OffsetCheckpoint.DomainTime import com.daml.ledger.api.v2.state_service.GetActiveContractsResponse import com.daml.ledger.api.v2.update_service.{ GetTransactionResponse, @@ -14,7 +13,6 @@ import com.daml.ledger.api.v2.update_service.{ GetUpdateTreesResponse, GetUpdatesResponse, } -import com.daml.ledger.api.v2.offset_checkpoint as v2 import com.daml.metrics.InstrumentedGraph.* import com.daml.tracing.{Event, SpanAttribute, Spans} import com.digitalasset.canton.concurrent.DirectExecutionContext @@ -26,7 +24,6 @@ import com.digitalasset.canton.ledger.api.domain.{ TransactionId, } import com.digitalasset.canton.ledger.api.health.HealthStatus -import com.digitalasset.canton.ledger.api.util.TimestampConversion.fromInstant import com.digitalasset.canton.ledger.api.{TraceIdentifiers, domain} import com.digitalasset.canton.ledger.error.CommonErrors import com.digitalasset.canton.ledger.error.groups.RequestValidationErrors @@ -155,7 +152,7 @@ private[index] class IndexServiceImpl( checkpointFlow( cond = isTailingStream, fetchOffsetCheckpoint = fetchOffsetCheckpoint, - responseFromCheckpoint = updatesResponseFromOffsetCheckpoint, + responseFromCheckpoint = updatesResponse, ) ) .mapError(shutdownError) @@ -188,20 +185,6 @@ private[index] class IndexServiceImpl( (off, elem) } - private def updatesResponseFromOffsetCheckpoint( - offsetCheckpoint: OffsetCheckpoint - ): GetUpdatesResponse = - GetUpdatesResponse.defaultInstance.withOffsetCheckpoint( - v2.OffsetCheckpoint( - offset = offsetCheckpoint.offset.toApiString, - domainTimes = offsetCheckpoint.domainTimes - .map({ case (domain, t) => - DomainTime(domain.toProtoPrimitive, Some(fromInstant(t.toInstant))) - }) - .toSeq, - ) - ) - override def transactionTrees( startExclusive: ParticipantOffset, endInclusive: Option[ParticipantOffset], @@ -213,6 +196,7 @@ private[index] class IndexServiceImpl( transactionFilter, getPackageMetadataSnapshot(contextualizedErrorLogger), ) { + val isTailingStream = endInclusive.isEmpty val parties = if (transactionFilter.filtersForAnyParty.isEmpty) Some(transactionFilter.filtersByParty.keySet) @@ -247,10 +231,19 @@ private[index] class IndexServiceImpl( parties, eventProjectionProperties, ) + .via(rangeDecorator(startExclusive, endInclusive)) } }, to, ) + // when a tailing stream is requested add checkpoint messages + .via( + checkpointFlow( + cond = isTailingStream, + fetchOffsetCheckpoint = fetchOffsetCheckpoint, + responseFromCheckpoint = updateTreesResponse, + ) + ) .mapError(shutdownError) .map(_._2) .buffered(metrics.index.transactionTreesBufferSize, LedgerApiStreamsBufferSize) @@ -875,4 +868,14 @@ object IndexServiceImpl { final case object RangeEnd extends Carrier[Nothing] final case class Element[T](element: T) extends Carrier[T] + private def 
updatesResponse( + offsetCheckpoint: OffsetCheckpoint + ): GetUpdatesResponse = + GetUpdatesResponse.defaultInstance.withOffsetCheckpoint(offsetCheckpoint.toApi) + + private def updateTreesResponse( + offsetCheckpoint: OffsetCheckpoint + ): GetUpdateTreesResponse = + GetUpdateTreesResponse.defaultInstance.withOffsetCheckpoint(offsetCheckpoint.toApi) + } diff --git a/sdk/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/indexer/RecoveringIndexer.scala b/sdk/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/indexer/RecoveringIndexer.scala index 7929023ddca3..daca68e4a7cb 100644 --- a/sdk/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/indexer/RecoveringIndexer.scala +++ b/sdk/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/indexer/RecoveringIndexer.scala @@ -7,9 +7,9 @@ import com.daml.ledger.resources.{Resource, ResourceContext} import com.digitalasset.canton.discard.Implicits.DiscardOps import com.digitalasset.canton.ledger.api.health.{HealthStatus, Healthy, ReportsHealth, Unhealthy} import com.digitalasset.canton.logging.{NamedLoggerFactory, NamedLogging} +import com.digitalasset.canton.resource.DbExceptionRetryPolicy import com.digitalasset.canton.tracing.TraceContext -import com.digitalasset.canton.util.retry.RetryUtil -import com.digitalasset.canton.util.retry.RetryUtil.DbExceptionRetryable +import com.digitalasset.canton.util.retry.ErrorKind.* import org.apache.pekko.actor.Scheduler import org.apache.pekko.pattern.after @@ -161,13 +161,15 @@ private[indexer] final class RecoveringIndexer( updateHealthStatus(Unhealthy) // determine if the exception indicates a transient error kind which we expect // to be able to recover from by retrying - DbExceptionRetryable.determineErrorKind(exception, logger)(TraceContext.empty) match { - case RetryUtil.TransientErrorKind => + // TODO(i20367): Actually decide to recover based on error kind and not just change the log level + DbExceptionRetryPolicy.determineExceptionErrorKind(exception, logger)( + TraceContext.empty + ) match { + case TransientErrorKind(_) => def collect(e: Throwable): List[Throwable] = e :: Option(e.getCause).map(collect).getOrElse(Nil) logger.warn(errorMessage + ": " + (collect(exception).mkString(","))) - case RetryUtil.NoErrorKind | RetryUtil.FatalErrorKind | - RetryUtil.SpuriousTransientErrorKind => + case UnknownErrorKind | FatalErrorKind | NoSuccessErrorKind => logger.error(errorMessage, exception) } diff --git a/sdk/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/cache/OffsetCheckpointCache.scala b/sdk/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/cache/OffsetCheckpointCache.scala index 2e37110a8662..01d8ba2ffdd4 100644 --- a/sdk/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/cache/OffsetCheckpointCache.scala +++ b/sdk/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/cache/OffsetCheckpointCache.scala @@ -3,7 +3,11 @@ package com.digitalasset.canton.platform.store.cache +import com.daml.ledger.api.v2.offset_checkpoint.OffsetCheckpoint.DomainTime +import com.daml.ledger.api.v2.offset_checkpoint as v2 import com.digitalasset.canton.data.Offset +import com.digitalasset.canton.ledger.api.util.TimestampConversion.fromInstant +import com.digitalasset.canton.platform.ApiOffset.ApiOffsetConverter 
import com.digitalasset.canton.topology.DomainId import com.digitalasset.daml.lf.data.Time.Timestamp @@ -20,4 +24,15 @@ class OffsetCheckpointCache { } -final case class OffsetCheckpoint(offset: Offset, domainTimes: Map[DomainId, Timestamp]) +final case class OffsetCheckpoint(offset: Offset, domainTimes: Map[DomainId, Timestamp]) { + lazy val toApi: v2.OffsetCheckpoint = + v2.OffsetCheckpoint( + offset = offset.toApiString, + domainTimes = domainTimes + .map({ case (domain, t) => + DomainTime(domain.toProtoPrimitive, Some(fromInstant(t.toInstant))) + }) + .toSeq, + ) + +} diff --git a/sdk/canton/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/store/utils/ConcurrencyLimiterSpec.scala b/sdk/canton/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/store/utils/ConcurrencyLimiterSpec.scala index a9737c2dc137..eca8be0dd5e4 100644 --- a/sdk/canton/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/store/utils/ConcurrencyLimiterSpec.scala +++ b/sdk/canton/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/store/utils/ConcurrencyLimiterSpec.scala @@ -3,6 +3,7 @@ package com.digitalasset.canton.platform.store.utils +import com.digitalasset.canton.annotations.UnstableTest import com.digitalasset.canton.concurrent.Threading import org.scalatest.flatspec.AsyncFlatSpec import org.scalatest.{Assertion, Assertions} @@ -11,6 +12,7 @@ import java.util.concurrent.Executors import java.util.concurrent.atomic.AtomicInteger import scala.concurrent.{ExecutionContext, Future} +@UnstableTest // TODO(#20340) Remove UnstableTest annotation once it is fixed final class ConcurrencyLimiterSpec extends AsyncFlatSpec { behavior of "QueueBasedConcurrencyLimiter" diff --git a/sdk/canton/community/ledger/ledger-common-dars/src/main/daml/carbonv1/daml.yaml b/sdk/canton/community/ledger/ledger-common-dars/src/main/daml/carbonv1/daml.yaml index b99f4f6b1f11..e6d48a70be4b 100644 --- a/sdk/canton/community/ledger/ledger-common-dars/src/main/daml/carbonv1/daml.yaml +++ b/sdk/canton/community/ledger/ledger-common-dars/src/main/daml/carbonv1/daml.yaml @@ -1,4 +1,4 @@ -sdk-version: 3.2.0-snapshot.20240722.13195.0.vd24c87ab +sdk-version: 3.2.0-snapshot.20240724.13201.0.v2357fdb4 build-options: - --enable-interfaces=yes name: carbonv1-tests diff --git a/sdk/canton/community/ledger/ledger-common-dars/src/main/daml/carbonv2/daml.yaml b/sdk/canton/community/ledger/ledger-common-dars/src/main/daml/carbonv2/daml.yaml index 9362e50eb662..a84922eedfda 100644 --- a/sdk/canton/community/ledger/ledger-common-dars/src/main/daml/carbonv2/daml.yaml +++ b/sdk/canton/community/ledger/ledger-common-dars/src/main/daml/carbonv2/daml.yaml @@ -1,4 +1,4 @@ -sdk-version: 3.2.0-snapshot.20240722.13195.0.vd24c87ab +sdk-version: 3.2.0-snapshot.20240724.13201.0.v2357fdb4 build-options: - --enable-interfaces=yes name: carbonv2-tests diff --git a/sdk/canton/community/ledger/ledger-common-dars/src/main/daml/experimental/daml.yaml b/sdk/canton/community/ledger/ledger-common-dars/src/main/daml/experimental/daml.yaml index 209bd7229c7a..c9936a270426 100644 --- a/sdk/canton/community/ledger/ledger-common-dars/src/main/daml/experimental/daml.yaml +++ b/sdk/canton/community/ledger/ledger-common-dars/src/main/daml/experimental/daml.yaml @@ -1,4 +1,4 @@ -sdk-version: 3.2.0-snapshot.20240722.13195.0.vd24c87ab +sdk-version: 3.2.0-snapshot.20240724.13201.0.v2357fdb4 name: experimental-tests source: . 
version: 3.1.0 diff --git a/sdk/canton/community/ledger/ledger-common-dars/src/main/daml/model/daml.yaml b/sdk/canton/community/ledger/ledger-common-dars/src/main/daml/model/daml.yaml index f17bdba791c4..6a8cde2e7a2e 100644 --- a/sdk/canton/community/ledger/ledger-common-dars/src/main/daml/model/daml.yaml +++ b/sdk/canton/community/ledger/ledger-common-dars/src/main/daml/model/daml.yaml @@ -1,4 +1,4 @@ -sdk-version: 3.2.0-snapshot.20240722.13195.0.vd24c87ab +sdk-version: 3.2.0-snapshot.20240724.13201.0.v2357fdb4 build-options: - --enable-interfaces=yes name: model-tests diff --git a/sdk/canton/community/ledger/ledger-common-dars/src/main/daml/package_management/daml.yaml b/sdk/canton/community/ledger/ledger-common-dars/src/main/daml/package_management/daml.yaml index f5898fd51cea..d823d624cb60 100644 --- a/sdk/canton/community/ledger/ledger-common-dars/src/main/daml/package_management/daml.yaml +++ b/sdk/canton/community/ledger/ledger-common-dars/src/main/daml/package_management/daml.yaml @@ -1,4 +1,4 @@ -sdk-version: 3.2.0-snapshot.20240722.13195.0.vd24c87ab +sdk-version: 3.2.0-snapshot.20240724.13201.0.v2357fdb4 name: package-management-tests source: . version: 3.1.0 diff --git a/sdk/canton/community/ledger/ledger-common-dars/src/main/daml/semantic/daml.yaml b/sdk/canton/community/ledger/ledger-common-dars/src/main/daml/semantic/daml.yaml index 513fbb458c64..d60d91808b8a 100644 --- a/sdk/canton/community/ledger/ledger-common-dars/src/main/daml/semantic/daml.yaml +++ b/sdk/canton/community/ledger/ledger-common-dars/src/main/daml/semantic/daml.yaml @@ -1,4 +1,4 @@ -sdk-version: 3.2.0-snapshot.20240722.13195.0.vd24c87ab +sdk-version: 3.2.0-snapshot.20240724.13201.0.v2357fdb4 build-options: - --enable-interfaces=yes name: semantic-tests diff --git a/sdk/canton/community/ledger/ledger-common-dars/src/main/daml/upgrade/1.0.0/daml.yaml b/sdk/canton/community/ledger/ledger-common-dars/src/main/daml/upgrade/1.0.0/daml.yaml index 20ad7ff51ebd..da03325b6fff 100644 --- a/sdk/canton/community/ledger/ledger-common-dars/src/main/daml/upgrade/1.0.0/daml.yaml +++ b/sdk/canton/community/ledger/ledger-common-dars/src/main/daml/upgrade/1.0.0/daml.yaml @@ -1,4 +1,4 @@ -sdk-version: 3.2.0-snapshot.20240722.13195.0.vd24c87ab +sdk-version: 3.2.0-snapshot.20240724.13201.0.v2357fdb4 name: upgrade-tests source: . version: 1.0.0 diff --git a/sdk/canton/community/ledger/ledger-common-dars/src/main/daml/upgrade/2.0.0/daml.yaml b/sdk/canton/community/ledger/ledger-common-dars/src/main/daml/upgrade/2.0.0/daml.yaml index 90b8db5987bc..bdfa3538a544 100644 --- a/sdk/canton/community/ledger/ledger-common-dars/src/main/daml/upgrade/2.0.0/daml.yaml +++ b/sdk/canton/community/ledger/ledger-common-dars/src/main/daml/upgrade/2.0.0/daml.yaml @@ -1,4 +1,4 @@ -sdk-version: 3.2.0-snapshot.20240722.13195.0.vd24c87ab +sdk-version: 3.2.0-snapshot.20240724.13201.0.v2357fdb4 name: upgrade-tests source: . version: 2.0.0 diff --git a/sdk/canton/community/ledger/ledger-common-dars/src/main/daml/upgrade/3.0.0/daml.yaml b/sdk/canton/community/ledger/ledger-common-dars/src/main/daml/upgrade/3.0.0/daml.yaml index 2193abb5d789..4eb5295a9862 100644 --- a/sdk/canton/community/ledger/ledger-common-dars/src/main/daml/upgrade/3.0.0/daml.yaml +++ b/sdk/canton/community/ledger/ledger-common-dars/src/main/daml/upgrade/3.0.0/daml.yaml @@ -1,4 +1,4 @@ -sdk-version: 3.2.0-snapshot.20240722.13195.0.vd24c87ab +sdk-version: 3.2.0-snapshot.20240724.13201.0.v2357fdb4 name: upgrade-tests source: . 
version: 3.0.0 diff --git a/sdk/canton/community/ledger/ledger-json-api/src/test/daml/v2_1/daml.yaml b/sdk/canton/community/ledger/ledger-json-api/src/test/daml/v2_1/daml.yaml index 6667ea4e7303..8d51fecd6b4e 100644 --- a/sdk/canton/community/ledger/ledger-json-api/src/test/daml/v2_1/daml.yaml +++ b/sdk/canton/community/ledger/ledger-json-api/src/test/daml/v2_1/daml.yaml @@ -1,4 +1,4 @@ -sdk-version: 3.2.0-snapshot.20240722.13195.0.vd24c87ab +sdk-version: 3.2.0-snapshot.20240724.13201.0.v2357fdb4 build-options: - --target=2.1 name: JsonEncodingTest diff --git a/sdk/canton/community/ledger/ledger-json-api/src/test/daml/v2_dev/daml.yaml b/sdk/canton/community/ledger/ledger-json-api/src/test/daml/v2_dev/daml.yaml index f86bc82ed5e3..d3cf7ee1efff 100644 --- a/sdk/canton/community/ledger/ledger-json-api/src/test/daml/v2_dev/daml.yaml +++ b/sdk/canton/community/ledger/ledger-json-api/src/test/daml/v2_dev/daml.yaml @@ -1,4 +1,4 @@ -sdk-version: 3.2.0-snapshot.20240722.13195.0.vd24c87ab +sdk-version: 3.2.0-snapshot.20240724.13201.0.v2357fdb4 build-options: - --target=2.dev name: JsonEncodingTestDev diff --git a/sdk/canton/community/participant/src/main/daml/daml.yaml b/sdk/canton/community/participant/src/main/daml/daml.yaml index 42b6495f0295..bd5619d4d68c 100644 --- a/sdk/canton/community/participant/src/main/daml/daml.yaml +++ b/sdk/canton/community/participant/src/main/daml/daml.yaml @@ -1,4 +1,4 @@ -sdk-version: 3.2.0-snapshot.20240722.13195.0.vd24c87ab +sdk-version: 3.2.0-snapshot.20240724.13201.0.v2357fdb4 build-options: - --target=2.1 name: AdminWorkflows diff --git a/sdk/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/ParticipantNode.scala b/sdk/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/ParticipantNode.scala index 52c1e9815f6e..b82a2319fe4c 100644 --- a/sdk/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/ParticipantNode.scala +++ b/sdk/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/ParticipantNode.scala @@ -801,13 +801,6 @@ class ParticipantNodeBootstrap( executionContext, ) ) - adminServerRegistry - .addServiceU( - TransferServiceGrpc.bindService( - new GrpcTransferService(sync.transferService, participantId, loggerFactory), - executionContext, - ) - ) adminServerRegistry .addServiceU( InspectionServiceGrpc.bindService( diff --git a/sdk/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/admin/TransferService.scala b/sdk/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/admin/TransferService.scala deleted file mode 100644 index 73130343de88..000000000000 --- a/sdk/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/admin/TransferService.scala +++ /dev/null @@ -1,149 +0,0 @@ -// Copyright (c) 2024 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
-// SPDX-License-Identifier: Apache-2.0 - -package com.digitalasset.canton.participant.admin - -import cats.data.EitherT -import com.digitalasset.canton.data.{CantonTimestamp, TransferSubmitterMetadata} -import com.digitalasset.canton.lifecycle.FutureUnlessShutdown -import com.digitalasset.canton.participant.protocol.transfer.{ - TransferData, - TransferSubmissionHandle, -} -import com.digitalasset.canton.participant.store.TransferLookup -import com.digitalasset.canton.protocol.{LfContractId, SourceDomainId, TargetDomainId, TransferId} -import com.digitalasset.canton.topology.DomainId -import com.digitalasset.canton.tracing.{TraceContext, Traced} -import com.digitalasset.canton.version.ProtocolVersion -import com.digitalasset.canton.version.Transfer.{SourceProtocolVersion, TargetProtocolVersion} -import com.digitalasset.canton.{DomainAlias, LfPartyId} - -import scala.concurrent.{ExecutionContext, Future} - -class TransferService( - domainIdOfAlias: DomainAlias => Option[DomainId], - submissionHandles: DomainId => Option[TransferSubmissionHandle], - transferLookups: TargetDomainId => Option[TransferLookup], - protocolVersionFor: Traced[DomainId] => Option[ProtocolVersion], -)(implicit ec: ExecutionContext) { - - private[admin] def transferOut( - submitterMetadata: TransferSubmitterMetadata, - contractId: LfContractId, - sourceDomain: DomainAlias, - targetDomain: DomainAlias, - )(implicit traceContext: TraceContext): EitherT[Future, String, TransferId] = - for { - submissionHandle <- EitherT.fromEither[Future](submissionHandleFor(sourceDomain)) - targetDomainId <- EitherT.fromEither[Future](domainIdFor(targetDomain)).map(TargetDomainId(_)) - - rawTargetProtocolVersion <- EitherT.fromEither[Future]( - protocolVersionFor(targetDomainId.unwrap, "target") - ) - targetProtocolVersion = TargetProtocolVersion(rawTargetProtocolVersion) - - transferId <- submissionHandle - .submitTransferOut( - submitterMetadata, - contractId, - targetDomainId, - targetProtocolVersion, - ) - .mapK(FutureUnlessShutdown.outcomeK) - .semiflatMap(Predef.identity) - .leftMap(_.toString) - .onShutdown(Left("Application is shutting down")) - .biflatMap( - error => EitherT.leftT[Future, TransferId](error), - result => - EitherT( - result.transferOutCompletionF.map(status => - Either.cond( - status.code == com.google.rpc.Code.OK_VALUE, - result.transferId, - s"Transfer-out failed with status $status", - ) - ) - ), - ) - } yield transferId - - private def protocolVersionFor(domain: DomainId, kind: String)(implicit - traceContext: TraceContext - ): Either[String, ProtocolVersion] = - protocolVersionFor(Traced(domain)) - .toRight(s"Unable to get protocol version of $kind domain") - - def transferIn( - submitterMetadata: TransferSubmitterMetadata, - targetDomain: DomainAlias, - transferId: TransferId, - )(implicit - traceContext: TraceContext - ): EitherT[Future, String, Unit] = - for { - submisisonHandle <- EitherT.fromEither[Future](submissionHandleFor(targetDomain)) - rawSourceProtocolVersion <- EitherT.fromEither[Future]( - protocolVersionFor(transferId.sourceDomain.unwrap, "source") - ) - sourceProtocolVersion = SourceProtocolVersion(rawSourceProtocolVersion) - result <- submisisonHandle - .submitTransferIn( - submitterMetadata, - transferId, - sourceProtocolVersion, - ) - .mapK(FutureUnlessShutdown.outcomeK) - .semiflatMap(Predef.identity) - .leftMap(_.toString) - .onShutdown(Left("Application is shutting down")) - _ <- EitherT( - result.transferInCompletionF.map(status => - Either.cond( - status.code == 
com.google.rpc.Code.OK_VALUE, - (), - s"Transfer-in failed with status $status. ID: $transferId", - ) - ) - ) - } yield () - - def transferSearch( - targetDomainAlias: DomainAlias, - filterSourceDomainAlias: Option[DomainAlias], - filterTimestamp: Option[CantonTimestamp], - filterSubmitter: Option[LfPartyId], - limit: Int, - )(implicit traceContext: TraceContext): EitherT[Future, String, Seq[TransferData]] = { - for { - rawTargetDomain <- EitherT.fromEither[Future](domainIdFor(targetDomainAlias)) - targetDomain = TargetDomainId(rawTargetDomain) - - transferLookup <- EitherT.fromEither[Future]( - transferLookups(targetDomain).toRight(s"Unknown domain alias $targetDomainAlias") - ) - - filterDomain <- EitherT.fromEither[Future](filterSourceDomainAlias match { - case None => Right(None) - case Some(alias) => - domainIdOfAlias(alias) - .toRight(s"Unknown domain alias `$alias`") - .map(id => Some(SourceDomainId(id))) - }) - result <- EitherT.right( - transferLookup.find(filterDomain, filterTimestamp, filterSubmitter, limit) - ) - } yield result - } - - private[this] def domainIdFor(alias: DomainAlias): Either[String, DomainId] = - domainIdOfAlias(alias).toRight(s"Unknown domain alias $alias") - - private[this] def submissionHandleFor( - alias: DomainAlias - ): Either[String, TransferSubmissionHandle] = - (for { - domainId <- domainIdOfAlias(alias) - sync <- submissionHandles(domainId) - } yield sync).toRight(s"Unknown domain alias $alias") -} diff --git a/sdk/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/admin/grpc/GrpcTransferService.scala b/sdk/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/admin/grpc/GrpcTransferService.scala deleted file mode 100644 index 3db9ff5a4594..000000000000 --- a/sdk/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/admin/grpc/GrpcTransferService.scala +++ /dev/null @@ -1,156 +0,0 @@ -// Copyright (c) 2024 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
-// SPDX-License-Identifier: Apache-2.0 - -package com.digitalasset.canton.participant.admin.grpc - -import cats.syntax.either.* -import cats.syntax.traverse.* -import com.digitalasset.canton.DomainAlias -import com.digitalasset.canton.ProtoDeserializationError.{FieldNotSet, ProtoDeserializationFailure} -import com.digitalasset.canton.admin.participant.v30.* -import com.digitalasset.canton.data.CantonTimestamp -import com.digitalasset.canton.logging.{NamedLoggerFactory, NamedLogging} -import com.digitalasset.canton.participant.admin.TransferService -import com.digitalasset.canton.participant.protocol.transfer.TransferData -import com.digitalasset.canton.protocol.ContractIdSyntax.* -import com.digitalasset.canton.protocol.{LfContractId, TransferId} -import com.digitalasset.canton.serialization.ProtoConverter -import com.digitalasset.canton.serialization.ProtoConverter.ParsingResult -import com.digitalasset.canton.topology.ParticipantId -import com.digitalasset.canton.tracing.{TraceContext, TraceContextGrpc} -import com.digitalasset.canton.util.OptionUtil - -import scala.concurrent.{ExecutionContext, Future} - -class GrpcTransferService( - service: TransferService, - participantId: ParticipantId, - override protected val loggerFactory: NamedLoggerFactory, -)(implicit - ec: ExecutionContext -) extends TransferServiceGrpc.TransferService - with NamedLogging { - - override def transferSearch( - searchRequest: AdminTransferSearchQuery - ): Future[AdminTransferSearchResponse] = { - implicit val traceContext: TraceContext = TraceContextGrpc.fromGrpcContext - - val AdminTransferSearchQuery( - searchDomainP, - filterSourceDomainP, - filterTimestampP, - filterSubmitterP, - limit, - ) = searchRequest - - for { - filterSourceDomain <- Future( - DomainAlias - .create(filterSourceDomainP) - .valueOr(err => throw ProtoDeserializationFailure.WrapNoLoggingStr(err).asGrpcError) - ) - filterDomain = if (filterSourceDomainP == "") None else Some(filterSourceDomain) - searchDomain <- Future( - DomainAlias - .create(searchDomainP) - .valueOr(err => throw ProtoDeserializationFailure.WrapNoLoggingStr(err).asGrpcError) - ) - filterSubmitterO <- Future( - OptionUtil - .emptyStringAsNone(filterSubmitterP) - .map(ProtoConverter.parseLfPartyId) - .sequence - .valueOr(err => throw ProtoDeserializationFailure.WrapNoLogging(err).asGrpcError) - ) - filterTimestampO <- Future( - filterTimestampP - .map(CantonTimestamp.fromProtoTimestamp) - .sequence - .valueOr(err => throw ProtoDeserializationFailure.WrapNoLogging(err).asGrpcError) - ) - transferData <- - service - .transferSearch( - searchDomain, - filterDomain, - filterTimestampO, - filterSubmitterO, - limit.toInt, - ) - .valueOr(err => throw ProtoDeserializationFailure.WrapNoLoggingStr(err).asGrpcError) - } yield { - val searchResultsP = transferData.map(TransferSearchResult(_).toProtoV30) - AdminTransferSearchResponse(results = searchResultsP) - } - } -} - -final case class TransferSearchResult( - transferId: TransferId, - submittingParty: String, - targetDomain: String, - sourceDomain: String, - contractId: LfContractId, - readyForTransferIn: Boolean, - targetTimeProofO: Option[CantonTimestamp], -) { - def toProtoV30: AdminTransferSearchResponse.TransferSearchResult = - AdminTransferSearchResponse.TransferSearchResult( - contractId = contractId.toProtoPrimitive, - transferId = Some(transferId.toAdminProto), - originDomain = sourceDomain, - targetDomain = targetDomain, - submittingParty = submittingParty, - readyForTransferIn = readyForTransferIn, - targetTimeProof = 
targetTimeProofO.map(_.toProtoTimestamp), - ) -} - -object TransferSearchResult { - def fromProtoV30( - resultP: AdminTransferSearchResponse.TransferSearchResult - ): ParsingResult[TransferSearchResult] = - resultP match { - case AdminTransferSearchResponse - .TransferSearchResult( - contractIdP, - transferIdP, - sourceDomain, - targetDomain, - submitter, - ready, - targetTimeProofOP, - ) => - for { - _ <- Either.cond(contractIdP.nonEmpty, (), FieldNotSet("contractId")) - contractId <- ProtoConverter.parseLfContractId(contractIdP) - transferId <- ProtoConverter - .required("transferId", transferIdP) - .flatMap(TransferId.fromAdminProto30) - targetTimeProofO <- targetTimeProofOP.traverse(CantonTimestamp.fromProtoTimestamp) - _ <- Either.cond(sourceDomain.nonEmpty, (), FieldNotSet("originDomain")) - _ <- Either.cond(targetDomain.nonEmpty, (), FieldNotSet("targetDomain")) - _ <- Either.cond(submitter.nonEmpty, (), FieldNotSet("submitter")) - } yield TransferSearchResult( - transferId, - submitter, - targetDomain, - sourceDomain, - contractId, - ready, - targetTimeProofO, - ) - } - - def apply(transferData: TransferData): TransferSearchResult = - TransferSearchResult( - transferData.transferId, - transferData.transferOutRequest.submitter, - transferData.targetDomain.toProtoPrimitive, - transferData.sourceDomain.toProtoPrimitive, - transferData.contract.contractId, - transferData.transferOutResult.isDefined, - Some(transferData.transferOutRequest.targetTimeProof.timestamp), - ) -} diff --git a/sdk/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/admin/repair/RepairService.scala b/sdk/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/admin/repair/RepairService.scala index 170deb1de025..cc70b3b06e7c 100644 --- a/sdk/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/admin/repair/RepairService.scala +++ b/sdk/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/admin/repair/RepairService.scala @@ -61,7 +61,7 @@ import com.digitalasset.canton.tracing.TraceContext import com.digitalasset.canton.util.FutureInstances.* import com.digitalasset.canton.util.ShowUtil.* import com.digitalasset.canton.util.* -import com.digitalasset.canton.util.retry.RetryUtil.AllExnRetryable +import com.digitalasset.canton.util.retry.AllExceptionRetryPolicy import com.digitalasset.canton.version.ProtocolVersion import com.digitalasset.daml.lf.data.{Bytes, ImmArray} import com.google.common.annotations.VisibleForTesting @@ -1120,7 +1120,7 @@ final class RepairService( ) .unlessShutdown( FutureUnlessShutdown.outcomeF(check(persistentState, indexedDomain)), - AllExnRetryable, + AllExceptionRetryPolicy, ) ) } diff --git a/sdk/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/protocol/AbstractMessageProcessor.scala b/sdk/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/protocol/AbstractMessageProcessor.scala index 4c5b8c92d77d..e8d040a5a348 100644 --- a/sdk/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/protocol/AbstractMessageProcessor.scala +++ b/sdk/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/protocol/AbstractMessageProcessor.scala @@ -5,6 +5,7 @@ package com.digitalasset.canton.participant.protocol import cats.syntax.either.* import cats.syntax.functor.* +import com.daml.metrics.api.MetricsContext import com.daml.nameof.NameOf.functionFullName import 
com.digitalasset.canton.crypto.{DomainSnapshotSyncCryptoApi, DomainSyncCryptoClient} import com.digitalasset.canton.data.CantonTimestamp @@ -86,6 +87,9 @@ abstract class AbstractMessageProcessor( )(implicit traceContext: TraceContext ): FutureUnlessShutdown[Unit] = { + implicit val metricsContext: MetricsContext = MetricsContext( + "type" -> "send-confirmation-response" + ) if (messages.isEmpty) FutureUnlessShutdown.unit else { logger.trace(s"Request $requestId: ProtocolProcessor scheduling the sending of responses") diff --git a/sdk/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/protocol/ProtocolProcessor.scala b/sdk/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/protocol/ProtocolProcessor.scala index 3ff0bbadf40e..258c4cd727fc 100644 --- a/sdk/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/protocol/ProtocolProcessor.scala +++ b/sdk/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/protocol/ProtocolProcessor.scala @@ -8,6 +8,7 @@ import cats.implicits.catsStdInstancesForFuture import cats.syntax.either.* import cats.syntax.functorFilter.* import cats.syntax.parallel.* +import com.daml.metrics.api.MetricsContext import com.daml.nameof.NameOf.functionFullName import com.daml.nonempty.NonEmpty import com.digitalasset.canton.concurrent.FutureSupervisor @@ -429,6 +430,8 @@ abstract class ProtocolProcessor[ registeredF.mapK(FutureUnlessShutdown.outcomeK).map(afterRegistration) } + protected def metricsContextForSubmissionParam(submissionParam: SubmissionParam): MetricsContext + /** Submit the batch to the sequencer. * Also registers `submissionParam` as pending submission. */ @@ -446,6 +449,8 @@ abstract class ProtocolProcessor[ steps.SubmissionSendError, (SendResult, steps.SubmissionResultArgs), ] = { + implicit val metricsContext: MetricsContext = metricsContextForSubmissionParam(submissionParam) + def removePendingSubmission(): Unit = { steps .removePendingSubmission(steps.pendingSubmissions(ephemeral), submissionId) diff --git a/sdk/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/protocol/TransactionProcessor.scala b/sdk/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/protocol/TransactionProcessor.scala index a5678450b60a..6250858dde3c 100644 --- a/sdk/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/protocol/TransactionProcessor.scala +++ b/sdk/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/protocol/TransactionProcessor.scala @@ -5,6 +5,7 @@ package com.digitalasset.canton.participant.protocol import cats.data.EitherT import com.daml.error.* +import com.daml.metrics.api.MetricsContext import com.digitalasset.canton.* import com.digitalasset.canton.concurrent.FutureSupervisor import com.digitalasset.canton.config.{ProcessingTimeout, TestingConfigInternal} @@ -119,6 +120,15 @@ class TransactionProcessor( futureSupervisor, ) { + override protected def metricsContextForSubmissionParam( + submissionParam: TransactionProcessingSteps.SubmissionParam + ): MetricsContext = { + MetricsContext( + "application-id" -> submissionParam.submitterInfo.applicationId, + "type" -> "send-confirmation-request", + ) + } + def submit( submitterInfo: SubmitterInfo, transactionMeta: TransactionMeta, diff --git a/sdk/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/protocol/submission/routing/ContractsTransfer.scala 
b/sdk/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/protocol/submission/routing/ContractsTransfer.scala index 4945c8d51848..8fd5e2f4182a 100644 --- a/sdk/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/protocol/submission/routing/ContractsTransfer.scala +++ b/sdk/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/protocol/submission/routing/ContractsTransfer.scala @@ -15,7 +15,7 @@ import com.digitalasset.canton.protocol.* import com.digitalasset.canton.topology.ParticipantId import com.digitalasset.canton.tracing.TraceContext import com.digitalasset.canton.util.FutureInstances.* -import com.digitalasset.canton.version.Transfer.{SourceProtocolVersion, TargetProtocolVersion} +import com.digitalasset.canton.version.Transfer.TargetProtocolVersion import scala.concurrent.{ExecutionContext, Future} @@ -97,7 +97,6 @@ private[routing] class ContractsTransfer( .submitTransferIn( submitterMetadata, outResult.transferId, - SourceProtocolVersion(sourceSyncDomain.staticDomainParameters.protocolVersion), ) .leftMap[String](err => s"Transfer in failed with error ${err}") .flatMap { s => diff --git a/sdk/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/protocol/transfer/AutomaticTransferIn.scala b/sdk/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/protocol/transfer/AutomaticTransferIn.scala index 1f187f2e65ec..1851abe848db 100644 --- a/sdk/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/protocol/transfer/AutomaticTransferIn.scala +++ b/sdk/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/protocol/transfer/AutomaticTransferIn.scala @@ -18,9 +18,8 @@ import com.digitalasset.canton.protocol.* import com.digitalasset.canton.topology.* import com.digitalasset.canton.topology.client.TopologySnapshot import com.digitalasset.canton.topology.transaction.ParticipantPermission -import com.digitalasset.canton.tracing.{TraceContext, Traced} +import com.digitalasset.canton.tracing.TraceContext import com.digitalasset.canton.util.{EitherTUtil, MonadUtil} -import com.digitalasset.canton.version.Transfer.SourceProtocolVersion import org.slf4j.event.Level import scala.concurrent.{ExecutionContext, Future} @@ -66,17 +65,6 @@ private[participant] object AutomaticTransferIn { possibleSubmittingParties.headOption, AutomaticTransferInError("No possible submitting party for automatic transfer-in"), ) - sourceProtocolVersion <- EitherT - .fromEither[Future]( - transferCoordination - .protocolVersionFor(Traced(id.sourceDomain.unwrap)) - .toRight( - AutomaticTransferInError( - s"Unable to get protocol version of source domain ${id.sourceDomain}" - ) - ) - ) - .map(SourceProtocolVersion(_)) submissionResult <- transferCoordination .transferIn( targetDomain, @@ -89,7 +77,6 @@ private[participant] object AutomaticTransferIn { workflowId = None, ), id, - sourceProtocolVersion, )( TraceContext.empty ) diff --git a/sdk/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/protocol/transfer/TransferCoordination.scala b/sdk/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/protocol/transfer/TransferCoordination.scala index f4c39abfb1e0..3c68c989b229 100644 --- a/sdk/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/protocol/transfer/TransferCoordination.scala +++ 
b/sdk/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/protocol/transfer/TransferCoordination.scala @@ -32,7 +32,7 @@ import com.digitalasset.canton.topology.DomainId import com.digitalasset.canton.tracing.{TraceContext, Traced} import com.digitalasset.canton.util.OptionUtil import com.digitalasset.canton.version.ProtocolVersion -import com.digitalasset.canton.version.Transfer.{SourceProtocolVersion, TargetProtocolVersion} +import com.digitalasset.canton.version.Transfer.TargetProtocolVersion import scala.concurrent.{ExecutionContext, Future} @@ -131,7 +131,6 @@ class TransferCoordination( targetDomain: TargetDomainId, submitterMetadata: TransferSubmitterMetadata, transferId: TransferId, - sourceProtocolVersion: SourceProtocolVersion, )(implicit traceContext: TraceContext ): EitherT[Future, TransferProcessorError, TransferInProcessingSteps.SubmissionResult] = { @@ -147,7 +146,6 @@ class TransferCoordination( .submitTransferIn( submitterMetadata, transferId, - sourceProtocolVersion, ) .mapK(FutureUnlessShutdown.outcomeK) .semiflatMap(Predef.identity) @@ -311,7 +309,6 @@ trait TransferSubmissionHandle { def submitTransferIn( submitterMetadata: TransferSubmitterMetadata, transferId: TransferId, - sourceProtocolVersion: SourceProtocolVersion, )(implicit traceContext: TraceContext ): EitherT[Future, TransferProcessorError, FutureUnlessShutdown[ diff --git a/sdk/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/protocol/transfer/TransferInProcessingSteps.scala b/sdk/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/protocol/transfer/TransferInProcessingSteps.scala index 0460105ad18a..b04ff19214ae 100644 --- a/sdk/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/protocol/transfer/TransferInProcessingSteps.scala +++ b/sdk/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/protocol/transfer/TransferInProcessingSteps.scala @@ -118,7 +118,6 @@ private[transfer] class TransferInProcessingSteps( val SubmissionParam( submitterMetadata, transferId, - sourceProtocolVersion, ) = submissionParam val topologySnapshot = recentSnapshot.ipsSnapshot val pureCrypto = recentSnapshot.pureCrypto @@ -183,7 +182,7 @@ private[transfer] class TransferInProcessingSteps( mediator, transferOutResult, transferInUuid, - sourceProtocolVersion, + transferData.sourceProtocolVersion, targetProtocolVersion, ) ) @@ -685,7 +684,6 @@ object TransferInProcessingSteps { final case class SubmissionParam( submitterMetadata: TransferSubmitterMetadata, transferId: TransferId, - sourceProtocolVersion: SourceProtocolVersion, ) { val submitterLf: LfPartyId = submitterMetadata.submitter } diff --git a/sdk/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/protocol/transfer/TransferInProcessor.scala b/sdk/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/protocol/transfer/TransferInProcessor.scala index be1f2156e661..826184435b22 100644 --- a/sdk/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/protocol/transfer/TransferInProcessor.scala +++ b/sdk/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/protocol/transfer/TransferInProcessor.scala @@ -3,6 +3,7 @@ package com.digitalasset.canton.participant.protocol.transfer +import com.daml.metrics.api.MetricsContext import com.digitalasset.canton.concurrent.FutureSupervisor import com.digitalasset.canton.config.{ProcessingTimeout, 
TestingConfigInternal} import com.digitalasset.canton.crypto.DomainSyncCryptoClient @@ -65,4 +66,13 @@ class TransferInProcessor( targetProtocolVersion.v, loggerFactory, futureSupervisor, + ) { + override protected def metricsContextForSubmissionParam( + submissionParam: TransferInProcessingSteps.SubmissionParam + ): MetricsContext = { + MetricsContext( + "application-id" -> submissionParam.submitterMetadata.applicationId, + "type" -> "transfer-in", ) + } +} diff --git a/sdk/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/protocol/transfer/TransferOutProcessor.scala b/sdk/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/protocol/transfer/TransferOutProcessor.scala index dc57646987b3..41b6bc1931cd 100644 --- a/sdk/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/protocol/transfer/TransferOutProcessor.scala +++ b/sdk/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/protocol/transfer/TransferOutProcessor.scala @@ -3,6 +3,7 @@ package com.digitalasset.canton.participant.protocol.transfer +import com.daml.metrics.api.MetricsContext import com.digitalasset.canton.concurrent.FutureSupervisor import com.digitalasset.canton.config.{ProcessingTimeout, TestingConfigInternal} import com.digitalasset.canton.crypto.DomainSyncCryptoClient @@ -65,4 +66,13 @@ class TransferOutProcessor( sourceProtocolVersion.v, loggerFactory, futureSupervisor, + ) { + override protected def metricsContextForSubmissionParam( + submissionParam: TransferOutProcessingSteps.SubmissionParam + ): MetricsContext = { + MetricsContext( + "application-id" -> submissionParam.submitterMetadata.applicationId, + "type" -> "transfer-out", ) + } +} diff --git a/sdk/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/pruning/AcsCommitmentProcessor.scala b/sdk/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/pruning/AcsCommitmentProcessor.scala index 7f429c69b60a..ac5d4a6af974 100644 --- a/sdk/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/pruning/AcsCommitmentProcessor.scala +++ b/sdk/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/pruning/AcsCommitmentProcessor.scala @@ -11,6 +11,7 @@ import cats.syntax.parallel.* import cats.syntax.traverse.* import cats.syntax.validated.* import com.daml.error.* +import com.daml.metrics.api.MetricsContext import com.daml.nameof.NameOf.functionFullName import com.daml.nonempty.NonEmpty import com.digitalasset.canton.admin.participant.v30.{ReceivedCommitmentState, SentCommitmentState} @@ -1361,6 +1362,7 @@ class AcsCommitmentProcessor( val delayMillis = if (maxDelayMillis > 0) rand.nextInt(maxDelayMillis) else 0 def sendUnlessClosing()(ts: CantonTimestamp) = { + implicit val metricsContext: MetricsContext = MetricsContext("type" -> "send-commitment") performUnlessClosingUSF(functionFullName) { def message = s"Failed to send commitment message batch for period $period" val cryptoSnapshot = domainCrypto.currentSnapshotApproximation diff --git a/sdk/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/store/ParticipantNodePersistentState.scala b/sdk/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/store/ParticipantNodePersistentState.scala index d937c49b42e7..6e61054cccfb 100644 --- 
a/sdk/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/store/ParticipantNodePersistentState.scala +++ b/sdk/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/store/ParticipantNodePersistentState.scala @@ -33,7 +33,7 @@ import com.digitalasset.canton.store.IndexedStringStore import com.digitalasset.canton.time.{Clock, NonNegativeFiniteDuration} import com.digitalasset.canton.tracing.TraceContext import com.digitalasset.canton.util.ShowUtil.* -import com.digitalasset.canton.util.retry.RetryUtil.NoExnRetryable +import com.digitalasset.canton.util.retry.NoExceptionRetryPolicy import com.digitalasset.canton.util.{ErrorUtil, retry} import com.digitalasset.canton.version.ReleaseProtocolVersion import org.apache.pekko.stream.Materializer @@ -210,7 +210,7 @@ object ParticipantNodePersistentState extends HasLoggerName { ) .unlessShutdown( settingsStore.refreshCache().map(_ => lens(settingsStore.settings).toRight(())), - NoExnRetryable, + NoExceptionRetryPolicy, ) .map(_.getOrElse { ErrorUtil.internalError( diff --git a/sdk/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/store/TransferStore.scala b/sdk/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/store/TransferStore.scala index 5470bc0a6fcc..d0944e50d210 100644 --- a/sdk/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/store/TransferStore.scala +++ b/sdk/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/store/TransferStore.scala @@ -350,8 +350,7 @@ trait TransferLookup { traceContext: TraceContext ): EitherT[Future, TransferLookupError, TransferData] - /** Find utility to look for in-flight transfers. Queried by - * [[com.digitalasset.canton.console.commands.ParticipantAdministration#transfer.search]]. + /** Find utility to look for in-flight transfers. * Results need not be consistent with [[lookup]]. 
*/ def find( diff --git a/sdk/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/store/db/DbInFlightSubmissionStore.scala b/sdk/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/store/db/DbInFlightSubmissionStore.scala index 61df51695dfc..12bfea5a900a 100644 --- a/sdk/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/store/db/DbInFlightSubmissionStore.scala +++ b/sdk/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/store/db/DbInFlightSubmissionStore.scala @@ -33,7 +33,7 @@ import com.digitalasset.canton.topology.DomainId import com.digitalasset.canton.tracing.{SerializableTraceContext, TraceContext, Traced} import com.digitalasset.canton.util.ShowUtil.* import com.digitalasset.canton.util.TryUtil.ForFailedOps -import com.digitalasset.canton.util.retry.RetryUtil.NoExnRetryable +import com.digitalasset.canton.util.retry.NoExceptionRetryPolicy import com.digitalasset.canton.util.{BatchAggregator, ErrorUtil, OptionUtil, SingleUseCell, retry} import com.digitalasset.canton.version.ReleaseProtocolVersion import slick.jdbc.{PositionedParameters, SetParameter} @@ -427,7 +427,7 @@ object DbInFlightSubmissionStore { implicit val stopRetry: retry.Success[Boolean] = retry.Success[Boolean](Predef.identity) retry .Directly(logger, storage, retry.Forever, "register submission retry") - .unlessShutdown(oneRound, NoExnRetryable) + .unlessShutdown(oneRound, NoExceptionRetryPolicy) .onShutdown { fillEmptyCells(Success(AbortedDueToShutdown)) true diff --git a/sdk/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/sync/CantonSyncService.scala b/sdk/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/sync/CantonSyncService.scala index c0cdbaf41bf6..74276e131252 100644 --- a/sdk/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/sync/CantonSyncService.scala +++ b/sdk/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/sync/CantonSyncService.scala @@ -106,7 +106,7 @@ import com.digitalasset.canton.util.FutureInstances.parallelFuture import com.digitalasset.canton.util.OptionUtils.OptionExtension import com.digitalasset.canton.util.* import com.digitalasset.canton.version.ProtocolVersion -import com.digitalasset.canton.version.Transfer.{SourceProtocolVersion, TargetProtocolVersion} +import com.digitalasset.canton.version.Transfer.TargetProtocolVersion import com.digitalasset.daml.lf.archive.DamlLf import com.digitalasset.daml.lf.data.Ref.{PackageId, Party, SubmissionId} import com.digitalasset.daml.lf.data.{ImmArray, Ref} @@ -310,13 +310,6 @@ class CantonSyncService( (tracedDomainId: Traced[DomainId]) => syncDomainPersistentStateManager.protocolVersionFor(tracedDomainId.value) - val transferService: TransferService = new TransferService( - domainIdOfAlias = aliasManager.domainIdForAlias, - submissionHandles = readySyncDomainById, - transferLookups = domainId => - syncDomainPersistentStateManager.get(domainId.unwrap).map(_.transferStore), - protocolVersionFor = protocolVersionGetter, - ) val commandProgressTracker: CommandProgressTracker = if (parameters.commandProgressTracking.enabled) new CommandProgressTrackerImpl(parameters.commandProgressTracking, clock, loggerFactory) @@ -1706,7 +1699,7 @@ class CantonSyncService( domain: DomainId, remoteDomain: DomainId, )( - transfer: ProtocolVersion => SyncDomain => EitherT[Future, E, FutureUnlessShutdown[T]] + transfer: SyncDomain => 
EitherT[Future, E, FutureUnlessShutdown[T]] )(implicit traceContext: TraceContext): Future[SubmissionResult] = { for { syncDomain <- EitherT.fromOption[Future]( @@ -1714,12 +1707,7 @@ class CantonSyncService( ifNone = RequestValidationErrors.InvalidArgument .Reject(s"Domain ID not found: $domain"): DamlError, ) - remoteProtocolVersion <- EitherT.fromOption[Future]( - protocolVersionGetter(Traced(remoteDomain)), - ifNone = RequestValidationErrors.InvalidArgument - .Reject(s"Domain ID's protocol version not found: $remoteDomain"): DamlError, - ) - _ <- transfer(remoteProtocolVersion)(syncDomain) + _ <- transfer(syncDomain) .leftMap(error => RequestValidationErrors.InvalidArgument .Reject( @@ -1734,32 +1722,49 @@ class CantonSyncService( .leftMap(error => SubmissionResult.SynchronousError(error.rpcStatus())) .merge + def getProtocolVersion(domainId: DomainId): Future[ProtocolVersion] = + protocolVersionGetter(Traced(domainId)) match { + case Some(protocolVersion) => Future.successful(protocolVersion) + case None => + Future.failed( + RequestValidationErrors.InvalidArgument + .Reject(s"Domain ID's protocol version not found: $domainId") + .asGrpcError + ) + } + reassignmentCommand match { case unassign: ReassignmentCommand.Unassign => - doTransfer( - domain = unassign.sourceDomain.unwrap, - remoteDomain = unassign.targetDomain.unwrap, - )(protocolVersion => - _.submitTransferOut( - submitterMetadata = TransferSubmitterMetadata( - submitter = submitter, - applicationId = applicationId, - submittingParticipant = participantId, - commandId = commandId, - submissionId = submissionId, - workflowId = workflowId, - ), - contractId = unassign.contractId, - targetDomain = unassign.targetDomain, - targetProtocolVersion = TargetProtocolVersion(protocolVersion), + for { + targetProtocolVersion <- getProtocolVersion(unassign.targetDomain.unwrap).map( + TargetProtocolVersion(_) ) - ) + + submissionResult <- doTransfer( + domain = unassign.sourceDomain.unwrap, + remoteDomain = unassign.targetDomain.unwrap, + )( + _.submitTransferOut( + submitterMetadata = TransferSubmitterMetadata( + submitter = submitter, + applicationId = applicationId, + submittingParticipant = participantId, + commandId = commandId, + submissionId = submissionId, + workflowId = workflowId, + ), + contractId = unassign.contractId, + targetDomain = unassign.targetDomain, + targetProtocolVersion = targetProtocolVersion, + ) + ) + } yield submissionResult case assign: ReassignmentCommand.Assign => doTransfer( domain = assign.targetDomain.unwrap, remoteDomain = assign.sourceDomain.unwrap, - )(protocolVersion => + )( _.submitTransferIn( submitterMetadata = TransferSubmitterMetadata( submitter = submitter, @@ -1770,7 +1775,6 @@ class CantonSyncService( workflowId = workflowId, ), transferId = TransferId(assign.sourceDomain, assign.unassignId), - sourceProtocolVersion = SourceProtocolVersion(protocolVersion), ) ) } diff --git a/sdk/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/sync/SyncDomain.scala b/sdk/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/sync/SyncDomain.scala index 8a9e78bb1e1d..3de212b6efaa 100644 --- a/sdk/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/sync/SyncDomain.scala +++ b/sdk/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/sync/SyncDomain.scala @@ -886,7 +886,6 @@ class SyncDomain( override def submitTransferIn( submitterMetadata: TransferSubmitterMetadata, transferId: TransferId, - 
sourceProtocolVersion: SourceProtocolVersion, )(implicit traceContext: TraceContext ): EitherT[Future, TransferProcessorError, FutureUnlessShutdown[ @@ -906,11 +905,7 @@ class SyncDomain( transferInProcessor .submit( TransferInProcessingSteps - .SubmissionParam( - submitterMetadata, - transferId, - sourceProtocolVersion, - ) + .SubmissionParam(submitterMetadata, transferId) ) .onShutdown(Left(DomainNotReady(domainId, "The domain is shutting down"))) } diff --git a/sdk/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/topology/LedgerServerPartyNotifier.scala b/sdk/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/topology/LedgerServerPartyNotifier.scala index ce8c0ef0e9a3..8ee94e16b6fd 100644 --- a/sdk/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/topology/LedgerServerPartyNotifier.scala +++ b/sdk/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/topology/LedgerServerPartyNotifier.scala @@ -148,7 +148,7 @@ class LedgerServerPartyNotifier( ) } // propagate admin parties - case DomainTrustCertificate(participantId, _, _, _) => + case DomainTrustCertificate(participantId, _) => Seq( ( participantId.adminParty, diff --git a/sdk/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/topology/ParticipantTopologyDispatcher.scala b/sdk/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/topology/ParticipantTopologyDispatcher.scala index 2b2e907785ef..7948a7be2f8d 100644 --- a/sdk/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/topology/ParticipantTopologyDispatcher.scala +++ b/sdk/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/topology/ParticipantTopologyDispatcher.scala @@ -169,7 +169,7 @@ class ParticipantTopologyDispatcher( filterNamespace = None, ) .map(_.toTopologyState.exists { - case DomainTrustCertificate(`participantId`, `domainId`, _, _) => true + case DomainTrustCertificate(`participantId`, `domainId`) => true case _ => false }) ) @@ -186,8 +186,6 @@ class ParticipantTopologyDispatcher( DomainTrustCertificate( participantId, domainId, - transferOnlyToGivenTargetDomains = false, - targetDomains = Seq.empty, ), serial = None, // TODO(#12390) auto-determine signing keys diff --git a/sdk/canton/community/participant/src/test/scala/com/digitalasset/canton/participant/protocol/ProtocolProcessorTest.scala b/sdk/canton/community/participant/src/test/scala/com/digitalasset/canton/participant/protocol/ProtocolProcessorTest.scala index fe0b3d3cbc40..7b56d73d8b58 100644 --- a/sdk/canton/community/participant/src/test/scala/com/digitalasset/canton/participant/protocol/ProtocolProcessorTest.scala +++ b/sdk/canton/community/participant/src/test/scala/com/digitalasset/canton/participant/protocol/ProtocolProcessorTest.scala @@ -5,6 +5,7 @@ package com.digitalasset.canton.participant.protocol import cats.Eval import cats.data.EitherT +import com.daml.metrics.api.MetricsContext import com.daml.nonempty.NonEmpty import com.daml.test.evidence.scalatest.ScalaTestSupport.TagContainer import com.daml.test.evidence.tag.EvidenceTag @@ -148,7 +149,7 @@ class ProtocolProcessorTest any[Option[AggregationRule]], any[SendCallback], any[Boolean], - )(anyTraceContext) + )(anyTraceContext, any[MetricsContext]) ) .thenAnswer( ( @@ -384,6 +385,10 @@ class ProtocolProcessorTest override def participantId: ParticipantId = participant override def timeouts: ProcessingTimeout = 
ProtocolProcessorTest.this.timeouts + + override protected def metricsContextForSubmissionParam( + submissionParam: Int + ): MetricsContext = MetricsContext.Empty } ephemeralState.get().recordOrderPublisher.scheduleRecoveries(List.empty) @@ -478,7 +483,7 @@ class ProtocolProcessorTest any[Option[AggregationRule]], any[SendCallback], any[Boolean], - )(anyTraceContext) + )(anyTraceContext, any[MetricsContext]) ) .thenReturn(EitherT.leftT[FutureUnlessShutdown, Unit](sendError)) val (sut, _persistent, _ephemeral, _) = diff --git a/sdk/canton/community/participant/src/test/scala/com/digitalasset/canton/participant/protocol/transfer/TransferInProcessingStepsTest.scala b/sdk/canton/community/participant/src/test/scala/com/digitalasset/canton/participant/protocol/transfer/TransferInProcessingStepsTest.scala index 6943f278aabe..8ec953dcdfe2 100644 --- a/sdk/canton/community/participant/src/test/scala/com/digitalasset/canton/participant/protocol/transfer/TransferInProcessingStepsTest.scala +++ b/sdk/canton/community/participant/src/test/scala/com/digitalasset/canton/participant/protocol/transfer/TransferInProcessingStepsTest.scala @@ -53,8 +53,8 @@ import com.digitalasset.canton.time.{DomainTimeTracker, TimeProofTestUtil, WallC import com.digitalasset.canton.topology.MediatorGroup.MediatorGroupIndex import com.digitalasset.canton.topology.* import com.digitalasset.canton.topology.transaction.ParticipantPermission +import com.digitalasset.canton.version.HasTestCloseContext import com.digitalasset.canton.version.Transfer.{SourceProtocolVersion, TargetProtocolVersion} -import com.digitalasset.canton.version.{HasTestCloseContext, ProtocolVersion} import org.scalatest.wordspec.AsyncWordSpec import java.util.UUID @@ -203,12 +203,10 @@ class TransferInProcessingStepsTest val transferId = TransferId(sourceDomain, CantonTimestamp.Epoch) val transferDataF = TransferStoreTest.mkTransferDataForDomain(transferId, sourceMediator, party1, targetDomain) - val submissionParam = - SubmissionParam( - submitterInfo(party1), - transferId, - SourceProtocolVersion(testedProtocolVersion), - ) + val submissionParam = SubmissionParam( + submitterInfo(party1), + transferId, + ) val transferOutResult = TransferResultHelpers.transferOutResult( sourceDomain, @@ -316,12 +314,10 @@ class TransferInProcessingStepsTest } "fail when submitting party is not a stakeholder" in { - val submissionParam2 = - SubmissionParam( - submitterInfo(party2), - transferId, - SourceProtocolVersion(testedProtocolVersion), - ) + val submissionParam2 = SubmissionParam( + submitterInfo(party2), + transferId, + ) for { transferData <- transferDataF @@ -372,12 +368,10 @@ class TransferInProcessingStepsTest } "fail when submitting party not hosted on the participant" in { - val submissionParam2 = - SubmissionParam( - submitterInfo(party2), - transferId, - SourceProtocolVersion(testedProtocolVersion), - ) + val submissionParam2 = SubmissionParam( + submitterInfo(party2), + transferId, + ) for { transferData2 <- TransferStoreTest.mkTransferDataForDomain( transferId, @@ -401,31 +395,6 @@ class TransferInProcessingStepsTest } } } - - "fail when protocol version are incompatible" in { - // source domain does not support transfer counters - val submissionParam2 = - submissionParam.copy(sourceProtocolVersion = SourceProtocolVersion(ProtocolVersion.v31)) - for { - transferData <- transferDataF - deps <- statefulDependencies - (persistentState, ephemeralState) = deps - _ <- setUpOrFail(transferData, transferOutResult, persistentState).failOnShutdown - 
preparedSubmission <- - transferInProcessingSteps - .createSubmission( - submissionParam2, - targetMediator, - ephemeralState, - cryptoSnapshot, - ) - .value - .failOnShutdown - } yield { - preparedSubmission should matchPattern { case Right(_) => } - } - - } } "receive request" should { diff --git a/sdk/canton/community/testing/src/main/scala/com/digitalasset/canton/topology/TestingIdentityFactory.scala b/sdk/canton/community/testing/src/main/scala/com/digitalasset/canton/topology/TestingIdentityFactory.scala index 727deb3dca8f..e6fe734b5d40 100644 --- a/sdk/canton/community/testing/src/main/scala/com/digitalasset/canton/topology/TestingIdentityFactory.scala +++ b/sdk/canton/community/testing/src/main/scala/com/digitalasset/canton/topology/TestingIdentityFactory.scala @@ -560,8 +560,6 @@ class TestingIdentityFactory( DomainTrustCertificate( participantId, domainId, - transferOnlyToGivenTargetDomains = false, - targetDomains = Seq.empty, ) ) :+ mkAdd( ParticipantDomainPermission( @@ -641,8 +639,6 @@ class TestingOwnerWithKeys( DomainTrustCertificate( participant1, domainId, - transferOnlyToGivenTargetDomains = false, - targetDomains = Seq.empty, ) private val defaultDomainParameters = TestDomainParameters.defaultDynamic @@ -685,9 +681,9 @@ class TestingOwnerWithKeys( ) ) - val p1_dtc = mkAdd(DomainTrustCertificate(participant1, domainId, false, Seq.empty)) - val p2_dtc = mkAdd(DomainTrustCertificate(participant2, domainId, false, Seq.empty)) - val p3_dtc = mkAdd(DomainTrustCertificate(participant3, domainId, false, Seq.empty)) + val p1_dtc = mkAdd(DomainTrustCertificate(participant1, domainId)) + val p2_dtc = mkAdd(DomainTrustCertificate(participant2, domainId)) + val p3_dtc = mkAdd(DomainTrustCertificate(participant3, domainId)) val p1_otk = mkAddMultiKey( OwnerToKeyMapping(participant1, None, NonEmpty(Seq, EncryptionKeys.key1, SigningKeys.key1)), NonEmpty(Set, key1), diff --git a/sdk/canton/ref b/sdk/canton/ref index b2ab581a7fe8..f3c8e636ab88 100644 --- a/sdk/canton/ref +++ b/sdk/canton/ref @@ -1 +1 @@ -20240725.13761.vb5290ce8 +20240725.13780.v97d3c8b7