From 317ed8d659ce6cf0eeee45fd028099956253e065 Mon Sep 17 00:00:00 2001 From: canton-machine <48923836+canton-machine@users.noreply.github.com> Date: Mon, 11 Apr 2022 09:21:33 +0200 Subject: [PATCH] Update 2022-04-11.07 (#20) Reference commit: 0fe56b5b0 Co-authored-by: Canton --- .../app/src/main/resources/application.conf | 4 - .../canton/console/InstanceReference.scala | 3 +- .../SequencerAdministrationGroup.scala | 26 ++- .../canton/environment/Errors.scala | 2 + .../canton/environment/Nodes.scala | 12 +- .../examples/06-messaging/contact/daml.yaml | 2 +- .../examples/06-messaging/message/daml.yaml | 2 +- .../src/main/daml/CantonExamples/daml.yaml | 4 +- ...oconfigure.spi.SdkTracerProviderConfigurer | 1 - .../main/scala/com/daml/lf/CantonOnly.scala | 4 +- .../com/daml/nonempty/NonEmptyUtil.scala | 29 +++ .../canton/ProtoDeserializationError.scala | 47 +++-- .../common/domain/ServiceAgreement.scala | 2 +- .../canton/config/RequireTypes.scala | 4 +- .../canton/data/ActionDescription.scala | 3 - .../canton/data/GenTransactionTree.scala | 168 ++++++++++-------- .../canton/data/GenTransferViewTree.scala | 4 +- .../digitalasset/canton/data/Informee.scala | 19 +- .../digitalasset/canton/data/MerkleSeq.scala | 45 ++--- .../digitalasset/canton/data/MerkleTree.scala | 11 +- .../canton/data/TransactionView.scala | 48 ++--- .../canton/data/TreeSerialization.scala | 5 +- .../environment/CantonNodeBootstrap.scala | 3 +- .../ledger/api/client/LedgerConnection.scala | 4 +- .../lifecycle/FutureUnlessShutdown.scala | 2 + .../canton/lifecycle/Lifecycle.scala | 4 +- .../lifecycle/ShutdownFailedException.scala | 8 +- .../logging/pretty/PrettyInstances.scala | 5 + .../com/digitalasset/canton/package.scala | 24 +-- .../protocol/WellFormedTransaction.scala | 18 +- .../messages/ConfirmationRequest.scala | 10 +- .../protocol/messages/InformeeMessage.scala | 7 +- .../messages/TransactionResultMessage.scala | 7 +- .../canton/resource/DbMigrations.scala | 105 ++++++----- .../canton/resource/DbStorageSingle.scala | 10 +- .../canton/resource/Storage.scala | 36 ++-- .../sequencing/protocol/Recipients.scala | 51 +++--- .../sequencing/protocol/RecipientsTree.scala | 56 +++--- .../store/db/DbBulkUpdateProcessor.scala | 21 ++- .../canton/topology/store/TopologyStore.scala | 16 +- .../canton/tracing/BatchTracing.scala | 8 +- .../canton/tracing/TracerProvider.scala | 32 ++-- .../canton/util/BatchAggregator.scala | 18 +- .../canton/util/LfTransactionUtil.scala | 12 +- .../canton/util/retry/RetryEither.scala | 87 ++++++--- .../canton/version/CantonVersion.scala | 13 +- .../sequencing/protocol/RecipientsTest.scala | 66 +++---- .../protocol/RecipientsTreeTest.scala | 26 +-- .../com/digitalasset/canton/test/hello.proto | 0 .../canton/test/parsing-attack.proto | 49 +++++ .../com/digitalasset/canton/BaseTest.scala | 12 +- .../canton/data/GenTransactionTreeTest.scala | 33 ++-- .../canton/data/MerkleSeqTest.scala | 11 +- .../canton/lifecycle/LifecycleTest.scala | 2 +- .../protobuf/ProtobufParsingAttackTest.scala | 70 ++++++++ .../protocol/ExampleTransactionFactory.scala | 11 +- .../WellFormedTransactionMergeTest.scala | 7 +- .../protocol/WellFormedTransactionTest.scala | 1 - .../canton/resource/DbStorageSingleTest.scala | 11 +- .../canton/store/db/DbStorageSetup.scala | 10 +- .../topology/store/TopologyStoreTest.scala | 18 ++ .../canton/util/BatchAggregatorTest.scala | 10 +- .../canton/util/LfTransactionBuilder.scala | 4 - .../src/main/daml/ai-analysis/AIAnalysis.daml | 3 +- .../demo/src/main/daml/ai-analysis/daml.yaml | 4 +- 
community/demo/src/main/daml/bank/daml.yaml | 4 +- community/demo/src/main/daml/doctor/daml.yaml | 4 +- .../src/main/daml/health-insurance/daml.yaml | 4 +- .../src/main/daml/medical-records/daml.yaml | 4 +- .../canton/demo/ReferenceDemoScript.scala | 26 +-- community/demo/src/pack/demo/demo.sc | 1 + .../ConfirmationResponseProcessor.scala | 29 +-- .../sequencing/sequencer/EventSignaller.scala | 4 +- .../sequencer/SequencerWriter.scala | 67 ++++--- .../sequencer/SequencerWriterSource.scala | 79 ++++---- .../sequencer/store/DbSequencerStore.scala | 35 ++-- .../store/InMemorySequencerStore.scala | 10 +- .../sequencer/store/SequencerStore.scala | 7 +- .../store/SequencerWriterStore.scala | 7 +- .../topology/DomainTopologyDispatcher.scala | 37 ++-- .../ConfirmationResponseProcessorTest.scala | 28 +-- .../sequencer/SequencerReaderTest.scala | 10 +- .../sequencer/SequencerWriterSourceTest.scala | 3 +- .../sequencer/SequencerWriterTest.scala | 3 +- .../MultiTenantedSequencerStoreTest.scala | 6 +- .../sequencer/store/SequencerStoreTest.scala | 45 ++--- .../service/GrpcSequencerServiceTest.scala | 16 +- community/participant/src/main/daml/daml.yaml | 4 +- .../participant/admin/RepairService.scala | 2 - .../api/CantonLedgerApiServerWrapper.scala | 4 +- .../protocol/MessageDispatcher.scala | 5 +- .../protocol/TransactionProcessingSteps.scala | 33 ++-- .../transfer/TransferInProcessingSteps.scala | 13 +- .../transfer/TransferOutProcessingSteps.scala | 6 +- .../validation/ModelConformanceChecker.scala | 11 +- .../protocol/validation/UsedAndCreated.scala | 6 +- .../participant/store/DamlPackageStore.scala | 2 +- .../store/db/DbActiveContractStore.scala | 10 +- .../store/db/DbContractKeyJournal.scala | 4 +- .../store/db/DbContractStore.scala | 10 +- .../store/db/DbDamlPackageStore.scala | 11 +- .../store/db/DbInFlightSubmissionStore.scala | 11 +- .../store/db/DbMultiDomainEventLog.scala | 60 +++---- .../store/db/DbRequestJournalStore.scala | 17 +- .../memory/InMemoryDamlPackageStore.scala | 2 +- .../canton/participant/util/DAMLe.scala | 1 - .../protocol/MessageDispatcherTest.scala | 9 +- .../ModelConformanceCheckerTest.scala | 11 +- project/BuildCommon.scala | 43 ++--- project/Dependencies.scala | 13 +- project/project/DamlVersions.scala | 2 +- version.sbt | 2 +- 112 files changed, 1197 insertions(+), 874 deletions(-) delete mode 100644 community/common/src/main/resources/META-INF/services/io.opentelemetry.sdk.autoconfigure.spi.SdkTracerProviderConfigurer create mode 100644 community/common/src/main/scala/com/daml/nonempty/NonEmptyUtil.scala rename community/common/src/{main => test}/protobuf/com/digitalasset/canton/test/hello.proto (100%) create mode 100644 community/common/src/test/protobuf/com/digitalasset/canton/test/parsing-attack.proto create mode 100644 community/common/src/test/scala/com/digitalasset/canton/protobuf/ProtobufParsingAttackTest.scala diff --git a/community/app/src/main/resources/application.conf b/community/app/src/main/resources/application.conf index 4f44007db..01ef650a4 100644 --- a/community/app/src/main/resources/application.conf +++ b/community/app/src/main/resources/application.conf @@ -1,8 +1,4 @@ akka { - # TODO(M98): Once akka shutdown order is fixed in daml-on-x-server reenable (https://github.com/digital-asset/daml/issues/1886) - log-dead-letters-during-shutdown = off - log-dead-letters = 0 - loggers = ["akka.event.slf4j.Slf4jLogger"] loglevel = "INFO" diff --git a/community/app/src/main/scala/com/digitalasset/canton/console/InstanceReference.scala 
b/community/app/src/main/scala/com/digitalasset/canton/console/InstanceReference.scala index c75366adb..5a93df036 100644 --- a/community/app/src/main/scala/com/digitalasset/canton/console/InstanceReference.scala +++ b/community/app/src/main/scala/com/digitalasset/canton/console/InstanceReference.scala @@ -261,7 +261,8 @@ trait DomainReference // above command needs to be def such that `Help` works. lazy private val partiesGroup = new PartiesAdministrationGroup(this, consoleEnvironment) - private lazy val sequencer_ = new SequencerAdministrationGroup(this, consoleEnvironment) + private lazy val sequencer_ = + new SequencerAdministrationGroup(this, consoleEnvironment, loggerFactory) @Help.Summary("Manage the sequencer") @Help.Group("Sequencer") override def sequencer: SequencerAdministrationGroup = sequencer_ diff --git a/community/app/src/main/scala/com/digitalasset/canton/console/commands/SequencerAdministrationGroup.scala b/community/app/src/main/scala/com/digitalasset/canton/console/commands/SequencerAdministrationGroup.scala index 9ee7c7e37..1323ed5b5 100644 --- a/community/app/src/main/scala/com/digitalasset/canton/console/commands/SequencerAdministrationGroup.scala +++ b/community/app/src/main/scala/com/digitalasset/canton/console/commands/SequencerAdministrationGroup.scala @@ -7,7 +7,14 @@ import com.digitalasset.canton.admin.api.client.commands.{ EnterpriseSequencerAdminCommands, SequencerAdminCommands, } -import com.digitalasset.canton.console.{AdminCommandRunner, ConsoleEnvironment, Help, Helpful} +import com.digitalasset.canton.console.{ + AdminCommandRunner, + ConsoleEnvironment, + FeatureFlag, + FeatureFlagFilter, + Help, + Helpful, +} import com.digitalasset.canton.data.CantonTimestamp import com.digitalasset.canton.domain.sequencing.sequencer.{ LedgerIdentity, @@ -15,6 +22,7 @@ import com.digitalasset.canton.domain.sequencing.sequencer.{ SequencerPruningStatus, SequencerSnapshot, } +import com.digitalasset.canton.logging.{NamedLoggerFactory, NamedLogging} import com.digitalasset.canton.topology.Member import com.digitalasset.canton.util.ShowUtil._ @@ -23,8 +31,11 @@ import scala.jdk.DurationConverters._ class SequencerAdministrationGroup( runner: AdminCommandRunner, - consoleEnvironment: ConsoleEnvironment, -) extends Helpful { + val consoleEnvironment: ConsoleEnvironment, + val loggerFactory: NamedLoggerFactory, +) extends Helpful + with FeatureFlagFilter + with NamedLogging { @Help.Summary("Pruning of the sequencer") object pruning { @Help.Summary("Status of the sequencer and its connected clients") @@ -208,15 +219,18 @@ class SequencerAdministrationGroup( } @Help.Summary( - "Authorize a ledger identity (e.g. an EthereumAccount) on the underlying ledger. " + "Authorize a ledger identity (e.g. an EthereumAccount) on the underlying ledger. ", + FeatureFlag.Preview, ) @Help.Description("""Authorize a ledger identity (e.g. an EthereumAccount) on the underlying ledger. |Currently only implemented for the Ethereum sequencer and has no effect for other sequencer |integrations. | See the authorization documentation of the Ethereum sequencer integrations for more detail. 
"""") - def authorize_ledger_identity(ledgerIdentity: LedgerIdentity): Unit = consoleEnvironment.run { - runner.adminCommand(EnterpriseSequencerAdminCommands.AuthorizeLedgerIdentity(ledgerIdentity)) + def authorize_ledger_identity(ledgerIdentity: LedgerIdentity): Unit = check(FeatureFlag.Preview) { + consoleEnvironment.run { + runner.adminCommand(EnterpriseSequencerAdminCommands.AuthorizeLedgerIdentity(ledgerIdentity)) + } } } diff --git a/community/app/src/main/scala/com/digitalasset/canton/environment/Errors.scala b/community/app/src/main/scala/com/digitalasset/canton/environment/Errors.scala index 65eeb6ac0..582fb03a3 100644 --- a/community/app/src/main/scala/com/digitalasset/canton/environment/Errors.scala +++ b/community/app/src/main/scala/com/digitalasset/canton/environment/Errors.scala @@ -49,6 +49,8 @@ final case class DidntUseForceOnRepairMigration(name: String) extends StartupErr final case class StartFailed(name: String, message: String) extends StartupError +final case class ShutdownDuringStartup(name: String, message: String) extends StartupError + /** Trying to start the node when the database has pending migrations */ final case class PendingDatabaseMigration(name: String, pendingMigrationMessage: String) diff --git a/community/app/src/main/scala/com/digitalasset/canton/environment/Nodes.scala b/community/app/src/main/scala/com/digitalasset/canton/environment/Nodes.scala index 443ee0fa0..23627dde9 100644 --- a/community/app/src/main/scala/com/digitalasset/canton/environment/Nodes.scala +++ b/community/app/src/main/scala/com/digitalasset/canton/environment/Nodes.scala @@ -223,7 +223,7 @@ class ManagedNodes[ } } - for { + val result = for { _ <- migrations .connectionCheck(failFastIfDbOut, params.processingTimeouts) .leftMap( @@ -240,6 +240,10 @@ class ManagedNodes[ .roundDurationForHumans(Duration.fromNanos(elapsed))}" ) } + + result.value.onShutdown( + Left(ShutdownDuringStartup(name, "DB migration check interrupted due to shutdown")) + ) } private def checkNotRunning(name: String): Either[StartupError, Unit] = @@ -252,6 +256,8 @@ class ManagedNodes[ .create(dbConfig, name) .migrateDatabase() .leftMap(FailedDatabaseMigration(name, _)) + .value + .onShutdown(Left(ShutdownDuringStartup(name, "DB migration interrupted due to shutdown"))) } private def runRepairMigration( @@ -263,6 +269,10 @@ class ManagedNodes[ .create(dbConfig, name) .repairFlywayMigration() .leftMap(FailedDatabaseRepairMigration(name, _)) + .value + .onShutdown( + Left(ShutdownDuringStartup(name, "DB repair migration interrupted due to shutdown")) + ) } } diff --git a/community/app/src/pack/examples/06-messaging/contact/daml.yaml b/community/app/src/pack/examples/06-messaging/contact/daml.yaml index 72bafd7d3..9e0f2b33f 100644 --- a/community/app/src/pack/examples/06-messaging/contact/daml.yaml +++ b/community/app/src/pack/examples/06-messaging/contact/daml.yaml @@ -1,4 +1,4 @@ -sdk-version: 2.1.0-snapshot.20220328.9630.0.66c37bad +sdk-version: 2.1.0-snapshot.20220407.9685.0.7ed507cf sandbox-options: - --wall-clock-time name: contact diff --git a/community/app/src/pack/examples/06-messaging/message/daml.yaml b/community/app/src/pack/examples/06-messaging/message/daml.yaml index 74a8cb5bf..29d11593f 100644 --- a/community/app/src/pack/examples/06-messaging/message/daml.yaml +++ b/community/app/src/pack/examples/06-messaging/message/daml.yaml @@ -1,4 +1,4 @@ -sdk-version: 2.1.0-snapshot.20220328.9630.0.66c37bad +sdk-version: 2.1.0-snapshot.20220407.9685.0.7ed507cf sandbox-options: - --wall-clock-time name: 
message diff --git a/community/common/src/main/daml/CantonExamples/daml.yaml b/community/common/src/main/daml/CantonExamples/daml.yaml index df428e2df..85eff0547 100644 --- a/community/common/src/main/daml/CantonExamples/daml.yaml +++ b/community/common/src/main/daml/CantonExamples/daml.yaml @@ -1,4 +1,4 @@ -sdk-version: 2.1.0-snapshot.20220328.9630.0.66c37bad +sdk-version: 2.1.0-snapshot.20220407.9685.0.7ed507cf name: CantonExamples parties: - Alice @@ -11,7 +11,7 @@ exposed-modules: - Paint - Swap source: . -version: 2.1.0 +version: 2.2.0 dependencies: - daml-prim - daml-stdlib diff --git a/community/common/src/main/resources/META-INF/services/io.opentelemetry.sdk.autoconfigure.spi.SdkTracerProviderConfigurer b/community/common/src/main/resources/META-INF/services/io.opentelemetry.sdk.autoconfigure.spi.SdkTracerProviderConfigurer deleted file mode 100644 index a07fd8374..000000000 --- a/community/common/src/main/resources/META-INF/services/io.opentelemetry.sdk.autoconfigure.spi.SdkTracerProviderConfigurer +++ /dev/null @@ -1 +0,0 @@ -com.digitalasset.canton.tracing.TracerProviderConfigurer \ No newline at end of file diff --git a/community/common/src/main/scala/com/daml/lf/CantonOnly.scala b/community/common/src/main/scala/com/daml/lf/CantonOnly.scala index 9739ccf5b..5e75eb429 100644 --- a/community/common/src/main/scala/com/daml/lf/CantonOnly.scala +++ b/community/common/src/main/scala/com/daml/lf/CantonOnly.scala @@ -3,7 +3,6 @@ package com.daml.lf -import cats.data.NonEmptyList import com.daml.lf.data.ImmArray import com.daml.lf.engine.{Engine, EngineConfig} import com.daml.lf.language.LanguageVersion @@ -16,6 +15,7 @@ import com.daml.lf.transaction.{ } import com.daml.lf.value.Value import com.daml.lf.value.Value.VersionedValue +import com.daml.nonempty.NonEmpty import com.digitalasset.canton.protocol.{LfNode, LfNodeId, LfTransactionVersion} import java.nio.file.Path @@ -99,7 +99,7 @@ object CantonOnly { ): VersionedValue = asVersionedValue(value, transactionVersion) - def maxTransactionVersion(versions: NonEmptyList[LfTransactionVersion]): LfTransactionVersion = + def maxTransactionVersion(versions: NonEmpty[Seq[LfTransactionVersion]]): LfTransactionVersion = versions.reduceLeft[LfTransactionVersion](LfTransactionVersion.Ordering.max) } diff --git a/community/common/src/main/scala/com/daml/nonempty/NonEmptyUtil.scala b/community/common/src/main/scala/com/daml/nonempty/NonEmptyUtil.scala new file mode 100644 index 000000000..2aa30aca2 --- /dev/null +++ b/community/common/src/main/scala/com/daml/nonempty/NonEmptyUtil.scala @@ -0,0 +1,29 @@ +// Copyright (c) 2022 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. +// SPDX-License-Identifier: Apache-2.0 + +package com.daml.nonempty + +import com.digitalasset.canton.logging.pretty.Pretty + +import scala.collection.immutable + +/** Additional methods for [[com.daml.nonempty.NonEmpty]]. + * + * Cats instances for [[com.daml.nonempty.NonEmpty]] must be imported explicitly as + * `import `[[com.daml.nonempty.catsinstances]]`._` when necessary. + */ +object NonEmptyUtil { + def fromUnsafe[A](xs: A with immutable.Iterable[_]): NonEmpty[A] = + NonEmpty.from(xs).getOrElse(throw new NoSuchElementException) + + object instances { + + /** This instance is exposed as [[com.digitalasset.canton.logging.pretty.PrettyInstances.prettyNonempty]]. 
+ * It lives only here because `NonEmptyColl.Instance.subst` is private to the `nonempty` package + */ + def prettyNonEmpty[A](implicit F: Pretty[A]): Pretty[NonEmpty[A]] = { + type K[T[_]] = Pretty[T[A]] + NonEmptyColl.Instance.subst[K](F) + } + } +} diff --git a/community/common/src/main/scala/com/digitalasset/canton/ProtoDeserializationError.scala b/community/common/src/main/scala/com/digitalasset/canton/ProtoDeserializationError.scala index 5ba8cd2bf..86d933698 100644 --- a/community/common/src/main/scala/com/digitalasset/canton/ProtoDeserializationError.scala +++ b/community/common/src/main/scala/com/digitalasset/canton/ProtoDeserializationError.scala @@ -2,6 +2,7 @@ // SPDX-License-Identifier: Apache-2.0 package com.digitalasset.canton + import com.digitalasset.canton.ProtoDeserializationError.ProtoDeserializationFailure import com.digitalasset.canton.error.CantonErrorGroups.ProtoDeserializationErrorGroup import com.digitalasset.canton.error.CantonError @@ -13,28 +14,52 @@ import com.google.protobuf.InvalidProtocolBufferException sealed trait ProtoDeserializationError extends Product with Serializable { def toAdminError(implicit loggingContext: ErrorLoggingContext): CantonError = ProtoDeserializationFailure.Wrap(this) + + def inField(field: String): ProtoDeserializationError.ValueDeserializationError = + ProtoDeserializationError.ValueDeserializationError(field, message) + + def message: String } + object ProtoDeserializationError extends ProtoDeserializationErrorGroup { final case class BufferException(error: InvalidProtocolBufferException) - extends ProtoDeserializationError + extends ProtoDeserializationError { + override val message = error.getMessage + } final case class CryptoDeserializationError(error: DeserializationError) - extends ProtoDeserializationError + extends ProtoDeserializationError { + override val message = error.message + } final case class TransactionDeserialization(message: String) extends ProtoDeserializationError final case class ValueDeserializationError(field: String, message: String) extends ProtoDeserializationError - final case class StringConversionError(error: String) extends ProtoDeserializationError - final case class UnrecognizedField(error: String) extends ProtoDeserializationError - final case class UnrecognizedEnum(field: String, value: Int) extends ProtoDeserializationError - final case class FieldNotSet(field: String) extends ProtoDeserializationError - final case class NotImplementedYet(className: String) extends ProtoDeserializationError + final case class StringConversionError(message: String) extends ProtoDeserializationError + final case class UnrecognizedField(message: String) extends ProtoDeserializationError + final case class UnrecognizedEnum(field: String, value: Int) extends ProtoDeserializationError { + override val message = s"Unrecognized value `$value` in enum field `$field`" + } + final case class FieldNotSet(field: String) extends ProtoDeserializationError { + override val message = s"Field `$field` is not set" + } + final case class NotImplementedYet(className: String) extends ProtoDeserializationError { + override val message = className + } final case class TimestampConversionError(message: String) extends ProtoDeserializationError final case class TimeModelConversionError(message: String) extends ProtoDeserializationError final case class ValueConversionError(field: String, error: String) - extends ProtoDeserializationError + extends ProtoDeserializationError { + override val message = s"Unable to convert field `$field`: 
$error" + } final case class SubmissionIdConversionError(message: String) extends ProtoDeserializationError - final case class InvariantViolation(error: String) extends ProtoDeserializationError - final case class UnknownGrpcCodeError(error: String) extends ProtoDeserializationError - final case class OtherError(error: String) extends ProtoDeserializationError + final case class InvariantViolation(error: String) extends ProtoDeserializationError { + override def message = error + } + final case class UnknownGrpcCodeError(error: String) extends ProtoDeserializationError { + override def message = error + } + final case class OtherError(error: String) extends ProtoDeserializationError { + override def message = error + } /** Common Deserialization error code * diff --git a/community/common/src/main/scala/com/digitalasset/canton/common/domain/ServiceAgreement.scala b/community/common/src/main/scala/com/digitalasset/canton/common/domain/ServiceAgreement.scala index 231578aa2..01626f7a3 100644 --- a/community/common/src/main/scala/com/digitalasset/canton/common/domain/ServiceAgreement.scala +++ b/community/common/src/main/scala/com/digitalasset/canton/common/domain/ServiceAgreement.scala @@ -46,6 +46,6 @@ object ServiceAgreement { id <- ServiceAgreementId.fromProtoPrimitive(agreement.id) legalText <- String256M .create(agreement.legalText) - .leftMap(err => ProtoDeserializationError.ValueDeserializationError("legal_text", err)) + .leftMap(ProtoDeserializationError.ValueDeserializationError("legal_text", _)) } yield ServiceAgreement(id, legalText) } diff --git a/community/common/src/main/scala/com/digitalasset/canton/config/RequireTypes.scala b/community/common/src/main/scala/com/digitalasset/canton/config/RequireTypes.scala index 9118e558a..562ab7b21 100644 --- a/community/common/src/main/scala/com/digitalasset/canton/config/RequireTypes.scala +++ b/community/common/src/main/scala/com/digitalasset/canton/config/RequireTypes.scala @@ -102,7 +102,7 @@ object RequireTypes { object NonNegativeNumeric { def tryCreate[T](t: T)(implicit num: Numeric[T]): NonNegativeNumeric[T] = - create(t).valueOr(err => throw new IllegalArgumentException(err.error)) + create(t).valueOr(err => throw new IllegalArgumentException(err.message)) def create[T]( t: T @@ -179,7 +179,7 @@ object RequireTypes { object PositiveNumeric { def tryCreate[T](t: T)(implicit num: Numeric[T]): PositiveNumeric[T] = - create(t).valueOr(err => throw new IllegalArgumentException(err.error)) + create(t).valueOr(err => throw new IllegalArgumentException(err.message)) def create[T]( t: T diff --git a/community/common/src/main/scala/com/digitalasset/canton/data/ActionDescription.scala b/community/common/src/main/scala/com/digitalasset/canton/data/ActionDescription.scala index b13b7d636..04a140e69 100644 --- a/community/common/src/main/scala/com/digitalasset/canton/data/ActionDescription.scala +++ b/community/common/src/main/scala/com/digitalasset/canton/data/ActionDescription.scala @@ -89,7 +89,6 @@ object ActionDescription { _signatories, _stakeholders, _key, - _byInterface, version, ) => for { @@ -110,7 +109,6 @@ object ActionDescription { exerciseResult, _key, byKey, - _byInterface, version, ) => for { @@ -135,7 +133,6 @@ object ActionDescription { _stakeholders, _key, byKey, - _byInterface, version, ) => for { diff --git a/community/common/src/main/scala/com/digitalasset/canton/data/GenTransactionTree.scala b/community/common/src/main/scala/com/digitalasset/canton/data/GenTransactionTree.scala index af2b81467..dba01f8e3 100644 --- 
a/community/common/src/main/scala/com/digitalasset/canton/data/GenTransactionTree.scala +++ b/community/common/src/main/scala/com/digitalasset/canton/data/GenTransactionTree.scala @@ -4,13 +4,15 @@ package com.digitalasset.canton.data import java.util.UUID -import cats.data.{EitherT, NonEmptyList, NonEmptySet} +import cats.data.{EitherT, NonEmptyList} import cats.syntax.either._ import cats.syntax.foldable._ import cats.syntax.functorFilter._ import cats.syntax.traverse._ import com.daml.ledger.api.DeduplicationPeriod import com.daml.ledger.participant.state.v2.SubmitterInfo +import com.daml.nonempty.NonEmptyUtil +import com.daml.nonempty.NonEmpty import com.digitalasset.canton.ProtoDeserializationError.FieldNotSet import com.digitalasset.canton._ import com.digitalasset.canton.crypto._ @@ -47,13 +49,13 @@ import com.digitalasset.canton.protocol.{ v0, } import com.digitalasset.canton.sequencing.protocol.{Recipients, RecipientsTree} +import com.digitalasset.canton.serialization.ProtoConverter.ParsingResult import com.digitalasset.canton.serialization.{MemoizedEvidence, ProtoConverter} import com.digitalasset.canton.util.{HasProtoV0, HasVersionedWrapper, NoCopy} import com.digitalasset.canton.version.ProtocolVersion import com.google.protobuf.ByteString import scala.annotation.tailrec -import scala.collection.immutable.SortedSet import scala.collection.mutable import scala.concurrent.{ExecutionContext, Future} @@ -260,7 +262,7 @@ object GenTransactionTree { def fromProtoV0( hashOps: HashOps, protoTransactionTree: v0.GenTransactionTree, - ): Either[String, GenTransactionTree] = + ): ParsingResult[GenTransactionTree] = for { submitterMetadata <- MerkleTree .fromProtoOption( @@ -279,23 +281,25 @@ object GenTransactionTree { ) rootViewsP <- ProtoConverter .required("GenTransactionTree.rootViews", protoTransactionTree.rootViews) - .leftMap(_.toString) rootViews <- MerkleSeq.fromProtoV0(hashOps, TransactionView.fromByteString(hashOps))( rootViewsP ) - genTransactionTree <- GenTransactionTree.create(hashOps)( - submitterMetadata, - commonMetadata, - participantMetadata, - rootViews, - ) + genTransactionTree <- GenTransactionTree + .create(hashOps)( + submitterMetadata, + commonMetadata, + participantMetadata, + rootViews, + ) + .leftMap(e => + ProtoDeserializationError.OtherError(s"Unable to create transaction tree: $e") + ) } yield genTransactionTree - def fromByteString(hashOps: HashOps, bytes: ByteString): Either[String, GenTransactionTree] = + def fromByteString(hashOps: HashOps, bytes: ByteString): ParsingResult[GenTransactionTree] = for { protoTransactionTree <- ProtoConverter .protoParser(v0.GenTransactionTree.parseFrom)(bytes) - .leftMap(_.error.toString) transactionTree <- fromProtoV0(hashOps, protoTransactionTree) } yield transactionTree } @@ -537,28 +541,28 @@ object InformeeTree { def fromProtoVersioned( hashOps: HashOps, protoInformeeTree: VersionedInformeeTree, - ): Either[String, InformeeTree] = + ): ParsingResult[InformeeTree] = protoInformeeTree.version match { case VersionedInformeeTree.Version.Empty => - Left(FieldNotSet("VersionedInformeeTree.version")).leftMap(_.toString) + Left(FieldNotSet("VersionedInformeeTree.version")) case VersionedInformeeTree.Version.V0(tree) => fromProtoV0(hashOps, tree) } def fromProtoV0( hashOps: HashOps, protoInformeeTree: v0.InformeeTree, - ): Either[String, InformeeTree] = + ): ParsingResult[InformeeTree] = for { - protoTree <- ProtoConverter.required("tree", protoInformeeTree.tree).leftMap(_.toString) + protoTree <- 
ProtoConverter.required("tree", protoInformeeTree.tree) tree <- GenTransactionTree.fromProtoV0(hashOps, protoTree) - informeeTree <- InformeeTree.create(tree) + informeeTree <- InformeeTree + .create(tree) + .leftMap(e => ProtoDeserializationError.OtherError(s"Unable to create informee tree: $e")) } yield informeeTree - def fromByteString(hashOps: HashOps, bytes: ByteString): Either[String, InformeeTree] = + def fromByteString(hashOps: HashOps, bytes: ByteString): ParsingResult[InformeeTree] = for { - protoInformeeTree <- ProtoConverter - .protoParser(VersionedInformeeTree.parseFrom)(bytes) - .leftMap(_.error.toString) + protoInformeeTree <- ProtoConverter.protoParser(VersionedInformeeTree.parseFrom)(bytes) informeeTree <- fromProtoVersioned(hashOps, protoInformeeTree) } yield informeeTree } @@ -638,27 +642,30 @@ object FullInformeeTree { def fromProtoVersioned( hashOps: HashOps - )(protoInformeeTree: VersionedFullInformeeTree): Either[String, FullInformeeTree] = + )(protoInformeeTree: VersionedFullInformeeTree): ParsingResult[FullInformeeTree] = protoInformeeTree.version match { case VersionedFullInformeeTree.Version.Empty => - Left(FieldNotSet("VersionedFullInformeeTree.version")).leftMap(_.toString) + Left(FieldNotSet("VersionedFullInformeeTree.version")) case VersionedFullInformeeTree.Version.V0(tree) => fromProtoV0(hashOps)(tree) } def fromProtoV0( hashOps: HashOps - )(protoInformeeTree: v0.FullInformeeTree): Either[String, FullInformeeTree] = + )(protoInformeeTree: v0.FullInformeeTree): ParsingResult[FullInformeeTree] = for { - protoTree <- ProtoConverter.required("tree", protoInformeeTree.tree).leftMap(_.toString) + protoTree <- ProtoConverter.required("tree", protoInformeeTree.tree) tree <- GenTransactionTree.fromProtoV0(hashOps, protoTree) - fullInformeeTree <- FullInformeeTree.create(tree) + fullInformeeTree <- FullInformeeTree + .create(tree) + .leftMap(e => + ProtoDeserializationError.OtherError(s"Unable to create full informee tree: $e") + ) } yield fullInformeeTree - def fromByteString(hashOps: HashOps)(bytes: ByteString): Either[String, FullInformeeTree] = + def fromByteString(hashOps: HashOps)(bytes: ByteString): ParsingResult[FullInformeeTree] = for { protoInformeeTree <- ProtoConverter .protoParser(VersionedFullInformeeTree.parseFrom)(bytes) - .leftMap(_.error.toString) informeeTree <- fromProtoVersioned(hashOps)(protoInformeeTree) } yield informeeTree } @@ -762,7 +769,7 @@ object SubmitterMetadata { def fromByteString( hashOps: HashOps - )(bytes: ByteString): Either[String, MerkleTree[SubmitterMetadata]] = + )(bytes: ByteString): ParsingResult[MerkleTree[SubmitterMetadata]] = for { protoSubmitterMetadata <- TreeSerialization.deserializeProtoNode( bytes, @@ -773,16 +780,16 @@ object SubmitterMetadata { private def fromProtoVersioned(hashOps: HashOps, metaDataP: VersionedSubmitterMetadata)( bytes: ByteString - ): Either[String, MerkleTree[SubmitterMetadata]] = + ): ParsingResult[MerkleTree[SubmitterMetadata]] = metaDataP.version match { case VersionedSubmitterMetadata.Version.Empty => - Left(FieldNotSet("VersionedSubmitterMetadata.version")).leftMap(_.toString) + Left(FieldNotSet("VersionedSubmitterMetadata.version")) case VersionedSubmitterMetadata.Version.V0(data) => fromProtoV0(hashOps, data)(bytes) } private def fromProtoV0(hashOps: HashOps, metaDataP: v0.SubmitterMetadata)( bytes: ByteString - ): Either[String, MerkleTree[SubmitterMetadata]] = { + ): ParsingResult[MerkleTree[SubmitterMetadata]] = { val v0.SubmitterMetadata( saltOP, actAsP, @@ -795,24 +802,25 @@ object 
SubmitterMetadata { for { submitterParticipant <- ParticipantId .fromProtoPrimitive(submitterParticipantP, "SubmitterMetadata.submitter_participant") - .leftMap(_.toString) actAs <- actAsP.traverse( - ProtoConverter.parseLfPartyId(_).leftMap(s => s"Unable to parse actAs: $s") + ProtoConverter + .parseLfPartyId(_) + .leftMap(e => ProtoDeserializationError.ValueConversionError("actAs", e.message)) ) applicationId <- ApplicationId .fromProtoPrimitive(applicationIdP) - .leftMap(s => s"Unable to parse application id: $s") + .leftMap(ProtoDeserializationError.ValueConversionError("applicationId", _)) commandId <- CommandId .fromProtoPrimitive(commandIdP) - .leftMap(s => s"Unable to parse command id: $s") + .leftMap(ProtoDeserializationError.ValueConversionError("commandId", _)) salt <- ProtoConverter .parseRequired(Salt.fromProtoV0, "salt", saltOP) - .leftMap(err => s"Unable to parse salt: $err") + .leftMap(e => ProtoDeserializationError.ValueConversionError("salt", e.message)) submissionId <- if (submissionIdP.nonEmpty) LedgerSubmissionId .fromString(submissionIdP) - .bimap(s => s"Unable to parse submission id: $s", Some(_)) + .bimap(ProtoDeserializationError.ValueConversionError("submissionId", _), Some(_)) else Right(None) dedupPeriod <- ProtoConverter .parseRequired( @@ -820,7 +828,9 @@ object SubmitterMetadata { "SubmitterMetadata.deduplication_period", dedupPeriodOP, ) - .leftMap(s => s"Unable to parse deduplication period: $s") + .leftMap(e => + ProtoDeserializationError.ValueConversionError("deduplicationPeriod", e.message) + ) submitterMetadata <- returnLeftWhenInitializationFails( new SubmitterMetadata( actAs.toSet, @@ -831,7 +841,7 @@ object SubmitterMetadata { submissionId, dedupPeriod, )(hashOps, Some(bytes)) - ) + ).leftMap(ProtoDeserializationError.OtherError(_)) } yield submitterMetadata } @@ -906,7 +916,7 @@ object CommonMetadata { def fromByteString( hashOps: HashOps - )(bytes: ByteString): Either[String, MerkleTree[CommonMetadata]] = + )(bytes: ByteString): ParsingResult[MerkleTree[CommonMetadata]] = for { protoCommonMetadata <- TreeSerialization.deserializeProtoNode(bytes, VersionedCommonMetadata) commonMetadata <- fromProtoVersioned(hashOps, protoCommonMetadata)(bytes) @@ -914,31 +924,32 @@ object CommonMetadata { private def fromProtoVersioned(hashOps: HashOps, metaDataP: VersionedCommonMetadata)( bytes: ByteString - ): Either[String, MerkleTree[CommonMetadata]] = + ): ParsingResult[MerkleTree[CommonMetadata]] = metaDataP.version match { case VersionedCommonMetadata.Version.Empty => - Left(FieldNotSet("VersionedCommonMetadata.version")).leftMap(_.toString) + Left(FieldNotSet("VersionedCommonMetadata.version")) case VersionedCommonMetadata.Version.V0(data) => fromProtoV0(hashOps, data)(bytes) } private def fromProtoV0(hashOps: HashOps, metaDataP: v0.CommonMetadata)( bytes: ByteString - ): Either[String, MerkleTree[CommonMetadata]] = + ): ParsingResult[MerkleTree[CommonMetadata]] = for { confirmationPolicy <- ConfirmationPolicy .fromProtoPrimitive(metaDataP.confirmationPolicy) - .leftMap(_.toString) - v0.CommonMetadata(saltP, confirmationPolicyP, domainIdP, uuidP, mediatorIdP) = metaDataP - domainUid <- UniqueIdentifier.fromProtoPrimitive_(domainIdP) + .leftMap(e => + ProtoDeserializationError.ValueDeserializationError("confirmationPolicy", e.show) + ) + v0.CommonMetadata(saltP, _confirmationPolicyP, domainIdP, uuidP, mediatorIdP) = metaDataP + domainUid <- UniqueIdentifier + .fromProtoPrimitive_(domainIdP) + 
.leftMap(ProtoDeserializationError.ValueDeserializationError("domainId", _)) mediatorId <- MediatorId .fromProtoPrimitive(mediatorIdP, "CommonMetadata.mediator_id") - .leftMap(_.toString) salt <- ProtoConverter .parseRequired(Salt.fromProtoV0, "salt", saltP) - .leftMap(err => s"Could not parse salt: $err") - uuid <- ProtoConverter.UuidConverter - .fromProtoPrimitive(uuidP) - .leftMap(e => s"Could not parse UUID ${e.error}") + .leftMap(_.inField("salt")) + uuid <- ProtoConverter.UuidConverter.fromProtoPrimitive(uuidP).leftMap(_.inField("uuid")) } yield new CommonMetadata(confirmationPolicy, DomainId(domainUid), mediatorId, salt, uuid)( hashOps, Some(bytes), @@ -1005,7 +1016,7 @@ object ParticipantMetadata { def fromByteString( hashOps: HashOps - )(bytes: ByteString): Either[String, MerkleTree[ParticipantMetadata]] = + )(bytes: ByteString): ParsingResult[MerkleTree[ParticipantMetadata]] = for { protoParticipantMetadata <- TreeSerialization.deserializeProtoNode( bytes, @@ -1016,31 +1027,33 @@ object ParticipantMetadata { private def fromProtoVersioned(hashOps: HashOps, metadataP: VersionedParticipantMetadata)( bytes: ByteString - ): Either[String, MerkleTree[ParticipantMetadata]] = + ): ParsingResult[MerkleTree[ParticipantMetadata]] = metadataP.version match { case VersionedParticipantMetadata.Version.Empty => - Left(FieldNotSet("VersionedParticipantMetadata.version")).leftMap(_.toString) + Left(FieldNotSet("VersionedParticipantMetadata.version")) case VersionedParticipantMetadata.Version.V0(data) => fromProtoV0(hashOps, data)(bytes) } private def fromProtoV0(hashOps: HashOps, metadataP: v0.ParticipantMetadata)( bytes: ByteString - ): Either[String, MerkleTree[ParticipantMetadata]] = + ): ParsingResult[MerkleTree[ParticipantMetadata]] = for { let <- ProtoConverter .parseRequired(CantonTimestamp.fromProtoPrimitive, "ledgerTime", metadataP.ledgerTime) - .leftMap(_.toString) - v0.ParticipantMetadata(saltP, ledgerTimeP, submissionTimeP, workflowIdP) = metadataP + v0.ParticipantMetadata(saltP, _ledgerTimeP, submissionTimeP, workflowIdP) = metadataP submissionTime <- ProtoConverter .parseRequired(CantonTimestamp.fromProtoPrimitive, "submissionTime", submissionTimeP) - .leftMap(_.toString) workflowId <- workflowIdP match { case "" => Right(None) - case wf => WorkflowId.fromProtoPrimitive(wf).map(Some(_)) + case wf => + WorkflowId + .fromProtoPrimitive(wf) + .map(Some(_)) + .leftMap(ProtoDeserializationError.ValueDeserializationError("workflowId", _)) } salt <- ProtoConverter .parseRequired(Salt.fromProtoV0, "salt", saltP) - .leftMap(err => s"Could not parse salt: $err") + .leftMap(_.inField("salt")) } yield new ParticipantMetadata(let, submissionTime, workflowId, salt)(hashOps, Some(bytes)) } @@ -1155,29 +1168,32 @@ object LightTransactionViewTree { def fromProtoVersioned( hashOps: HashOps - )(protoT: VersionedLightTransactionViewTree): Either[String, LightTransactionViewTree] = + )(protoT: VersionedLightTransactionViewTree): ParsingResult[LightTransactionViewTree] = protoT.version match { case VersionedLightTransactionViewTree.Version.Empty => - Left(FieldNotSet("VersionedLightTransactionViewTree.version")).leftMap(_.toString) + Left(FieldNotSet("VersionedLightTransactionViewTree.version")) case VersionedLightTransactionViewTree.Version.V0(tree) => fromProtoV0(hashOps)(tree) } def fromProtoV0( hashOps: HashOps - )(protoT: v0.LightTransactionViewTree): Either[String, LightTransactionViewTree] = + )(protoT: v0.LightTransactionViewTree): ParsingResult[LightTransactionViewTree] = for { - protoTree <- 
ProtoConverter.required("tree", protoT.tree).leftMap(_.toString) + protoTree <- ProtoConverter.required("tree", protoT.tree) tree <- GenTransactionTree.fromProtoV0(hashOps, protoTree) - result <- LightTransactionViewTree.create(tree) + result <- LightTransactionViewTree + .create(tree) + .leftMap(e => + ProtoDeserializationError.OtherError(s"Unable to create transaction tree: $e") + ) } yield result def fromByteString( hashOps: HashOps - )(bytes: ByteString): Either[String, LightTransactionViewTree] = + )(bytes: ByteString): ParsingResult[LightTransactionViewTree] = for { protoTransactionViewTree <- ProtoConverter .protoParser(VersionedLightTransactionViewTree.parseFrom)(bytes) - .leftMap(_.error.toString) lightTransactionViewTree <- fromProtoVersioned(hashOps)(protoTransactionViewTree) } yield lightTransactionViewTree @@ -1301,17 +1317,17 @@ object LightTransactionViewTree { * By convention, the order is: the view's informees are at the head of the list, then the parent's views informees, * then the grandparent's, etc. */ -case class Witnesses(unwrap: List[Set[Informee]]) { +case class Witnesses(unwrap: Seq[Set[Informee]]) { import Witnesses._ - def prepend(informees: Set[Informee]) = Witnesses(informees :: unwrap) + def prepend(informees: Set[Informee]) = Witnesses(informees +: unwrap) /** Derive a recipient tree that mirrors the given hierarchy of witnesses. */ def toRecipients( topology: PartyTopologySnapshotClient )(implicit ec: ExecutionContext): EitherT[Future, InvalidWitnesses, Recipients] = for { - recipientsList <- unwrap.foldLeftM(List.empty[RecipientsTree]) { (children, informees) => + recipientsList <- unwrap.foldLeftM(Seq.empty[RecipientsTree]) { (children, informees) => for { informeeParticipants <- topology .activeParticipantsOfAll(informees.map(_.party).toList) @@ -1319,12 +1335,14 @@ case class Witnesses(unwrap: List[Set[Informee]]) { InvalidWitnesses(s"Found no active participants for informees: $missing") ) informeeParticipantSet <- EitherT.fromOption[Future]( - NonEmptySet.fromSet[Member](SortedSet(informeeParticipants.toList: _*)), + NonEmpty.from(informeeParticipants.toSet[Member]), InvalidWitnesses(s"Empty set of witnesses given"), ) - } yield List(RecipientsTree(informeeParticipantSet, children)) + } yield Seq(RecipientsTree(informeeParticipantSet, children)) } - recipients = Recipients(NonEmptyList.fromListUnsafe(recipientsList)) + // TODO(error handling) Why is it safe to assume that the recipient list is non-empty? + // It will be empty if `unwrap` is empty. 
+ recipients = Recipients(NonEmptyUtil.fromUnsafe(recipientsList)) } yield recipients def flatten: Set[Informee] = unwrap.foldLeft(Set.empty[Informee])(_ union _) @@ -1332,7 +1350,7 @@ case class Witnesses(unwrap: List[Set[Informee]]) { } case object Witnesses { - lazy val empty: Witnesses = Witnesses(List.empty) + lazy val empty: Witnesses = Witnesses(Seq.empty) case class InvalidWitnesses(message: String) extends PrettyPrinting { override def pretty: Pretty[InvalidWitnesses] = prettyOfClass(unnamedParam(_.message.unquoted)) diff --git a/community/common/src/main/scala/com/digitalasset/canton/data/GenTransferViewTree.scala b/community/common/src/main/scala/com/digitalasset/canton/data/GenTransferViewTree.scala index d3b681dc5..0d6f3d42c 100644 --- a/community/common/src/main/scala/com/digitalasset/canton/data/GenTransferViewTree.scala +++ b/community/common/src/main/scala/com/digitalasset/canton/data/GenTransferViewTree.scala @@ -88,10 +88,10 @@ object GenTransferViewTree { val v0.TransferViewTree(commonDataP, viewP) = treeP for { commonData <- MerkleTree - .fromProtoOption(commonDataP, deserializeCommonData(_).leftMap(_.toString)) + .fromProtoOption(commonDataP, deserializeCommonData(_)) .leftMap(error => OtherError(s"transferCommonData: $error")) view <- MerkleTree - .fromProtoOption(viewP, deserializeView(_).leftMap(_.toString)) + .fromProtoOption(viewP, deserializeView(_)) .leftMap(error => OtherError(s"transferView: $error")) } yield createTree(commonData, view) } diff --git a/community/common/src/main/scala/com/digitalasset/canton/data/Informee.scala b/community/common/src/main/scala/com/digitalasset/canton/data/Informee.scala index 8316b2f7f..4231d78dc 100644 --- a/community/common/src/main/scala/com/digitalasset/canton/data/Informee.scala +++ b/community/common/src/main/scala/com/digitalasset/canton/data/Informee.scala @@ -4,10 +4,11 @@ package com.digitalasset.canton.data import cats.syntax.either._ -import com.digitalasset.canton.LfPartyId +import com.digitalasset.canton.{LfPartyId, ProtoDeserializationError} import com.digitalasset.canton.data.Informee.InvalidInformee import com.digitalasset.canton.logging.pretty.{Pretty, PrettyPrinting} import com.digitalasset.canton.protocol.v0 +import com.digitalasset.canton.serialization.ProtoConverter.ParsingResult import com.digitalasset.canton.util.HasProtoV0 /** A party that must be informed about the view. @@ -45,11 +46,17 @@ object Informee { def create(party: LfPartyId, weight: Int): Either[String, Informee] = Either.catchOnly[InvalidInformee](tryCreate(party, weight)).leftMap(_.message) - def fromProtoV0(informeeP: v0.Informee): Either[String, Informee] = { + def fromProtoV0(informeeP: v0.Informee): ParsingResult[Informee] = { val v0.Informee(partyString, weight) = informeeP for { - party <- LfPartyId.fromString(partyString) - informee <- Informee.create(party, weight) + party <- LfPartyId + .fromString(partyString) + .leftMap(ProtoDeserializationError.ValueDeserializationError("party", _)) + informee <- Informee + .create(party, weight) + .leftMap(err => + ProtoDeserializationError.OtherError(s"Unable to deserialize informee data: $err") + ) } yield informee } @@ -61,13 +68,13 @@ object Informee { * @param weight determines the impact of the party on whether the view is approved. 
* @throws com.digitalasset.canton.data.Informee$.InvalidInformee if `weight` is not positive */ -case class ConfirmingParty(party: LfPartyId, weight: Int) extends Informee { +final case class ConfirmingParty(party: LfPartyId, weight: Int) extends Informee { if (weight <= 0) throw InvalidInformee(s"Unable to create a confirming party with non-positive weight $weight.") } /** An informee that is not a confirming party */ -case class PlainInformee(party: LfPartyId) extends Informee { +final case class PlainInformee(party: LfPartyId) extends Informee { override val weight = 0 } diff --git a/community/common/src/main/scala/com/digitalasset/canton/data/MerkleSeq.scala b/community/common/src/main/scala/com/digitalasset/canton/data/MerkleSeq.scala index 4a8e6887d..e11e3ff9e 100644 --- a/community/common/src/main/scala/com/digitalasset/canton/data/MerkleSeq.scala +++ b/community/common/src/main/scala/com/digitalasset/canton/data/MerkleSeq.scala @@ -4,6 +4,7 @@ package com.digitalasset.canton.data import cats.syntax.either._ import cats.syntax.traverse._ +import com.digitalasset.canton.ProtoDeserializationError import com.digitalasset.canton.crypto.HashOps import com.digitalasset.canton.data.MerkleSeq.MerkleSeqElement import com.digitalasset.canton.data.MerkleTree.{ @@ -17,7 +18,8 @@ import com.digitalasset.canton.data.ViewPosition.{MerklePathElement, MerkleSeqIn import com.digitalasset.canton.logging.pretty.{Pretty, PrettyPrinting} import com.digitalasset.canton.protocol.{RootHash, v0} import com.digitalasset.canton.serialization.ProtoConverter -import com.digitalasset.canton.util.{HasVersionedToByteString, HasProtoV0} +import com.digitalasset.canton.serialization.ProtoConverter.ParsingResult +import com.digitalasset.canton.util.{HasProtoV0, HasVersionedToByteString} import com.digitalasset.canton.version.ProtocolVersion import com.google.protobuf.ByteString @@ -223,36 +225,31 @@ object MerkleSeq { private[MerkleSeq] def fromByteString[M <: MerkleTree[_] with HasVersionedToByteString]( hashOps: HashOps, - dataFromByteString: ByteString => Either[String, MerkleTree[ - M with HasVersionedToByteString - ]], - )(bytes: ByteString): Either[String, MerkleSeqElement[M]] = { + dataFromByteString: ByteString => ParsingResult[MerkleTree[M with HasVersionedToByteString]], + )(bytes: ByteString): ParsingResult[MerkleSeqElement[M]] = { for { merkleSeqElementP <- ProtoConverter .protoParser(v0.MerkleSeqElement.parseFrom)(bytes) - .leftMap(_.toString) merkleSeqElement <- fromProtoV0(hashOps, dataFromByteString)(merkleSeqElementP) } yield merkleSeqElement } private[MerkleSeq] def fromProtoV0[M <: MerkleTree[_] with HasVersionedToByteString]( hashOps: HashOps, - dataFromByteString: ByteString => Either[String, MerkleTree[ - M with HasVersionedToByteString - ]], - )(merkleSeqElementP: v0.MerkleSeqElement): Either[String, MerkleSeqElement[M]] = { + dataFromByteString: ByteString => ParsingResult[MerkleTree[M with HasVersionedToByteString]], + )(merkleSeqElementP: v0.MerkleSeqElement): ParsingResult[MerkleSeqElement[M]] = { val v0.MerkleSeqElement(maybeFirstP, maybeSecondP, maybeDataP) = merkleSeqElementP def branchChildFromMaybeProtoBlindableNode( maybeNodeP: Option[v0.BlindableNode] - ): Either[String, Option[MerkleTree[MerkleSeqElement[M]]]] = + ): ParsingResult[Option[MerkleTree[MerkleSeqElement[M]]]] = maybeNodeP.traverse(nodeP => MerkleTree.fromProtoOption(Some(nodeP), fromByteString(hashOps, dataFromByteString)) ) def singletonDataFromMaybeProtoBlindableNode( maybeDataP: Option[v0.BlindableNode] - ): 
Either[String, Option[MerkleTree[M with HasVersionedToByteString]]] = + ): ParsingResult[Option[MerkleTree[M with HasVersionedToByteString]]] = maybeDataP.traverse(dataP => MerkleTree.fromProtoOption(Some(dataP), dataFromByteString)) for { @@ -264,18 +261,24 @@ object MerkleSeq { case (Some(first), Some(second), None) => Right(Branch(first, second)(hashOps)) case (None, None, Some(data)) => Right(Singleton[M](data)(hashOps)) case (None, None, None) => - Left(s"Unable to create MerkleSeqElement, as all fields are undefined.") + ProtoDeserializationError + .OtherError(s"Unable to create MerkleSeqElement, as all fields are undefined.") + .asLeft case (Some(_), Some(_), Some(_)) => - Left( - s"Unable to create MerkleSeqElement, as both the fields for a Branch and a Singleton are defined." - ) + ProtoDeserializationError + .OtherError( + s"Unable to create MerkleSeqElement, as both the fields for a Branch and a Singleton are defined." + ) + .asLeft case (_, _, _) => // maybeFirst.isDefined != maybeSecond.isDefined def mkState: Option[_] => String = _.fold("undefined")(_ => "defined") - Left( - s"Unable to create MerkleSeqElement, as first is ${mkState(maybeFirst)} and second is ${mkState(maybeSecond)}." - ) + ProtoDeserializationError + .OtherError( + s"Unable to create MerkleSeqElement, as first is ${mkState(maybeFirst)} and second is ${mkState(maybeSecond)}." + ) + .asLeft } } yield merkleSeqElement @@ -284,8 +287,8 @@ object MerkleSeq { def fromProtoV0[M <: MerkleTree[_] with HasVersionedToByteString]( hashOps: HashOps, - dataFromByteString: ByteString => Either[String, MerkleTree[M with HasVersionedToByteString]], - )(merkleSeqP: v0.MerkleSeq): Either[String, MerkleSeq[M]] = { + dataFromByteString: ByteString => ParsingResult[MerkleTree[M with HasVersionedToByteString]], + )(merkleSeqP: v0.MerkleSeq): ParsingResult[MerkleSeq[M]] = { val v0.MerkleSeq(maybeRootP) = merkleSeqP for { rootOrEmpty <- maybeRootP.traverse(_ => diff --git a/community/common/src/main/scala/com/digitalasset/canton/data/MerkleTree.scala b/community/common/src/main/scala/com/digitalasset/canton/data/MerkleTree.scala index 0202f07f4..d14b7394e 100644 --- a/community/common/src/main/scala/com/digitalasset/canton/data/MerkleTree.scala +++ b/community/common/src/main/scala/com/digitalasset/canton/data/MerkleTree.scala @@ -4,12 +4,14 @@ package com.digitalasset.canton.data import cats.implicits._ +import com.digitalasset.canton.ProtoDeserializationError import com.digitalasset.canton.crypto._ import com.digitalasset.canton.data.MerkleSeq.MerkleSeqElement import com.digitalasset.canton.data.MerkleTree._ import com.digitalasset.canton.logging.pretty.{Pretty, PrettyPrinting} import com.digitalasset.canton.protocol.{RootHash, v0} import com.digitalasset.canton.serialization.HasCryptographicEvidence +import com.digitalasset.canton.serialization.ProtoConverter.ParsingResult import com.digitalasset.canton.util.HasVersionedToByteString import com.digitalasset.canton.version.ProtocolVersion import com.google.protobuf.ByteString @@ -233,19 +235,20 @@ object MerkleTree { /** Deserialize a bindable protobuf node to a blinded or an unblinded tree node depending on the contents of protoNode */ def fromProtoOption[NodeType]( protoNode: Option[v0.BlindableNode], - f: ByteString => Either[String, MerkleTree[NodeType]], - ): Either[String, MerkleTree[NodeType]] = { + f: ByteString => ParsingResult[MerkleTree[NodeType]], + ): ParsingResult[MerkleTree[NodeType]] = { import v0.BlindableNode.{BlindedOrNot => BON} protoNode.map(_.blindedOrNot) 
match { case Some(BON.BlindedHash(hashBytes)) => RootHash .fromProtoPrimitive(hashBytes) .bimap( - e => s"Failed to deserialize root hash: $e", + e => ProtoDeserializationError.OtherError(s"Failed to deserialize root hash: $e"), hash => BlindedNode.apply[NodeType](hash), ) case Some(BON.Unblinded(unblindedNode)) => f(unblindedNode) - case Some(BON.Empty) | None => Left(s"Missing blindedOrNot specification") + case Some(BON.Empty) | None => + Left(ProtoDeserializationError.OtherError(s"Missing blindedOrNot specification")) } } } diff --git a/community/common/src/main/scala/com/digitalasset/canton/data/TransactionView.scala b/community/common/src/main/scala/com/digitalasset/canton/data/TransactionView.scala index 0719ae7de..133fcc8ad 100644 --- a/community/common/src/main/scala/com/digitalasset/canton/data/TransactionView.scala +++ b/community/common/src/main/scala/com/digitalasset/canton/data/TransactionView.scala @@ -31,6 +31,7 @@ import com.digitalasset.canton.serialization.{ SerializationCheckFailed, } import com.digitalasset.canton.protocol.RollbackContext +import com.digitalasset.canton.serialization.ProtoConverter.ParsingResult import com.digitalasset.canton.util.{ HasProtoV0, HasVersionedToByteString, @@ -47,6 +48,7 @@ import com.digitalasset.canton.{ LfFetchCommand, LfLookupByKeyCommand, LfPartyId, + ProtoDeserializationError, checked, } import com.google.common.annotations.VisibleForTesting @@ -184,7 +186,7 @@ object TransactionView { ) .leftMap(_.message) - def fromByteString(hashOps: HashOps)(bytes: ByteString): Either[String, TransactionView] = + def fromByteString(hashOps: HashOps)(bytes: ByteString): ParsingResult[TransactionView] = for { protoView <- TreeSerialization.deserializeProtoNode(bytes, v0.ViewNode) view <- fromProtoV0(hashOps, protoView) @@ -193,7 +195,7 @@ object TransactionView { private def fromProtoV0( hashOps: HashOps, protoView: v0.ViewNode, - ): Either[String, TransactionView] = { + ): ParsingResult[TransactionView] = { for { commonData <- MerkleTree.fromProtoOption( protoView.viewCommonData, @@ -204,13 +206,15 @@ object TransactionView { ViewParticipantData.fromByteString(hashOps), ) subViews <- deserializeViews(hashOps)(protoView.subviews) - view <- create(hashOps)(commonData, participantData, subViews) + view <- create(hashOps)(commonData, participantData, subViews).leftMap(e => + ProtoDeserializationError.OtherError(s"Unable to create transaction views: $e") + ) } yield view } private[data] def deserializeViews( hashOps: HashOps - )(protoViews: Seq[v0.BlindableNode]): Either[String, Seq[MerkleTree[TransactionView]]] = + )(protoViews: Seq[v0.BlindableNode]): ParsingResult[Seq[MerkleTree[TransactionView]]] = protoViews.traverse(protoView => MerkleTree.fromProtoOption(Some(protoView), fromByteString(hashOps)) ) @@ -384,22 +388,22 @@ object ViewCommonData { // valid serialization of "viewCommonDataP". 
private def fromProtoVersioned( hashOps: HashOps - )(bytes: ByteString, viewCommonDataP: VersionedViewCommonData): Either[String, ViewCommonData] = + )(bytes: ByteString, viewCommonDataP: VersionedViewCommonData): ParsingResult[ViewCommonData] = viewCommonDataP.version match { case VersionedViewCommonData.Version.Empty => - Left(FieldNotSet("VersionedViewCommonData.version")).leftMap(_.toString) + Left(FieldNotSet("VersionedViewCommonData.version")) case VersionedViewCommonData.Version.V0(data) => fromProtoV0(hashOps)(bytes, data) } private def fromProtoV0( hashOps: HashOps - )(bytes: ByteString, viewCommonDataP: v0.ViewCommonData): Either[String, ViewCommonData] = + )(bytes: ByteString, viewCommonDataP: v0.ViewCommonData): ParsingResult[ViewCommonData] = for { informees <- viewCommonDataP.informees.traverse(Informee.fromProtoV0) salt <- ProtoConverter .parseRequired(Salt.fromProtoV0, "salt", viewCommonDataP.salt) - .leftMap(err => s"Could not parse salt: $err") + .leftMap(_.inField("salt")) // The constructor of ViewCommandData throws an exception if an object invariant would be violated, // which must not escape this method. Therefore we translate the exception to Left(...). @@ -407,12 +411,12 @@ object ViewCommonData { // indicate a bug in the code and can therefore not be recovered from. viewCommonData <- returnLeftWhenInitializationFails( new ViewCommonData(informees.toSet, viewCommonDataP.threshold, salt)(hashOps, Some(bytes)) - ) + ).leftMap(ProtoDeserializationError.OtherError(_)) } yield viewCommonData // Unlike "create" and "tryCreate", this method initializes the "deserializedFrom" field with the given byte string. // This is to ensure that subsequent calls of "toByteString" yield the same byte string. - def fromByteString(hashOps: HashOps)(bytes: ByteString): Either[String, ViewCommonData] = + def fromByteString(hashOps: HashOps)(bytes: ByteString): ParsingResult[ViewCommonData] = for { viewCommonDataP <- TreeSerialization.deserializeProtoNode(bytes, VersionedViewCommonData) viewCommonData <- ViewCommonData.fromProtoVersioned(hashOps)(bytes, viewCommonDataP) @@ -743,9 +747,9 @@ object ViewParticipantData { private def fromProtoV0(hashOps: HashOps, dataP: v0.ViewParticipantData)( bytes: ByteString - ): Either[String, MerkleTree[ViewParticipantData]] = + ): ParsingResult[MerkleTree[ViewParticipantData]] = for { - coreInputsSeq <- dataP.coreInputs.traverse(InputContract.fromProtoV0).leftMap(_.toString) + coreInputsSeq <- dataP.coreInputs.traverse(InputContract.fromProtoV0) v0.ViewParticipantData( saltP, _, @@ -756,24 +760,24 @@ object ViewParticipantData { rbContextP, ) = dataP coreInputs = coreInputsSeq.map(x => x.contractId -> x).toMap - createdCore <- createdCoreP.traverse(CreatedContract.fromProtoV0).leftMap(_.toString) + createdCore <- createdCoreP.traverse(CreatedContract.fromProtoV0) archivedFromSubviews <- archivedFromSubviewsP .traverse(LfContractId.fromProtoPrimitive) - .leftMap(_.toString) resolvedKeys <- resolvedKeysP.traverse( - ResolvedKey.fromProtoV0(_).bimap(_.toString, rk => rk.key -> rk.resolution) + ResolvedKey.fromProtoV0(_).map(rk => rk.key -> rk.resolution) ) resolvedKeysMap = resolvedKeys.toMap actionDescription <- ProtoConverter .required("action_description", actionDescriptionP) .flatMap(ActionDescription.fromProtoV0) - .leftMap(_.toString) + salt <- ProtoConverter .parseRequired(Salt.fromProtoV0, "salt", saltP) - .leftMap(err => s"Could not parse salt: $err") + .leftMap(_.inField("salt")) + rollbackContext <- RollbackContext .fromProtoV0(rbContextP) - 
.leftMap(err => s"Could not parse rollback context: $err") + .leftMap(_.inField("rollbackContext")) viewParticipantData <- returnLeftWhenInitializationFails( new ViewParticipantData( @@ -785,21 +789,21 @@ object ViewParticipantData { rollbackContext = rollbackContext, salt = salt, )(hashOps, Some(bytes)) - ) + ).leftMap(ProtoDeserializationError.OtherError(_)) } yield viewParticipantData private def fromProtoVersioned(hashOps: HashOps, dataP: VersionedViewParticipantData)( bytes: ByteString - ): Either[String, MerkleTree[ViewParticipantData]] = + ): ParsingResult[MerkleTree[ViewParticipantData]] = dataP.version match { case VersionedViewParticipantData.Version.Empty => - Left(FieldNotSet("VersionedViewParticipantData.version")).leftMap(_.toString) + Left(FieldNotSet("VersionedViewParticipantData.version")) case VersionedViewParticipantData.Version.V0(data) => fromProtoV0(hashOps, data)(bytes) } def fromByteString( hashOps: HashOps - )(bytes: ByteString): Either[String, MerkleTree[ViewParticipantData]] = + )(bytes: ByteString): ParsingResult[MerkleTree[ViewParticipantData]] = for { protoViewParticipantData <- TreeSerialization.deserializeProtoNode( bytes, diff --git a/community/common/src/main/scala/com/digitalasset/canton/data/TreeSerialization.scala b/community/common/src/main/scala/com/digitalasset/canton/data/TreeSerialization.scala index 5ba1a8cfc..83e32c5fc 100644 --- a/community/common/src/main/scala/com/digitalasset/canton/data/TreeSerialization.scala +++ b/community/common/src/main/scala/com/digitalasset/canton/data/TreeSerialization.scala @@ -3,8 +3,8 @@ package com.digitalasset.canton.data -import cats.implicits._ import com.digitalasset.canton.serialization.ProtoConverter +import com.digitalasset.canton.serialization.ProtoConverter.ParsingResult import com.google.protobuf.ByteString import scalapb.GeneratedMessageCompanion @@ -16,6 +16,5 @@ object TreeSerialization { def deserializeProtoNode[ProtoType <: scalapb.GeneratedMessage]( bytes: ByteString, protoBuilder: GeneratedMessageCompanion[ProtoType], - ): Either[String, ProtoType] = - ProtoConverter.protoParser(protoBuilder.parseFrom)(bytes).leftMap(_.error.getMessage) + ): ParsingResult[ProtoType] = ProtoConverter.protoParser(protoBuilder.parseFrom)(bytes) } diff --git a/community/common/src/main/scala/com/digitalasset/canton/environment/CantonNodeBootstrap.scala b/community/common/src/main/scala/com/digitalasset/canton/environment/CantonNodeBootstrap.scala index 62d0052a7..e96dd931d 100644 --- a/community/common/src/main/scala/com/digitalasset/canton/environment/CantonNodeBootstrap.scala +++ b/community/common/src/main/scala/com/digitalasset/canton/environment/CantonNodeBootstrap.scala @@ -139,7 +139,7 @@ abstract class CantonNodeBootstrapBase[ val timeouts: ProcessingTimeout = parameterConfig.processingTimeouts // TODO(soren): Move to a error-safe node initialization approach - protected val storage = timeouts.unbounded.await("create storage factory")( + protected val storage = storageFactory .tryCreate( connectionPoolForParticipant, @@ -148,7 +148,6 @@ abstract class CantonNodeBootstrapBase[ parameterConfig.processingTimeouts, loggerFactory, ) - ) protected val initializationStore = InitializationStore(storage, timeouts, loggerFactory) protected val indexedStringStore = IndexedStringStore.create( diff --git a/community/common/src/main/scala/com/digitalasset/canton/ledger/api/client/LedgerConnection.scala b/community/common/src/main/scala/com/digitalasset/canton/ledger/api/client/LedgerConnection.scala index 
8daae4781..3e2a8b8f6 100644 --- a/community/common/src/main/scala/com/digitalasset/canton/ledger/api/client/LedgerConnection.scala +++ b/community/common/src/main/scala/com/digitalasset/canton/ledger/api/client/LedgerConnection.scala @@ -43,7 +43,7 @@ import com.digitalasset.canton.util.AkkaUtil import com.google.rpc.status.Status import io.grpc.StatusRuntimeException import io.opentelemetry.api.trace.Tracer -import io.opentelemetry.instrumentation.grpc.v1_5.GrpcTracing +import io.opentelemetry.instrumentation.grpc.v1_6.GrpcTracing import org.slf4j.event.Level import scalaz.syntax.tag._ @@ -145,7 +145,7 @@ object LedgerConnection { .builderFor(config.address, config.port.unwrap) .executor(ec) .intercept( - GrpcTracing.newBuilder(tracerProvider.openTelemetry).build().newClientInterceptor() + GrpcTracing.builder(tracerProvider.openTelemetry).build().newClientInterceptor() ) LedgerClient.fromBuilder(builder, clientConfig) } diff --git a/community/common/src/main/scala/com/digitalasset/canton/lifecycle/FutureUnlessShutdown.scala b/community/common/src/main/scala/com/digitalasset/canton/lifecycle/FutureUnlessShutdown.scala index 72c4e4023..87f5e3e4b 100644 --- a/community/common/src/main/scala/com/digitalasset/canton/lifecycle/FutureUnlessShutdown.scala +++ b/community/common/src/main/scala/com/digitalasset/canton/lifecycle/FutureUnlessShutdown.scala @@ -53,6 +53,8 @@ object FutureUnlessShutdown { override def apply[A](future: Future[A]): FutureUnlessShutdown[A] = outcomeF(future) } + def liftK: UnlessShutdown ~> FutureUnlessShutdown = FunctionK.lift(lift) + /** Analog to [[scala.concurrent.Future]]`.failed` */ def failed[A](ex: Throwable): FutureUnlessShutdown[A] = FutureUnlessShutdown(Future.failed(ex)) diff --git a/community/common/src/main/scala/com/digitalasset/canton/lifecycle/Lifecycle.scala b/community/common/src/main/scala/com/digitalasset/canton/lifecycle/Lifecycle.scala index c953398c6..bc566bafa 100644 --- a/community/common/src/main/scala/com/digitalasset/canton/lifecycle/Lifecycle.scala +++ b/community/common/src/main/scala/com/digitalasset/canton/lifecycle/Lifecycle.scala @@ -5,7 +5,7 @@ package com.digitalasset.canton.lifecycle import akka.actor.ActorSystem import akka.stream.Materializer -import cats.data.NonEmptyList +import com.daml.nonempty.NonEmpty import com.digitalasset.canton.config.ProcessingTimeout import com.digitalasset.canton.logging.{ErrorLoggingContext, TracedLogger} import com.digitalasset.canton.tracing.{NoTracing, TraceContext} @@ -62,7 +62,7 @@ object Lifecycle extends NoTracing { acc ++ stopSingle(instance).toList } - NonEmptyList.fromFoldable(failedInstances).foreach(i => throw new ShutdownFailedException(i)) + NonEmpty.from(failedInstances).foreach { i => throw new ShutdownFailedException(i) } } def toCloseableOption[A <: AutoCloseable](maybeClosable: Option[A]): AutoCloseable = diff --git a/community/common/src/main/scala/com/digitalasset/canton/lifecycle/ShutdownFailedException.scala b/community/common/src/main/scala/com/digitalasset/canton/lifecycle/ShutdownFailedException.scala index 436206a50..a3fd8b553 100644 --- a/community/common/src/main/scala/com/digitalasset/canton/lifecycle/ShutdownFailedException.scala +++ b/community/common/src/main/scala/com/digitalasset/canton/lifecycle/ShutdownFailedException.scala @@ -3,10 +3,8 @@ package com.digitalasset.canton.lifecycle -import cats.data.NonEmptyList +import com.daml.nonempty.NonEmpty import com.digitalasset.canton.util.ShowUtil._ -class ShutdownFailedException(instances: NonEmptyList[String]) - extends 
RuntimeException( - s"Unable to close ${instances.map(_.singleQuoted).toList.mkString(", ")}." - ) {} +class ShutdownFailedException(instances: NonEmpty[Seq[String]]) + extends RuntimeException(show"Unable to close ${instances.map(_.singleQuoted)}.") diff --git a/community/common/src/main/scala/com/digitalasset/canton/logging/pretty/PrettyInstances.scala b/community/common/src/main/scala/com/digitalasset/canton/logging/pretty/PrettyInstances.scala index 06d394d69..26e3e6cdf 100644 --- a/community/common/src/main/scala/com/digitalasset/canton/logging/pretty/PrettyInstances.scala +++ b/community/common/src/main/scala/com/digitalasset/canton/logging/pretty/PrettyInstances.scala @@ -19,6 +19,8 @@ import com.daml.ledger.participant.state.v2.Update.CommandRejected.RejectionReas import com.daml.lf.data.Ref import com.daml.lf.data.Ref.{DottedName, PackageId, QualifiedName} import com.daml.lf.value.Value +import com.daml.nonempty.NonEmptyUtil +import com.daml.nonempty.NonEmpty import com.digitalasset.canton.topology.UniqueIdentifier import com.digitalasset.canton.protocol.{ ContractId, @@ -63,6 +65,9 @@ trait PrettyInstances { implicit def prettySeq[T: Pretty]: Pretty[Seq[T]] = treeOfIterable("Seq", _) + implicit def prettyNonempty[T: Pretty]: Pretty[NonEmpty[T]] = + NonEmptyUtil.instances.prettyNonEmpty + implicit def prettyArray[T: Pretty]: Pretty[Array[T]] = treeOfIterable("Array", _) implicit def prettySet[T: Pretty]: Pretty[Set[T]] = treeOfIterable("Set", _) diff --git a/community/common/src/main/scala/com/digitalasset/canton/package.scala b/community/common/src/main/scala/com/digitalasset/canton/package.scala index 18d97d7e5..753f5a34c 100644 --- a/community/common/src/main/scala/com/digitalasset/canton/package.scala +++ b/community/common/src/main/scala/com/digitalasset/canton/package.scala @@ -65,23 +65,23 @@ package object canton { type LfCommand = ReplayCommand val LfCommand: ReplayCommand.type = ReplayCommand - type LfCreateCommand = LfCommand.CreateByTemplate - val LfCreateCommand: LfCommand.CreateByTemplate.type = LfCommand.CreateByTemplate + type LfCreateCommand = LfCommand.Create + val LfCreateCommand: LfCommand.Create.type = LfCommand.Create - type LfExerciseCommand = LfCommand.LenientExercise - val LfExerciseCommand: LfCommand.LenientExercise.type = LfCommand.LenientExercise + type LfExerciseCommand = LfCommand.Exercise + val LfExerciseCommand: LfCommand.Exercise.type = LfCommand.Exercise - type LfExerciseByKeyCommand = LfCommand.ExerciseTemplateByKey - val LfExerciseByKeyCommand: LfCommand.ExerciseTemplateByKey.type = LfCommand.ExerciseTemplateByKey + type LfExerciseByKeyCommand = LfCommand.ExerciseByKey + val LfExerciseByKeyCommand: LfCommand.ExerciseByKey.type = LfCommand.ExerciseByKey - type LfFetchCommand = LfCommand.FetchTemplate - val LfFetchCommand: LfCommand.FetchTemplate.type = LfCommand.FetchTemplate + type LfFetchCommand = LfCommand.Fetch + val LfFetchCommand: LfCommand.Fetch.type = LfCommand.Fetch - type LfFetchByKeyCommand = LfCommand.FetchTemplateByKey - val LfFetchByKeyCommand: LfCommand.FetchTemplateByKey.type = LfCommand.FetchTemplateByKey + type LfFetchByKeyCommand = LfCommand.FetchByKey + val LfFetchByKeyCommand: LfCommand.FetchByKey.type = LfCommand.FetchByKey - type LfLookupByKeyCommand = LfCommand.LookupTemplateByKey - val LfLookupByKeyCommand: LfCommand.LookupTemplateByKey.type = LfCommand.LookupTemplateByKey + type LfLookupByKeyCommand = LfCommand.LookupByKey + val LfLookupByKeyCommand: LfCommand.LookupByKey.type = LfCommand.LookupByKey /** The counter 
assigned by the sequencer to messages sent to the participant. * The counter is specific to every participant. diff --git a/community/common/src/main/scala/com/digitalasset/canton/protocol/WellFormedTransaction.scala b/community/common/src/main/scala/com/digitalasset/canton/protocol/WellFormedTransaction.scala index 2fdb87eba..4fd999689 100644 --- a/community/common/src/main/scala/com/digitalasset/canton/protocol/WellFormedTransaction.scala +++ b/community/common/src/main/scala/com/digitalasset/canton/protocol/WellFormedTransaction.scala @@ -3,12 +3,13 @@ package com.digitalasset.canton.protocol -import cats.data.{NonEmptyChain, NonEmptyList, Validated} +import cats.data.{NonEmptyChain, Validated} import cats.syntax.either._ import cats.syntax.foldable._ import cats.syntax.functor._ import com.daml.lf.CantonOnly import com.daml.lf.data.ImmArray +import com.daml.nonempty.NonEmpty import com.digitalasset.canton.checked import com.digitalasset.canton.data.ActionDescription import com.digitalasset.canton.topology.PartyId @@ -283,11 +284,11 @@ object WellFormedTransaction { addReferencesByLfValue(nodeId, argRefs.to(LazyList)) ) } { - case (nodeId: LfNodeId, LfNodeFetch(coid, _, _, _, _, _, _, _, _), _) => + case (nodeId: LfNodeId, LfNodeFetch(coid, _, _, _, _, _, _, _), _) => addReference(nodeId)(coid) case (nodeId, LfNodeLookupByKey(_, _, result, _), _) => result.traverse_(addReference(nodeId)) - case (nodeId, LfNodeCreate(cid, _, arg, _, _, _, _, _, _), _) => + case (nodeId, LfNodeCreate(cid, _, arg, _, _, _, _, _), _) => val argRefs = LfTransactionUtil.referencedContractIds(arg) for { _ <- addReferencesByLfValue(nodeId, argRefs.to(LazyList)) @@ -551,8 +552,8 @@ object WellFormedTransaction { * any normalization as the daml indexer/ReadService-consumer does not require rollback-normalized lf-transactions. 
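// Editor's note: throughout this patch cats.data.NonEmptyList gives way to the daml NonEmpty
// wrapper over ordinary collections. A small usage sketch of the calls relied on in the merge
// code just below (assumes com.daml.nonempty on the classpath; values are hypothetical):
import com.daml.nonempty.NonEmpty

object NonEmptySketch {
  val ledgerTimes = NonEmpty(Seq, 3, 3, 3)                          // build a non-empty Seq directly
  val fromPossiblyEmpty: Option[NonEmpty[Seq[Int]]] = NonEmpty.from(Seq.empty[Int]) // None if empty
  val ledgerTime: Either[String, Int] =
    Either.cond(
      ledgerTimes.distinct.size == 1,
      ledgerTimes.head1,                                            // head1 is total, unlike Seq#head
      s"Different ledger times: ${ledgerTimes.toList.mkString(", ")}",
    )
}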
*/ def merge( - transactionsWithRollbackScope: NonEmptyList[ - WithRollbackScope[WellFormedTransaction[WithSuffixes]] + transactionsWithRollbackScope: NonEmpty[ + Seq[WithRollbackScope[WellFormedTransaction[WithSuffixes]]] ] ): Either[String, WellFormedTransaction[WithSuffixesAndMerged]] = { val mergedNodes = HashMap.newBuilder[LfNodeId, LfNode] @@ -565,17 +566,16 @@ object WellFormedTransaction { val transactions = transactionsWithRollbackScope.map(_.unwrap) val ledgerTimes = transactions.map(_.metadata.ledgerTime).distinct val submissionTimes = transactions.map(_.metadata.submissionTime).distinct - val versions = - transactions.map(_.tx.version).distinct(LfTransactionUtil.orderTransactionVersion) + val versions = transactions.map(_.tx.version).distinct for { ledgerTime <- Either.cond( ledgerTimes.size == 1, - ledgerTimes.head, + ledgerTimes.head1, s"Different ledger times: ${ledgerTimes.toList.mkString(", ")}", ) submissionTime <- Either.cond( submissionTimes.size == 1, - submissionTimes.head, + submissionTimes.head1, s"Different submission times: ${submissionTimes.toList.mkString(", ")}", ) version = CantonOnly.maxTransactionVersion(versions) diff --git a/community/common/src/main/scala/com/digitalasset/canton/protocol/messages/ConfirmationRequest.scala b/community/common/src/main/scala/com/digitalasset/canton/protocol/messages/ConfirmationRequest.scala index 538263a83..271d4525c 100644 --- a/community/common/src/main/scala/com/digitalasset/canton/protocol/messages/ConfirmationRequest.scala +++ b/community/common/src/main/scala/com/digitalasset/canton/protocol/messages/ConfirmationRequest.scala @@ -3,7 +3,7 @@ package com.digitalasset.canton.protocol.messages -import cats.data.{NonEmptyList, NonEmptySet} +import com.daml.nonempty.NonEmpty import com.digitalasset.canton.data.ViewType import com.digitalasset.canton.topology.MediatorId import com.digitalasset.canton.logging.pretty.{Pretty, PrettyPrinting} @@ -30,12 +30,12 @@ case class ConfirmationRequest( ) val participants = viewEnvelopes.flatMap(_.protocolMessage.randomSeed.keySet).distinct - val rootHashMessages = NonEmptyList - .fromList(participants.toList) - .map { participantsNel => + val rootHashMessages = NonEmpty + .from(participants) + .map { participantsNE => OpenEnvelope( rootHashMessage, - Recipients.groups(participantsNel.map(NonEmptySet.of(_, mediator))), + Recipients.groups(participantsNE.map(NonEmpty.mk(Set, _, mediator))), ) } .toList diff --git a/community/common/src/main/scala/com/digitalasset/canton/protocol/messages/InformeeMessage.scala b/community/common/src/main/scala/com/digitalasset/canton/protocol/messages/InformeeMessage.scala index 06f19d0ba..2b5f9e633 100644 --- a/community/common/src/main/scala/com/digitalasset/canton/protocol/messages/InformeeMessage.scala +++ b/community/common/src/main/scala/com/digitalasset/canton/protocol/messages/InformeeMessage.scala @@ -4,7 +4,6 @@ package com.digitalasset.canton.protocol.messages import java.util.UUID -import cats.syntax.either._ import com.digitalasset.canton.crypto.HashOps import com.digitalasset.canton.data.{FullInformeeTree, Informee, ViewType} import com.digitalasset.canton.protocol.messages.ProtocolMessage.ProtocolMessageContentCast @@ -14,7 +13,7 @@ import com.digitalasset.canton.serialization.ProtoConverter.ParsingResult import com.digitalasset.canton.topology.{DomainId, MediatorId} import com.digitalasset.canton.util.HasProtoV0 import com.digitalasset.canton.version.ProtocolVersion -import com.digitalasset.canton.{LfPartyId, ProtoDeserializationError} 
+import com.digitalasset.canton.LfPartyId import com.google.protobuf.ByteString /** The informee message to be sent to the mediator. @@ -100,9 +99,7 @@ object InformeeMessage { "InformeeMessage.informeeTree", maybeFullInformeeTreeP, ) - fullInformeeTree <- FullInformeeTree - .fromProtoV0(hashOps)(fullInformeeTreeP) - .leftMap(err => ProtoDeserializationError.TransactionDeserialization(err)) + fullInformeeTree <- FullInformeeTree.fromProtoV0(hashOps)(fullInformeeTreeP) } yield new InformeeMessage(fullInformeeTree) } diff --git a/community/common/src/main/scala/com/digitalasset/canton/protocol/messages/TransactionResultMessage.scala b/community/common/src/main/scala/com/digitalasset/canton/protocol/messages/TransactionResultMessage.scala index b64d978c1..236e3570b 100644 --- a/community/common/src/main/scala/com/digitalasset/canton/protocol/messages/TransactionResultMessage.scala +++ b/community/common/src/main/scala/com/digitalasset/canton/protocol/messages/TransactionResultMessage.scala @@ -4,8 +4,7 @@ package com.digitalasset.canton.protocol.messages import cats.syntax.bifunctor._ -import cats.syntax.either._ -import com.digitalasset.canton.ProtoDeserializationError.{FieldNotSet, TransactionDeserialization} +import com.digitalasset.canton.ProtoDeserializationError.FieldNotSet import com.digitalasset.canton.crypto.{HashOps, HashPurpose} import com.digitalasset.canton.data.ViewType.TransactionViewType import com.digitalasset.canton.data.{CantonTimestamp, InformeeTree} @@ -120,9 +119,7 @@ object TransactionResultMessage { protoNotificationTree <- ProtoConverter .required("notification_tree", protoResultMessage.notificationTree) .leftWiden[ProtoDeserializationError] - notificationTree <- InformeeTree - .fromProtoV0(hashOps, protoNotificationTree) - .leftMap(err => TransactionDeserialization(err)) + notificationTree <- InformeeTree.fromProtoV0(hashOps, protoNotificationTree) } yield new TransactionResultMessage(requestId, transactionResult, notificationTree)( Some(bytes) ) diff --git a/community/common/src/main/scala/com/digitalasset/canton/resource/DbMigrations.scala b/community/common/src/main/scala/com/digitalasset/canton/resource/DbMigrations.scala index 72975aa99..f7da4e66c 100644 --- a/community/common/src/main/scala/com/digitalasset/canton/resource/DbMigrations.scala +++ b/community/common/src/main/scala/com/digitalasset/canton/resource/DbMigrations.scala @@ -3,15 +3,17 @@ package com.digitalasset.canton.resource +import cats.data.EitherT import cats.syntax.either._ import com.digitalasset.canton.config.{DbConfig, ProcessingTimeout} -import com.digitalasset.canton.lifecycle.CloseContext +import com.digitalasset.canton.lifecycle.{CloseContext, UnlessShutdown} import com.digitalasset.canton.logging.pretty.{Pretty, PrettyPrinting} import com.digitalasset.canton.logging.{NamedLoggerFactory, NamedLogging} import com.digitalasset.canton.resource.DbStorage.RetryConfig import com.digitalasset.canton.tracing.TraceContext import com.digitalasset.canton.util.ResourceUtil import com.digitalasset.canton.util.retry.RetryEither +import com.digitalasset.canton.util.ShowUtil._ import io.functionmeta.functionFullName import org.flywaydb.core.Flyway import org.flywaydb.core.api.FlywayException @@ -19,9 +21,9 @@ import slick.jdbc.JdbcBackend.Database import slick.jdbc.hikaricp.HikariCPJdbcDataSource import slick.jdbc.{DataSourceJdbcDataSource, JdbcBackend, JdbcDataSource} +import java.sql.SQLException import javax.sql.DataSource import scala.concurrent.blocking -import scala.util.Try trait 
DbMigrationsFactory { @@ -58,8 +60,8 @@ trait DbMigrations { this: NamedLogging => } protected def withCreatedDb[A]( - fn: Database => Either[DbMigrations.Error, A] - ): Either[DbMigrations.Error, A] = { + fn: Database => EitherT[UnlessShutdown, DbMigrations.Error, A] + ): EitherT[UnlessShutdown, DbMigrations.Error, A] = { DbStorage .createDatabase( dbConfig, @@ -74,16 +76,16 @@ trait DbMigrations { this: NamedLogging => } /** Obtain access to the database to run the migration operation. */ - protected def withDb[A](fn: Database => Either[DbMigrations.Error, A])(implicit + protected def withDb[A](fn: Database => EitherT[UnlessShutdown, DbMigrations.Error, A])(implicit traceContext: TraceContext - ): Either[DbMigrations.Error, A] + ): EitherT[UnlessShutdown, DbMigrations.Error, A] protected def migrateDatabaseInternal( db: Database - )(implicit traceContext: TraceContext): Either[DbMigrations.Error, Unit] = { + )(implicit traceContext: TraceContext): EitherT[UnlessShutdown, DbMigrations.Error, Unit] = { val flyway = createFlyway(createDataSource(db.source)) // Retry the migration in case of failures, which may happen due to a race condition in concurrent migrations - RetryEither[DbMigrations.Error, Unit](10, 100, functionFullName, logger) { + RetryEither.retry[DbMigrations.Error, Unit](10, 100, functionFullName, logger) { Either .catchOnly[FlywayException](flyway.migrate()) .map(r => logger.info(s"Applied ${r.migrationsExecuted} migrations successfully")) @@ -93,7 +95,7 @@ trait DbMigrations { this: NamedLogging => protected def repairFlywayMigrationInternal( db: Database - )(implicit traceContext: TraceContext): Either[DbMigrations.Error, Unit] = { + )(implicit traceContext: TraceContext): EitherT[UnlessShutdown, DbMigrations.Error, Unit] = { val flyway = createFlyway(createDataSource(db.source)) Either .catchOnly[FlywayException](flyway.repair()) @@ -102,13 +104,14 @@ trait DbMigrations { this: NamedLogging => s"The repair of the Flyway database migration succeeded. This is the Flyway repair report: $r" ) ) - .leftMap(DbMigrations.FlywayError) + .leftMap[DbMigrations.Error](DbMigrations.FlywayError) + .toEitherT[UnlessShutdown] } protected def dbConfig: DbConfig /** Migrate the database with all pending migrations. 
*/ - def migrateDatabase(): Either[DbMigrations.Error, Unit] = + def migrateDatabase(): EitherT[UnlessShutdown, DbMigrations.Error, Unit] = TraceContext.withNewTraceContext { implicit traceContext => withDb(migrateDatabaseInternal) } @@ -123,14 +126,16 @@ trait DbMigrations { this: NamedLogging => * - Mark all missing migrations as deleted * ``` */ - def repairFlywayMigration(): Either[DbMigrations.Error, Unit] = + def repairFlywayMigration(): EitherT[UnlessShutdown, DbMigrations.Error, Unit] = TraceContext.withNewTraceContext { implicit traceContext => withDb(repairFlywayMigrationInternal) } - protected def withFlyway[A](fn: (Database, Flyway) => Either[DbMigrations.Error, A])(implicit + protected def withFlyway[A]( + fn: (Database, Flyway) => EitherT[UnlessShutdown, DbMigrations.Error, A] + )(implicit traceContext: TraceContext - ): Either[DbMigrations.Error, A] = + ): EitherT[UnlessShutdown, DbMigrations.Error, A] = withDb { createdDb => ResourceUtil.withResource(createdDb) { db => val flyway = createFlyway(createDataSource(db.source)) @@ -141,43 +146,53 @@ trait DbMigrations { this: NamedLogging => def connectionCheck( failFast: Boolean, processingTimeout: ProcessingTimeout, - )(implicit tc: TraceContext): Either[DbMigrations.DatabaseError, Unit] = { - def attempt: Either[String, Unit] = Try { - withDb( - { createdDb => - ResourceUtil.withResource(createdDb) { db: JdbcBackend.Database => - //TODO(phoebe): The DataSource could be created from the DbConfig, without first having to create the whole - // Database. Swap to this more light-weight approach. - val dataSource = db.source - val conn = dataSource.createConnection() - val valid = blocking { + )(implicit tc: TraceContext): EitherT[UnlessShutdown, DbMigrations.Error, Unit] = { + def attempt: EitherT[UnlessShutdown, DbMigrations.Error, Unit] = { + withDb { createdDb => + ResourceUtil.withResource(createdDb) { db: JdbcBackend.Database => + //TODO(phoebe): The DataSource could be created from the DbConfig, without first having to create the whole + // Database. Swap to this more light-weight approach. + val dataSource = db.source + val conn = dataSource.createConnection() + val valid = blocking { + Either.catchOnly[SQLException]( conn.isValid(processingTimeout.network.duration.toSeconds.toInt) - } - if (valid) Right(()) - else Left(DbMigrations.DatabaseError(s"A trial database connection was not valid")) + ) } + + valid + .leftMap(err => show"failed to check connection $err") + .flatMap { valid => + Either.cond( + valid, + (), + "A trial database connection was not valid", + ) + } + .leftMap[DbMigrations.Error](err => DbMigrations.DatabaseError(err)) + .toEitherT[UnlessShutdown] } - ).leftMap(err => err.toString) - }.toEither.fold(throwable => Left(throwable.toString), identity) + } + } - if (failFast) { attempt.leftMap(DbMigrations.DatabaseError) } - else { + if (failFast) { + attempt + } else { // Repeatedly attempt to create a valid connection, so that the system waits for the database to come up // We must retry the whole `attempt` operation including the `withDb`, as `withDb` may itself fail if the // database is not up. val retryConfig = RetryConfig.forever - val res = RetryEither[String, Unit]( + RetryEither.retryUnlessShutdown[DbMigrations.Error, Unit]( retryConfig.maxRetries, retryConfig.retryWaitingTime.toMillis, functionFullName, logger, )(attempt) - res.leftMap(DbMigrations.DatabaseError) } } /** Migrate a database if it is empty, otherwise skip the migration. 
*/ - def migrateIfFresh(): Either[DbMigrations.Error, Unit] = + def migrateIfFresh(): EitherT[UnlessShutdown, DbMigrations.Error, Unit] = TraceContext.withNewTraceContext { implicit traceContext => withFlyway { case (db, flyway) => migrateIfFreshInternal(db, flyway) @@ -186,22 +201,22 @@ trait DbMigrations { this: NamedLogging => private def migrateIfFreshInternal(db: Database, flyway: Flyway)(implicit traceContext: TraceContext - ): Either[DbMigrations.Error, Unit] = { + ): EitherT[UnlessShutdown, DbMigrations.Error, Unit] = { if (flyway.info().applied().isEmpty) migrateDatabaseInternal(db) else { logger.debug("Skip flyway migration on non-empty database") - Right(()) + EitherT.rightT(()) } } /** Combined method of migrateIfFresh and checkPendingMigration, avoids creating multiple pools */ - def migrateIfFreshAndCheckPending(): Either[DbMigrations.Error, Unit] = + def migrateIfFreshAndCheckPending(): EitherT[UnlessShutdown, DbMigrations.Error, Unit] = TraceContext.withNewTraceContext { implicit traceContext => withFlyway { case (db, flyway) => for { _ <- migrateIfFreshInternal(db, flyway) - _ <- checkPendingMigrationInternal(flyway) + _ <- checkPendingMigrationInternal(flyway).toEitherT[UnlessShutdown] } yield () } } @@ -209,8 +224,12 @@ trait DbMigrations { this: NamedLogging => def checkDbVersion( timeouts: ProcessingTimeout, standardConfig: Boolean, - )(implicit tc: TraceContext): Either[DbMigrations.Error, Unit] = - withDb(DbVersionCheck.dbVersionCheck(timeouts, standardConfig, dbConfig)) + )(implicit tc: TraceContext): EitherT[UnlessShutdown, DbMigrations.Error, Unit] = + withDb { db => + val check = DbVersionCheck + .dbVersionCheck(timeouts, standardConfig, dbConfig) + check(db).toEitherT[UnlessShutdown] + } private def checkPendingMigrationInternal( flyway: Flyway @@ -255,9 +274,9 @@ class CommunityDbMigrations( extends DbMigrations with NamedLogging { - override protected def withDb[A](fn: Database => Either[DbMigrations.Error, A])(implicit - traceContext: TraceContext - ): Either[DbMigrations.Error, A] = withCreatedDb(fn) + override protected def withDb[A](fn: Database => EitherT[UnlessShutdown, DbMigrations.Error, A])( + implicit traceContext: TraceContext + ): EitherT[UnlessShutdown, DbMigrations.Error, A] = withCreatedDb(fn) } object DbMigrations { diff --git a/community/common/src/main/scala/com/digitalasset/canton/resource/DbStorageSingle.scala b/community/common/src/main/scala/com/digitalasset/canton/resource/DbStorageSingle.scala index 1a68f6cc2..ef656d126 100644 --- a/community/common/src/main/scala/com/digitalasset/canton/resource/DbStorageSingle.scala +++ b/community/common/src/main/scala/com/digitalasset/canton/resource/DbStorageSingle.scala @@ -3,9 +3,9 @@ package com.digitalasset.canton.resource -import cats.syntax.either._ +import cats.data.EitherT import com.digitalasset.canton.config.{DbConfig, ProcessingTimeout, QueryCostMonitoringConfig} -import com.digitalasset.canton.lifecycle.{CloseContext, FlagCloseable} +import com.digitalasset.canton.lifecycle.{CloseContext, FlagCloseable, UnlessShutdown} import com.digitalasset.canton.logging.{NamedLoggerFactory, NamedLogging} import com.digitalasset.canton.metrics.DbStorageMetrics import com.digitalasset.canton.resource.DbStorage.{DbAction, DbStorageCreationException} @@ -68,6 +68,7 @@ object DbStorageSingle { retryConfig, ) .valueOr(err => throw new DbStorageCreationException(err)) + .onShutdown(throw new DbStorageCreationException("Shutdown during creation")) def create( config: DbConfig, @@ -77,7 +78,10 @@ object 
DbStorageSingle { timeouts: ProcessingTimeout, loggerFactory: NamedLoggerFactory, retryConfig: DbStorage.RetryConfig = DbStorage.RetryConfig.failFast, - )(implicit ec: ExecutionContext, closeContext: CloseContext): Either[String, DbStorageSingle] = + )(implicit + ec: ExecutionContext, + closeContext: CloseContext, + ): EitherT[UnlessShutdown, String, DbStorageSingle] = for { db <- DbStorage.createDatabase( config, diff --git a/community/common/src/main/scala/com/digitalasset/canton/resource/Storage.scala b/community/common/src/main/scala/com/digitalasset/canton/resource/Storage.scala index 8672ad908..0ba206c6e 100644 --- a/community/common/src/main/scala/com/digitalasset/canton/resource/Storage.scala +++ b/community/common/src/main/scala/com/digitalasset/canton/resource/Storage.scala @@ -3,13 +3,19 @@ package com.digitalasset.canton.resource -import cats.data.{Chain, EitherT, NonEmptyList, OptionT} +import cats.data.{Chain, EitherT, OptionT} import cats.syntax.either._ import cats.syntax.functor._ import cats.{Functor, Monad} +import com.daml.nonempty.NonEmpty import com.digitalasset.canton.config.RequireTypes.{PositiveNumeric, String255} import com.digitalasset.canton.config._ -import com.digitalasset.canton.lifecycle.{CloseContext, FlagCloseable, HasCloseContext} +import com.digitalasset.canton.lifecycle.{ + CloseContext, + FlagCloseable, + HasCloseContext, + UnlessShutdown, +} import com.digitalasset.canton.logging.pretty.{Pretty, PrettyPrinting} import com.digitalasset.canton.logging.{ ErrorLoggingContext, @@ -71,6 +77,7 @@ sealed trait Storage extends AutoCloseable { trait StorageFactory { def config: StorageConfig + /** Throws an exception in case of errors or shutdown during storage creation. */ def tryCreate( connectionPoolForParticipant: Boolean, logQueryCost: Option[QueryCostMonitoringConfig], @@ -81,10 +88,10 @@ trait StorageFactory { ec: ExecutionContext, traceContext: TraceContext, closeContext: CloseContext, - ): Future[Storage] = - create(connectionPoolForParticipant, logQueryCost, metrics, timeouts, loggerFactory).valueOr( - err => throw new StorageCreationException(err) - ) + ): Storage = + create(connectionPoolForParticipant, logQueryCost, metrics, timeouts, loggerFactory) + .valueOr(err => throw new StorageCreationException(err)) + .onShutdown(throw new StorageCreationException("Shutdown during storage creation")) def create( connectionPoolForParticipant: Boolean, @@ -96,7 +103,7 @@ trait StorageFactory { ec: ExecutionContext, traceContext: TraceContext, closeContext: CloseContext, - ): EitherT[Future, String, Storage] + ): EitherT[UnlessShutdown, String, Storage] } object StorageFactory { @@ -114,14 +121,13 @@ class CommunityStorageFactory(val config: CommunityStorageConfig) extends Storag ec: ExecutionContext, traceContext: TraceContext, closeContext: CloseContext, - ): EitherT[Future, String, Storage] = + ): EitherT[UnlessShutdown, String, Storage] = config match { case CommunityStorageConfig.Memory(_) => EitherT.rightT(new MemoryStorage) case db: DbConfig => DbStorageSingle .create(db, connectionPoolForParticipant, logQueryCost, metrics, timeouts, loggerFactory) .widen[Storage] - .toEitherT } } @@ -506,7 +512,7 @@ object DbStorage { retryConfig: DbStorage.RetryConfig = DbStorage.RetryConfig.failFast, )( loggerFactory: NamedLoggerFactory - )(implicit closeContext: CloseContext): Either[String, Database] = { + )(implicit closeContext: CloseContext): EitherT[UnlessShutdown, String, Database] = { val baseLogger = loggerFactory.getLogger(classOf[DbStorage]) val logger = 
TracedLogger(baseLogger) @@ -547,7 +553,7 @@ object DbStorage { s"Initializing database storage with config: ${DbConfig.hideConfidential(configWithMigrationFallbacks)}" ) - RetryEither[String, Database]( + RetryEither.retry[String, Database]( maxRetries = retryConfig.maxRetries, waitInMs = retryConfig.retryWaitingTime.toMillis, operationName = functionFullName, @@ -765,12 +771,12 @@ object DbStorage { @nowarn("cat=unused") // somehow, f is wrongly reported as unused by the compiler def toInClauses[T]( field: String, - values: NonEmptyList[T], + values: NonEmpty[Seq[T]], maxValuesInSqlList: PositiveNumeric[Int], - )(implicit f: SetParameter[T]): Iterable[(List[T], SQLActionBuilder)] = { + )(implicit f: SetParameter[T]): Iterable[(Seq[T], SQLActionBuilder)] = { import DbStorage.Implicits.BuilderChain._ - values.toList + values .grouped(maxValuesInSqlList.unwrap) .map { groupedValues => val inClause = sql"#$field in (" ++ @@ -785,7 +791,7 @@ object DbStorage { def toInClauses_[T]( field: String, - values: NonEmptyList[T], + values: NonEmpty[Seq[T]], maxValuesSqlInListSize: PositiveNumeric[Int], )(implicit f: SetParameter[T]): Iterable[SQLActionBuilder] = toInClauses(field, values, maxValuesSqlInListSize).map { case (_, builder) => builder } diff --git a/community/common/src/main/scala/com/digitalasset/canton/sequencing/protocol/Recipients.scala b/community/common/src/main/scala/com/digitalasset/canton/sequencing/protocol/Recipients.scala index 939567ff8..274ebbf66 100644 --- a/community/common/src/main/scala/com/digitalasset/canton/sequencing/protocol/Recipients.scala +++ b/community/common/src/main/scala/com/digitalasset/canton/sequencing/protocol/Recipients.scala @@ -3,8 +3,10 @@ package com.digitalasset.canton.sequencing.protocol -import cats.data.{NonEmptyList, NonEmptySet} -import cats.implicits._ +import cats.syntax.reducible._ +import cats.syntax.traverse._ +import com.daml.nonempty.NonEmpty +import com.daml.nonempty.catsinstances._ import com.digitalasset.canton.ProtoDeserializationError import com.digitalasset.canton.topology.Member import com.digitalasset.canton.logging.pretty.{Pretty, PrettyPrinting} @@ -15,18 +17,17 @@ import com.digitalasset.canton.util.HasProtoV0 /** Recipients of a batch. Uses a list of [[com.digitalasset.canton.sequencing.protocol.RecipientsTree]]s * that define the members receiving a batch, and which members see which other recipients. 
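// Editor's sketch, in plain Scala, of the recipients-tree semantics this file defines: a member
// only sees the topmost subtree containing it, and allRecipients is the union over the whole tree.
// Tree is a simplified stand-in, not Canton's RecipientsTree.
final case class Tree(group: Set[String], children: Seq[Tree]) {
  def allRecipients: Set[String] = group ++ children.flatMap(_.allRecipients)
  def forMember(member: String): Seq[Tree] =
    if (group.contains(member)) Seq(this)            // stop here: deeper occurrences stay hidden
    else children.flatMap(_.forMember(member))
}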
*/ -case class Recipients private (trees: NonEmptyList[RecipientsTree]) +case class Recipients private (trees: NonEmpty[Seq[RecipientsTree]]) extends PrettyPrinting with HasProtoV0[v0.Recipients] { lazy val allRecipients: Set[Member] = { - trees.toList.flatMap(t => t.allRecipients).toSet + trees.flatMap(t => t.allRecipients).toSet } def forMember(member: Member): Option[Recipients] = { - - val ts = trees.toList.flatMap(t => t.forMember(member)) - val optTs = NonEmptyList.fromList(ts) + val ts = trees.forgetNE.flatMap(t => t.forMember(member)) + val optTs = NonEmpty.from(ts) optTs.map(Recipients(_)) } @@ -38,9 +39,9 @@ case class Recipients private (trees: NonEmptyList[RecipientsTree]) override def pretty: Pretty[Recipients.this.type] = prettyOfClass(param("Recipient trees", _.trees.toList)) - def asSingleGroup: Option[NonEmptySet[Member]] = { + def asSingleGroup: Option[NonEmpty[Set[Member]]] = { trees match { - case NonEmptyList(RecipientsTree(group, Nil), Nil) => Some(group) + case Seq(RecipientsTree(group, Seq())) => Some(group) case _ => None } } @@ -48,8 +49,8 @@ case class Recipients private (trees: NonEmptyList[RecipientsTree]) /** Members that appear at the leaf of the BCC tree. For example, the informees of a view are leaf members of the * view message. */ - lazy val leafMembers: NonEmptySet[Member] = - trees.tail.foldLeft(trees.head.leafMembers)(_ |+| _.leafMembers) + lazy val leafMembers: NonEmpty[Set[Member]] = + trees.toNEF.reduceLeftTo(_.leafMembers)(_ ++ _.leafMembers) } @@ -57,35 +58,33 @@ object Recipients { def fromProtoV0(proto: v0.Recipients): ParsingResult[Recipients] = { for { - trees <- proto.recipientsTree.toList.traverse(t => RecipientsTree.fromProtoV0(t)) - recipients <- NonEmptyList - .fromList(trees) - .fold[ParsingResult[Recipients]]( - Left( - ProtoDeserializationError.ValueConversionError( - "RecipientsTree.recipients", - s"RecipientsTree.recipients must be non-empty", - ) + trees <- proto.recipientsTree.traverse(t => RecipientsTree.fromProtoV0(t)) + recipients <- NonEmpty + .from(trees) + .toRight( + ProtoDeserializationError.ValueConversionError( + "RecipientsTree.recipients", + s"RecipientsTree.recipients must be non-empty", ) - )(ts => Right(Recipients(ts))) - } yield recipients + ) + } yield Recipients(recipients) } /** Create a [[com.digitalasset.canton.sequencing.protocol.Recipients]] representing a group of * members that "see" each other. */ def cc(first: Member, others: Member*): Recipients = - Recipients(NonEmptyList.of(RecipientsTree(NonEmptySet.of(first, others: _*), List.empty))) + Recipients(NonEmpty(Seq, RecipientsTree.leaf(NonEmpty(Set, first, others: _*)))) /** Create a [[com.digitalasset.canton.sequencing.protocol.Recipients]] representing independent groups of members * that do not "see" each other. 
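// Editor's sketch of how the constructors in this object are used, mirroring the way
// ConfirmationRequest builds its root-hash-message recipients earlier in this patch.
// p1, p2 and mediator are hypothetical members; imports match those already in this file.
import com.daml.nonempty.NonEmpty
import com.digitalasset.canton.topology.Member

def recipientsExample(p1: Member, p2: Member, mediator: Member): (Recipients, Recipients) = {
  val singleGroup = Recipients.cc(p1, p2)                          // one group that sees itself
  val perParticipant = Recipients.groups(
    NonEmpty(Seq, NonEmpty.mk(Set, p1, mediator), NonEmpty.mk(Set, p2, mediator))
  )                                                                // independent groups, mediator in each
  (singleGroup, perParticipant)
}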
*/ - def groups(groups: NonEmptyList[NonEmptySet[Member]]): Recipients = - Recipients(groups.map(group => RecipientsTree(group, Nil))) + def groups(groups: NonEmpty[Seq[NonEmpty[Set[Member]]]]): Recipients = + Recipients(groups.map(group => RecipientsTree.leaf(group))) def ofSet[T <: Member](set: Set[T]): Option[Recipients] = { val members = set.toList - NonEmptyList.fromList(members).map(list => Recipients.cc(list.head, list.tail: _*)) + NonEmpty.from(members).map(list => Recipients.cc(list.head1, list.tail1: _*)) } } diff --git a/community/common/src/main/scala/com/digitalasset/canton/sequencing/protocol/RecipientsTree.scala b/community/common/src/main/scala/com/digitalasset/canton/sequencing/protocol/RecipientsTree.scala index b44b436f5..d8cbe1909 100644 --- a/community/common/src/main/scala/com/digitalasset/canton/sequencing/protocol/RecipientsTree.scala +++ b/community/common/src/main/scala/com/digitalasset/canton/sequencing/protocol/RecipientsTree.scala @@ -3,10 +3,10 @@ package com.digitalasset.canton.sequencing.protocol -import cats.data.NonEmptySet -import cats.syntax.foldable._ -import cats.syntax.semigroup._ +import cats.syntax.reducible._ import cats.syntax.traverse._ +import com.daml.nonempty.NonEmpty +import com.daml.nonempty.catsinstances._ import com.digitalasset.canton.ProtoDeserializationError import com.digitalasset.canton.topology.Member import com.digitalasset.canton.logging.pretty.{Pretty, PrettyPrinting} @@ -19,64 +19,60 @@ import com.digitalasset.canton.util.HasProtoV0 * the member. If a member is present in a subtree A and a sub-subtree of A then it should only see * the top-level subtree A. */ -case class RecipientsTree(recipientGroup: NonEmptySet[Member], children: List[RecipientsTree]) +case class RecipientsTree(recipientGroup: NonEmpty[Set[Member]], children: Seq[RecipientsTree]) extends PrettyPrinting with HasProtoV0[v0.RecipientsTree] { override def pretty: Pretty[RecipientsTree] = - prettyOfClass(param("value", _.recipientGroup.toList), param("children", _.children)) + prettyOfClass(param("recipient group", _.recipientGroup.toList), param("children", _.children)) - lazy val allRecipients: Set[Member] = { + lazy val allRecipients: NonEmpty[Set[Member]] = { val tail: Set[Member] = children.flatMap(t => t.allRecipients).toSet - recipientGroup.toSortedSet ++ tail + recipientGroup ++ tail } - def forMember(member: Member): List[RecipientsTree] = { + def forMember(member: Member): Seq[RecipientsTree] = { if (recipientGroup.contains(member)) { - List(this) + Seq(this) } else { children.flatMap(c => c.forMember(member)) } } - lazy val leafMembers: NonEmptySet[Member] = children match { - case Nil => recipientGroup - case c :: cs => cs.foldLeft(c.leafMembers)(_ |+| _.leafMembers) + lazy val leafMembers: NonEmpty[Set[Member]] = children match { + case NonEmpty(cs) => cs.toNEF.reduceLeftTo(_.leafMembers)(_ ++ _.leafMembers) + case _ => recipientGroup } override def toProtoV0: v0.RecipientsTree = { - val recipientsP = recipientGroup.toList.map(member => member.toProtoPrimitive) - val childrenP = children.map { t: RecipientsTree => - t.toProtoV0 - } + val recipientsP = recipientGroup.toSeq.map(member => member.toProtoPrimitive) + val childrenP = children.map(_.toProtoV0) new v0.RecipientsTree(recipientsP, childrenP) } } object RecipientsTree { + def leaf(group: NonEmpty[Set[Member]]): RecipientsTree = RecipientsTree(group, Seq.empty) + def fromProtoV0( treeProto: v0.RecipientsTree ): ParsingResult[RecipientsTree] = { for { - members <- 
treeProto.recipients.toList.traverse(str => + members <- treeProto.recipients.traverse(str => Member.fromProtoPrimitive(str, "RecipientsTreeProto.recipients") ) - membersNonEmpty <- { - members match { - case Nil => - Left( - ProtoDeserializationError.ValueConversionError( - "RecipientsTree.recipients", - s"RecipientsTree.recipients must be non-empty", - ) - ) - case x :: xs => Right(NonEmptySet.of[Member](x, xs: _*)) - } - } + membersNonEmpty <- NonEmpty + .from(members) + .toRight( + ProtoDeserializationError.ValueConversionError( + "RecipientsTree.recipients", + s"RecipientsTree.recipients must be non-empty", + ) + ) children = treeProto.children childTrees <- children.toList.traverse(fromProtoV0) - } yield RecipientsTree(membersNonEmpty, childTrees) + } yield RecipientsTree(membersNonEmpty.toSet, childTrees) } } diff --git a/community/common/src/main/scala/com/digitalasset/canton/store/db/DbBulkUpdateProcessor.scala b/community/common/src/main/scala/com/digitalasset/canton/store/db/DbBulkUpdateProcessor.scala index 943ec8df2..af0bcfa3a 100644 --- a/community/common/src/main/scala/com/digitalasset/canton/store/db/DbBulkUpdateProcessor.scala +++ b/community/common/src/main/scala/com/digitalasset/canton/store/db/DbBulkUpdateProcessor.scala @@ -3,8 +3,8 @@ package com.digitalasset.canton.store.db -import cats.data.NonEmptyList import cats.syntax.either._ +import com.daml.nonempty.NonEmpty import com.digitalasset.canton.DiscardOps import com.digitalasset.canton.lifecycle.CloseContext import com.digitalasset.canton.logging.ErrorLoggingContext @@ -33,7 +33,7 @@ trait DbBulkUpdateProcessor[A, B] extends BatchAggregator.Processor[A, Try[B]] { * * @return An [[scala.collection.Iterable]] of the same size as `items` that contains the response for `items(i)` is at index `i`. */ - protected def bulkUpdateWithCheck(items: NonEmptyList[Traced[A]], queryBaseName: String)(implicit + protected def bulkUpdateWithCheck(items: NonEmpty[Seq[Traced[A]]], queryBaseName: String)(implicit traceContext: TraceContext, closeContext: CloseContext, ): Future[Iterable[Try[B]]] = { @@ -56,17 +56,17 @@ trait DbBulkUpdateProcessor[A, B] extends BatchAggregator.Processor[A, Try[B]] { } } - protected def bulkUpdateWithCheck(items: List[Traced[A]], queryBaseName: String)(implicit + protected def bulkUpdateWithCheck(items: Seq[Traced[A]], queryBaseName: String)(implicit traceContext: TraceContext, closeContext: CloseContext, ): Future[Iterable[Try[B]]] = - NonEmptyList.fromList(items) match { - case None => Future.successful(Iterable.empty[Try[B]]) + NonEmpty.from(items) match { case Some(itemsNel) => bulkUpdateWithCheck(itemsNel, queryBaseName) + case None => Future.successful(Iterable.empty[Try[B]]) } /** Idempotent bulk DB operation for the given items. 
*/ - protected def bulkUpdateAction(items: NonEmptyList[Traced[A]])(implicit + protected def bulkUpdateAction(items: NonEmpty[Seq[Traced[A]]])(implicit batchTraceContext: TraceContext ): DBIOAction[Array[Int], NoStream, Effect.All] @@ -119,11 +119,10 @@ trait DbBulkUpdateProcessor[A, B] extends BatchAggregator.Processor[A, Try[B]] { toCheck: Seq[BulkUpdatePendingCheck[A, B]], queryBaseName: String, )(implicit traceContext: TraceContext, closeContext: CloseContext): Future[Unit] = { - // TODO(#8271) No need for a list conversion here - NonEmptyList.fromList(toCheck.toList) match { + NonEmpty.from(toCheck) match { case None => Future.unit - case Some(toCheckNel) => - val ids = toCheckNel.map(x => itemIdentifier(x.target.value)) + case Some(toCheckNE) => + val ids = toCheckNE.map(x => itemIdentifier(x.target.value)) val lookupQueries = checkQuery(ids) storage.sequentialQueryAndCombine(lookupQueries, s"$queryBaseName lookup").map { foundDatas => @@ -152,7 +151,7 @@ trait DbBulkUpdateProcessor[A, B] extends BatchAggregator.Processor[A, Try[B]] { protected def dataIdentifier(state: CheckData): ItemIdentifier /** A list of queries for the items that we want to check for */ - protected def checkQuery(itemsToCheck: NonEmptyList[ItemIdentifier])(implicit + protected def checkQuery(itemsToCheck: NonEmpty[Seq[ItemIdentifier]])(implicit batchTraceContext: TraceContext ): Iterable[DbAction.ReadOnly[Iterable[CheckData]]] diff --git a/community/common/src/main/scala/com/digitalasset/canton/topology/store/TopologyStore.scala b/community/common/src/main/scala/com/digitalasset/canton/topology/store/TopologyStore.scala index 2f4ce9760..3710fbcc9 100644 --- a/community/common/src/main/scala/com/digitalasset/canton/topology/store/TopologyStore.scala +++ b/community/common/src/main/scala/com/digitalasset/canton/topology/store/TopologyStore.scala @@ -6,7 +6,7 @@ package com.digitalasset.canton.topology.store import cats.syntax.traverse._ import com.digitalasset.canton.config.ProcessingTimeout import com.digitalasset.canton.config.RequireTypes.LengthLimitedString.DisplayName -import com.digitalasset.canton.config.RequireTypes.{LengthLimitedString, String256M, String255} +import com.digitalasset.canton.config.RequireTypes.{LengthLimitedString, String255, String256M} import com.digitalasset.canton.crypto.{PublicKey, SignatureCheckError} import com.digitalasset.canton.data.CantonTimestamp import com.digitalasset.canton.logging.pretty.{Pretty, PrettyPrinting} @@ -19,7 +19,7 @@ import com.digitalasset.canton.topology.store.db.DbTopologyStoreFactory import com.digitalasset.canton.topology.store.memory.InMemoryTopologyStoreFactory import com.digitalasset.canton.topology.transaction._ import com.digitalasset.canton.tracing.TraceContext -import com.digitalasset.canton.util.ErrorUtil +import com.digitalasset.canton.util.{ErrorUtil, MonadUtil} import com.digitalasset.canton.ProtoDeserializationError import com.google.common.annotations.VisibleForTesting @@ -289,11 +289,13 @@ abstract class TopologyStore(implicit ec: ExecutionContext) extends AutoCloseabl def bootstrap( collection: StoredTopologyTransactions[TopologyChangeOp.Positive] )(implicit traceContext: TraceContext): Future[Unit] = - collection.result - .groupBy(_.validFrom) - .toList - .sortBy { case (validFrom, _) => validFrom } - .traverse { case (validFrom, transactions) => + MonadUtil + .sequentialTraverse( + collection.result + .groupBy(_.validFrom) + .toList + .sortBy { case (validFrom, _) => validFrom } + ) { case (validFrom, transactions) => val txs = 
transactions.map(tx => ValidatedTopologyTransaction(tx.transaction, None)) for { _ <- append(validFrom, txs) diff --git a/community/common/src/main/scala/com/digitalasset/canton/tracing/BatchTracing.scala b/community/common/src/main/scala/com/digitalasset/canton/tracing/BatchTracing.scala index 5ab1dc455..724fcda86 100644 --- a/community/common/src/main/scala/com/digitalasset/canton/tracing/BatchTracing.scala +++ b/community/common/src/main/scala/com/digitalasset/canton/tracing/BatchTracing.scala @@ -3,13 +3,13 @@ package com.digitalasset.canton.tracing -import cats.data.NonEmptyList +import com.daml.nonempty.NonEmpty import com.digitalasset.canton.logging.TracedLogger /** Utility mixin for creating a single trace context from a batch of traced items */ object BatchTracing { - def withNelTracedBatch[A <: HasTraceContext, B](logger: TracedLogger, items: NonEmptyList[A])( - fn: TraceContext => NonEmptyList[A] => B + def withTracedBatch[A <: HasTraceContext, B](logger: TracedLogger, items: NonEmpty[Seq[A]])( + fn: TraceContext => NonEmpty[Seq[A]] => B ): B = - fn(TraceContext.ofBatch(items.toList)(logger))(items) + fn(TraceContext.ofBatch(items)(logger))(items) } diff --git a/community/common/src/main/scala/com/digitalasset/canton/tracing/TracerProvider.scala b/community/common/src/main/scala/com/digitalasset/canton/tracing/TracerProvider.scala index 6fe456e15..1d2342ea9 100644 --- a/community/common/src/main/scala/com/digitalasset/canton/tracing/TracerProvider.scala +++ b/community/common/src/main/scala/com/digitalasset/canton/tracing/TracerProvider.scala @@ -3,7 +3,6 @@ package com.digitalasset.canton.tracing -import io.grpc.{ManagedChannel, ManagedChannelBuilder} import io.opentelemetry.api.common.Attributes import io.opentelemetry.api.trace.Tracer import io.opentelemetry.api.trace.propagation.W3CTraceContextPropagator @@ -12,8 +11,8 @@ import io.opentelemetry.context.propagation.ContextPropagators import io.opentelemetry.exporter.jaeger.JaegerGrpcSpanExporter import io.opentelemetry.exporter.zipkin.ZipkinSpanExporter import io.opentelemetry.sdk.OpenTelemetrySdk -import io.opentelemetry.sdk.autoconfigure.OpenTelemetrySdkAutoConfiguration -import io.opentelemetry.sdk.autoconfigure.spi.SdkTracerProviderConfigurer +import io.opentelemetry.sdk.autoconfigure.AutoConfiguredOpenTelemetrySdk +import io.opentelemetry.sdk.autoconfigure.spi.ConfigProperties import io.opentelemetry.sdk.common.CompletableResultCode import io.opentelemetry.sdk.resources.Resource import io.opentelemetry.sdk.trace.`export`.SpanExporter @@ -89,28 +88,19 @@ private object NoopSpanExporter extends SpanExporter { override def shutdown(): CompletableResultCode = CompletableResultCode.ofSuccess() } -/** This implements a service provider interface (SPI) such that the configure method gets called - * by OpenTelemetrySdkAutoConfiguration.initialize() and passes the tracer provider builder that contains all the - * system properties as described under at https://github.com/open-telemetry/opentelemetry-java/tree/main/sdk-extensions/autoconfigure - * We capture this builder so that we can reuse it multiple times with different service names instead of only using the - * global one (GlobalOpenTelemetry.get()). - * Notice that for this to be picked up, there is a file that contains this class's fully qualified name under - * resources/META-INF/services. 
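// Editor's note on the TopologyStore.bootstrap hunk above, which switches from traverse to
// MonadUtil.sequentialTraverse. A rough plain-Future equivalent for intuition (not Canton's
// implementation): each step starts only after the previous Future completes, whereas an
// eager traverse would kick off all the futures up front.
import scala.concurrent.{ExecutionContext, Future}

def sequentialTraverseSketch[A, B](xs: Seq[A])(f: A => Future[B])(implicit
    ec: ExecutionContext
): Future[Seq[B]] =
  xs.foldLeft(Future.successful(Vector.empty[B])) { (accF, a) =>
    accF.flatMap(acc => f(a).map(acc :+ _))
  }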
- */ -private class TracerProviderConfigurer() extends SdkTracerProviderConfigurer { - override def configure(tracerProvider: SdkTracerProviderBuilder): Unit = { - Autoconfigure.autoconfigureBuilder.set(Some(tracerProvider)) - } -} - private object Autoconfigure { - // the TracerProviderConfigurer above is responsible for making sure this gets set once OpenTelemetrySdkAutoConfiguration.initialize() gets run val autoconfigureBuilder = new AtomicReference[Option[SdkTracerProviderBuilder]](None) val isEnabled: Boolean = sys.props.contains("otel.traces.exporter") if (isEnabled) { // set default propagator, otherwise the ledger-api-client interceptor won't propagate any information sys.props.getOrElseUpdate("otel.propagators", "tracecontext") - OpenTelemetrySdkAutoConfiguration.initialize() + AutoConfiguredOpenTelemetrySdk + .builder() + .addTracerProviderCustomizer { (t: SdkTracerProviderBuilder, u: ConfigProperties) => + autoconfigureBuilder.set(Some(t)) + t + } + .build() } else GlobalOpenTelemetry.set( OpenTelemetrySdk.builder @@ -148,10 +138,8 @@ object TracerProvider { private def createExporter(config: TracingConfig.Exporter): SpanExporter = config match { case TracingConfig.Exporter.Jaeger(address, port) => - val jaegerChannel: ManagedChannel = - ManagedChannelBuilder.forAddress(address, port).usePlaintext().build(); JaegerGrpcSpanExporter.builder - .setChannel(jaegerChannel) + .setEndpoint(s"http://$address:$port") .setTimeout(30, TimeUnit.SECONDS) .build case TracingConfig.Exporter.Zipkin(address, port) => diff --git a/community/common/src/main/scala/com/digitalasset/canton/util/BatchAggregator.scala b/community/common/src/main/scala/com/digitalasset/canton/util/BatchAggregator.scala index 95f99bc24..ecb539661 100644 --- a/community/common/src/main/scala/com/digitalasset/canton/util/BatchAggregator.scala +++ b/community/common/src/main/scala/com/digitalasset/canton/util/BatchAggregator.scala @@ -3,7 +3,7 @@ package com.digitalasset.canton.util -import cats.data.NonEmptyList +import com.daml.nonempty.NonEmpty import com.digitalasset.canton.config.BatchAggregatorConfig import com.digitalasset.canton.logging.TracedLogger import com.digitalasset.canton.metrics.MetricHandle.GaugeM @@ -81,7 +81,7 @@ object BatchAggregator { def executeSingle( item: A )(implicit ec: ExecutionContext, traceContext: TraceContext): Future[B] = - executeBatch(NonEmptyList.one(Traced(item))).flatMap(_.headOption match { + executeBatch(NonEmpty(Seq, Traced(item))).flatMap(_.headOption match { case Some(value) => Future.successful(value) case None => val error = s"executeBatch returned an empty sequence of results" @@ -94,8 +94,7 @@ object BatchAggregator { * @return The responses for the items in the correct order. 
* Must have the same length */ - // TODO(#8271) Generalize to NonEmpty[Seq[Traced[A]]] - def executeBatch(items: NonEmptyList[Traced[A]])(implicit + def executeBatch(items: NonEmpty[Seq[Traced[A]]])(implicit traceContext: TraceContext ): Future[Iterable[B]] @@ -173,16 +172,15 @@ class BatchAggregatorImpl[A, B]( if (oldInFlight < maximumInFlight) { val queueItems = pollItemsFromQueue() - // TODO(#8271) No need for a list here - NonEmptyList.fromList(queueItems.toList) match { - case Some(queueItemsNel) => - if (queueItemsNel.toList.lengthCompare(1) == 0) { - val (tracedItem, promise) = queueItemsNel.head + NonEmpty.from(queueItems) match { + case Some(queueItemsNE) => + if (queueItemsNE.lengthCompare(1) == 0) { + val (tracedItem, promise) = queueItemsNE.head1 tracedItem.withTraceContext { implicit traceContext => item => promise.completeWith(runSingleWithoutIncrement(item)).discard[Promise[B]] } } else { - val items = queueItemsNel.map(_._1) + val items = queueItemsNE.map(_._1) val batchTraceContext = TraceContext.ofBatch(items.toList)(processor.logger) Future diff --git a/community/common/src/main/scala/com/digitalasset/canton/util/LfTransactionUtil.scala b/community/common/src/main/scala/com/digitalasset/canton/util/LfTransactionUtil.scala index 09940cdfe..2036884f2 100644 --- a/community/common/src/main/scala/com/digitalasset/canton/util/LfTransactionUtil.scala +++ b/community/common/src/main/scala/com/digitalasset/canton/util/LfTransactionUtil.scala @@ -274,21 +274,21 @@ object LfTransactionUtil { ) { case ( _, - LfNodeExercises(_, templateId, _, true, _, _, _, _, _, _, _, Some(key), _, _, _), + LfNodeExercises(_, templateId, _, true, _, _, _, _, _, _, _, Some(key), _, _), state, ) => state.consumed(LfGlobalKey.assertBuild(templateId, key.key)) case ( _, - LfNodeExercises(_, templateId, _, false, _, _, _, _, _, _, _, Some(key), _, _, _), + LfNodeExercises(_, templateId, _, false, _, _, _, _, _, _, _, Some(key), _, _), state, ) => state.referenced(LfGlobalKey.assertBuild(templateId, key.key)) case (_, _, state) => state // non-key exercise } { - case (_, LfNodeCreate(_, templateId, _, _, _, _, Some(key), _, _), state) => + case (_, LfNodeCreate(_, templateId, _, _, _, _, Some(key), _), state) => state.created(LfGlobalKey.assertBuild(templateId, key.key)) - case (_, LfNodeFetch(_, templateId, _, _, _, Some(key), _, _, _), state) => + case (_, LfNodeFetch(_, templateId, _, _, _, Some(key), _, _), state) => state.referenced(LfGlobalKey.assertBuild(templateId, key.key)) case (_, Node.NodeLookupByKey(templateId, key, Some(_), _), state) => state.referenced(LfGlobalKey.assertBuild(templateId, key.key)) @@ -363,9 +363,9 @@ object LfTransactionUtil { val actingParties: LfActionNode => Set[LfPartyId] = { case _: LfNodeCreate => Set.empty - case node @ LfNodeFetch(_, _, noActors, _, _, _, _, _, _) if noActors.isEmpty => + case node @ LfNodeFetch(_, _, noActors, _, _, _, _, _) if noActors.isEmpty => throw new IllegalArgumentException(s"Fetch node $node without acting parties.") - case LfNodeFetch(_, _, actors, _, _, _, _, _, _) => actors + case LfNodeFetch(_, _, actors, _, _, _, _, _) => actors case n: LfNodeExercises => n.actingParties diff --git a/community/common/src/main/scala/com/digitalasset/canton/util/retry/RetryEither.scala b/community/common/src/main/scala/com/digitalasset/canton/util/retry/RetryEither.scala index 913d73ed8..5f22287dc 100644 --- a/community/common/src/main/scala/com/digitalasset/canton/util/retry/RetryEither.scala +++ 
b/community/common/src/main/scala/com/digitalasset/canton/util/retry/RetryEither.scala @@ -3,10 +3,11 @@ package com.digitalasset.canton.util.retry +import cats.data.EitherT import cats.syntax.either._ import cats.syntax.flatMap._ import com.digitalasset.canton.concurrent.Threading -import com.digitalasset.canton.lifecycle.CloseContext +import com.digitalasset.canton.lifecycle.{CloseContext, UnlessShutdown} import com.digitalasset.canton.logging.{ErrorLoggingContext, TracedLogger} import com.digitalasset.canton.util.LoggerUtil import org.slf4j.event.Level @@ -15,8 +16,7 @@ import org.slf4j.event.Level * Only provides a Pause-based retry. */ object RetryEither { - - def apply[A, B]( + def retry[A, B]( maxRetries: Int, waitInMs: Long, operationName: String, @@ -29,35 +29,64 @@ object RetryEither { )(implicit loggingContext: ErrorLoggingContext, closeContext: CloseContext, - ): Either[A, B] = { + ): EitherT[UnlessShutdown, A, B] = { + retryUnlessShutdown( + maxRetries, + waitInMs, + operationName, + logger, + stopOnLeft, + retryLogLevel, + failLogLevel, + )( + EitherT(UnlessShutdown.Outcome(body)) + ) + } + + def retryUnlessShutdown[A, B]( + maxRetries: Int, + waitInMs: Long, + operationName: String, + logger: TracedLogger, + stopOnLeft: Option[A => Boolean] = None, + retryLogLevel: Level = Level.INFO, + failLogLevel: Level = Level.WARN, + )( + body: => EitherT[UnlessShutdown, A, B] + )(implicit + loggingContext: ErrorLoggingContext, + closeContext: CloseContext, + ): EitherT[UnlessShutdown, A, B] = { maxRetries.tailRecM { retryCount => - body - .map(Right(_)) - .leftFlatMap { err => - if (closeContext.flagCloseable.isClosing) { - // Stop the retry attempts if caller is closing - Left(err) - } else if (stopOnLeft.exists(fn => fn(err))) { - // Stop the retry attempts on this particular Left if stopOnLeft is true - Left(err) - } else if (retryCount <= 0) { - // Stop the recursion with the error if we exhausted the max retries - LoggerUtil.logAtLevel( - failLogLevel, - s"Operation $operationName failed, exhausted retries: $err", - ) - Left(err) - } else { - // Retry the operation if it failed but we have retries left - LoggerUtil.logAtLevel( - retryLogLevel, - s"Operation $operationName failed, retrying in ${waitInMs}ms: $err", - ) - Threading.sleep(waitInMs) - val nextRetry = if (retryCount == Int.MaxValue) Int.MaxValue else retryCount - 1 - Right(Left(nextRetry)) + EitherT { + closeContext.flagCloseable.performUnlessClosing(body)(loggingContext.traceContext).flatMap { + _.value.map { + _.map(Right(_)) + .leftFlatMap { err => + if (stopOnLeft.exists(fn => fn(err))) { + // Stop the retry attempts on this particular Left if stopOnLeft is true + Left(err) + } else if (retryCount <= 0) { + // Stop the recursion with the error if we exhausted the max retries + LoggerUtil.logAtLevel( + failLogLevel, + s"Operation $operationName failed, exhausted retries: $err", + ) + Left(err) + } else { + // Retry the operation if it failed but we have retries left + LoggerUtil.logAtLevel( + retryLogLevel, + s"Operation $operationName failed, retrying in ${waitInMs}ms: $err", + ) + Threading.sleep(waitInMs) + val nextRetry = if (retryCount == Int.MaxValue) Int.MaxValue else retryCount - 1 + Right(Left(nextRetry)) + } + } } } + } } } diff --git a/community/common/src/main/scala/com/digitalasset/canton/version/CantonVersion.scala b/community/common/src/main/scala/com/digitalasset/canton/version/CantonVersion.scala index 81aac9c40..5c6f68747 100644 --- 
a/community/common/src/main/scala/com/digitalasset/canton/version/CantonVersion.scala +++ b/community/common/src/main/scala/com/digitalasset/canton/version/CantonVersion.scala @@ -101,9 +101,11 @@ object CantonVersion { private val releaseVersionToProtocolVersions: Map[ReleaseVersion, List[ProtocolVersion]] = Map( ReleaseVersion.v2_0_0_snapshot -> List(v2_0_0_snapshot), ReleaseVersion.v2_0_0 -> List(v2_0_0), - ReleaseVersion.v2_1_0_snapshot -> List(v2_0_0, v2_1_0_snapshot), - // next (most likely): - // ReleaseVersion.v2_1_0 -> List(v2_0_0, v2_1_0), + ReleaseVersion.v2_1_0_snapshot -> List(v2_0_0), + ReleaseVersion.v2_1_0 -> List(v2_0_0), + ReleaseVersion.v2_1_0_rc1 -> List(v2_0_0), + ReleaseVersion.v2_1_1_snapshot -> List(v2_0_0), + ReleaseVersion.v2_2_0_snapshot -> List(v2_0_0), ) private[version] def getSupportedProtocolsParticipantForRelease( @@ -184,6 +186,9 @@ object ReleaseVersion extends CompanionTrait { lazy val v2_0_0: ReleaseVersion = ReleaseVersion(2, 0, 0) lazy val v2_1_0_snapshot: ReleaseVersion = ReleaseVersion(2, 1, 0, Some("SNAPSHOT")) lazy val v2_1_0: ReleaseVersion = ReleaseVersion(2, 1, 0) + lazy val v2_1_0_rc1: ReleaseVersion = ReleaseVersion(2, 1, 0, Some("rc1")) + lazy val v2_1_1_snapshot: ReleaseVersion = ReleaseVersion(2, 1, 1, Some("SNAPSHOT")) + lazy val v2_2_0_snapshot: ReleaseVersion = ReleaseVersion(2, 2, 0, Some("SNAPSHOT")) } /** A Canton protocol version is a snapshot of how the Canton protocols, that nodes use to communicate, function at a certain point in time @@ -293,8 +298,6 @@ object ProtocolVersion extends CompanionTrait { // TODO(i8793): signifies an instance where the protocol version is currently hardcoded but should likely be // passed in via propagating the protocol version set in the domain parameters lazy val v2_0_0_Todo_i8793: ProtocolVersion = v2_0_0 - lazy val v2_1_0_snapshot: ProtocolVersion = ProtocolVersion(2, 1, 0, Some("SNAPSHOT")) - lazy val v2_1_0: ProtocolVersion = ProtocolVersion(2, 1, 0) } sealed trait HandshakeError { diff --git a/community/common/src/test/java/com/digitalasset/canton/sequencing/protocol/RecipientsTest.scala b/community/common/src/test/java/com/digitalasset/canton/sequencing/protocol/RecipientsTest.scala index 6bf549c86..2799b5744 100644 --- a/community/common/src/test/java/com/digitalasset/canton/sequencing/protocol/RecipientsTest.scala +++ b/community/common/src/test/java/com/digitalasset/canton/sequencing/protocol/RecipientsTest.scala @@ -3,9 +3,9 @@ package com.digitalasset.canton.sequencing.protocol -import cats.data.{NonEmptyList, NonEmptySet} import cats.syntax.option._ -import com.digitalasset.canton.topology.{Member, ParticipantId} +import com.daml.nonempty.NonEmpty +import com.digitalasset.canton.topology.ParticipantId import com.digitalasset.canton.sequencing.protocol.Recipients.cc import com.digitalasset.canton.sequencing.protocol.RecipientsTest._ import com.digitalasset.canton.{BaseTest, HasExecutionContext} @@ -13,7 +13,7 @@ import org.scalatest.wordspec.AnyWordSpec class RecipientsTest extends AnyWordSpec with BaseTest with HasExecutionContext { - lazy val recipients: Recipients = Recipients(NonEmptyList.of(t5, t2, t3, t5, t6)) + lazy val recipients: Recipients = Recipients(NonEmpty(Seq, t5, t2, t3, t5, t6)) "Recipients" should { @@ -22,11 +22,11 @@ class RecipientsTest extends AnyWordSpec with BaseTest with HasExecutionContext } "filter for a member that appears in one tree" in { - recipients.forMember(p6) shouldBe Some(Recipients(NonEmptyList.of(t6))) + recipients.forMember(p6) shouldBe 
Some(Recipients(NonEmpty(Seq, t6))) } "filter for a member that appears in several trees" in { - recipients.forMember(p3) shouldBe Some(Recipients(NonEmptyList.of(t3, t3, t3))) + recipients.forMember(p3) shouldBe Some(Recipients(NonEmpty(Seq, t3, t3, t3))) } "be preserved through serialization / deserialization" in { @@ -41,8 +41,9 @@ class RecipientsTest extends AnyWordSpec with BaseTest with HasExecutionContext } "test for a single group when present" in { - val recipients = Recipients(NonEmptyList.of(RecipientsTree(NonEmptySet.of(p2, p1, p3), Nil))) - recipients.asSingleGroup shouldBe NonEmptySet.of[Member](p3, p2, p1).some + val recipients = + Recipients(NonEmpty(Seq, RecipientsTree.leaf(NonEmpty.mk(Set, p2, p1, p3)))) + recipients.asSingleGroup shouldBe NonEmpty.mk(Set, p3, p2, p1).some } "test for a single group when not present" in { @@ -50,9 +51,10 @@ class RecipientsTest extends AnyWordSpec with BaseTest with HasExecutionContext // Multiple trees val case1 = Recipients( - NonEmptyList.of( - RecipientsTree(NonEmptySet.of(p2, p1, p3), Nil), - RecipientsTree(NonEmptySet.of(p2), Nil), + NonEmpty( + List, + RecipientsTree.leaf(NonEmpty.mk(Set, p2, p1, p3)), + RecipientsTree.leaf(NonEmpty.mk(Set, p2)), ) ) case1.asSingleGroup shouldBe None @@ -60,11 +62,12 @@ class RecipientsTest extends AnyWordSpec with BaseTest with HasExecutionContext // Tree with height > 1 val case2 = Recipients( - NonEmptyList.of( + NonEmpty( + List, RecipientsTree( - NonEmptySet.of(p2, p1, p3), - List(RecipientsTree(NonEmptySet.of(p1), Nil)), - ) + NonEmpty.mk(Set, p2, p1, p3), + Seq(RecipientsTree.leaf(NonEmpty.mk(Set, p1))), + ), ) ) case2.asSingleGroup shouldBe None @@ -72,24 +75,25 @@ class RecipientsTest extends AnyWordSpec with BaseTest with HasExecutionContext "correctly compute leaf members" in { val recipients = Recipients( - NonEmptyList.of( + NonEmpty( + List, RecipientsTree( - NonEmptySet.of(participant(1), participant(2)), - List( - RecipientsTree(NonEmptySet.of(participant(3)), List()), - RecipientsTree(NonEmptySet.of(participant(4)), List()), + NonEmpty.mk(Set, participant(1), participant(2)), + Seq( + RecipientsTree.leaf(NonEmpty.mk(Set, participant(3))), + RecipientsTree.leaf(NonEmpty.mk(Set, participant(4))), RecipientsTree( - NonEmptySet.of(participant(5)), - List( - RecipientsTree(NonEmptySet.of(participant(6), participant(2)), List()) + NonEmpty.mk(Set, participant(5)), + Seq( + RecipientsTree.leaf(NonEmpty.mk(Set, participant(6), participant(2))) ), ), ), - ) + ), ) ) - recipients.leafMembers shouldBe NonEmptySet - .of[Member](participant(2), participant(3), participant(4), participant(6)) + recipients.leafMembers shouldBe + NonEmpty.mk(Set, participant(2), participant(3), participant(4), participant(6)) } } } @@ -104,15 +108,15 @@ object RecipientsTest { lazy val p6 = ParticipantId("participant6") lazy val p7 = ParticipantId("participant7") - lazy val t1 = new RecipientsTree(NonEmptySet.of(p1), List.empty) - lazy val t2 = new RecipientsTree(NonEmptySet.of(p2), List.empty) + lazy val t1 = RecipientsTree.leaf(NonEmpty.mk(Set, p1)) + lazy val t2 = RecipientsTree.leaf(NonEmpty.mk(Set, p2)) - lazy val t3 = new RecipientsTree(NonEmptySet.of(p3), List(t1, t2)) - lazy val t4 = new RecipientsTree(NonEmptySet.of(p4), List.empty) + lazy val t3 = RecipientsTree(NonEmpty.mk(Set, p3), Seq(t1, t2)) + lazy val t4 = RecipientsTree.leaf(NonEmpty.mk(Set, p4)) - lazy val t5 = new RecipientsTree(NonEmptySet.of(p5), List(t3, t4)) + lazy val t5 = RecipientsTree(NonEmpty.mk(Set, p5), Seq(t3, t4)) - lazy val t6 = 
new RecipientsTree(NonEmptySet.of(p6), List.empty) + lazy val t6 = RecipientsTree.leaf(NonEmpty.mk(Set, p6)) def testInstance: Recipients = { val dummyMember = ParticipantId("dummyParticipant") diff --git a/community/common/src/test/java/com/digitalasset/canton/sequencing/protocol/RecipientsTreeTest.scala b/community/common/src/test/java/com/digitalasset/canton/sequencing/protocol/RecipientsTreeTest.scala index d33c4e750..8d26f8428 100644 --- a/community/common/src/test/java/com/digitalasset/canton/sequencing/protocol/RecipientsTreeTest.scala +++ b/community/common/src/test/java/com/digitalasset/canton/sequencing/protocol/RecipientsTreeTest.scala @@ -3,26 +3,26 @@ package com.digitalasset.canton.sequencing.protocol -import cats.data.NonEmptySet +import com.daml.nonempty.NonEmpty import com.digitalasset.canton.BaseTest -import com.digitalasset.canton.topology.ParticipantId +import com.digitalasset.canton.topology.{Member, ParticipantId} import org.scalatest.wordspec.AnyWordSpec class RecipientsTreeTest extends AnyWordSpec with BaseTest { - lazy val p1 = ParticipantId("participant1") - lazy val p2 = ParticipantId("participant2") - lazy val p3 = ParticipantId("participant3") - lazy val p4 = ParticipantId("participant4") - lazy val p5 = ParticipantId("participant5") - lazy val p6 = ParticipantId("participant6") + lazy val p1: Member = ParticipantId("participant1") + lazy val p2: Member = ParticipantId("participant2") + lazy val p3: Member = ParticipantId("participant3") + lazy val p4: Member = ParticipantId("participant4") + lazy val p5: Member = ParticipantId("participant5") + lazy val p6: Member = ParticipantId("participant6") - lazy val t1 = new RecipientsTree(NonEmptySet.of(p1, p5), List.empty) - lazy val t2 = new RecipientsTree(NonEmptySet.of(p3), List.empty) - lazy val t3 = new RecipientsTree(NonEmptySet.of(p4, p2), List(t1, t2)) + lazy val t1 = RecipientsTree.leaf(NonEmpty(Set, p1, p5)) + lazy val t2 = RecipientsTree.leaf(NonEmpty(Set, p3)) + lazy val t3 = RecipientsTree(NonEmpty(Set, p4, p2), Seq(t1, t2)) - lazy val t4 = new RecipientsTree(NonEmptySet.of(p2, p6), List.empty) + lazy val t4 = RecipientsTree.leaf(NonEmpty(Set, p2, p6)) - lazy val t5 = new RecipientsTree(NonEmptySet.of(p1), List(t3, t4)) + lazy val t5 = RecipientsTree(NonEmpty(Set, p1), Seq(t3, t4)) "RecipientsTree" when { "allRecipients" should { diff --git a/community/common/src/main/protobuf/com/digitalasset/canton/test/hello.proto b/community/common/src/test/protobuf/com/digitalasset/canton/test/hello.proto similarity index 100% rename from community/common/src/main/protobuf/com/digitalasset/canton/test/hello.proto rename to community/common/src/test/protobuf/com/digitalasset/canton/test/hello.proto diff --git a/community/common/src/test/protobuf/com/digitalasset/canton/test/parsing-attack.proto b/community/common/src/test/protobuf/com/digitalasset/canton/test/parsing-attack.proto new file mode 100644 index 000000000..1738d87bd --- /dev/null +++ b/community/common/src/test/protobuf/com/digitalasset/canton/test/parsing-attack.proto @@ -0,0 +1,49 @@ +// Copyright (c) 2022 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
+// SPDX-License-Identifier: Apache-2.0 + +syntax = "proto3"; + +package com.digitalasset.canton.protobuf; + +message Base { + oneof sum { + string one = 1; + } +} + +// Same as Base, but with another field +message AddField { + oneof sum { + string one = 1; + } + DummyMessage three = 3; +} + +message DummyMessage { + string content = 1; +} + +message AttackAddField { + string one = 1; + int32 three = 3; +} + +message AttackAddFieldSameType { + string one = 1; + bytes three = 3; +} + + +// Same as Base, but with another variant +message AddVariant { + oneof sum { + string one = 1; + DummyMessage two = 2; + } +} + +message AttackAddVariant { + string one = 1; + bytes two = 2; +} + diff --git a/community/common/src/test/scala/com/digitalasset/canton/BaseTest.scala b/community/common/src/test/scala/com/digitalasset/canton/BaseTest.scala index 5938455c5..b42e38ed1 100644 --- a/community/common/src/test/scala/com/digitalasset/canton/BaseTest.scala +++ b/community/common/src/test/scala/com/digitalasset/canton/BaseTest.scala @@ -7,7 +7,7 @@ import cats.Functor import cats.data.{EitherT, OptionT} import com.digitalasset.canton.concurrent.{DirectExecutionContext, Threading} import com.digitalasset.canton.config.{DefaultProcessingTimeouts, ProcessingTimeout} -import com.digitalasset.canton.lifecycle.FutureUnlessShutdown +import com.digitalasset.canton.lifecycle.{FutureUnlessShutdown, UnlessShutdown} import com.digitalasset.canton.logging.{NamedLogging, SuppressingLogger} import com.digitalasset.canton.tracing.{NoReportingTracerProvider, TraceContext, W3CTraceContext} import com.digitalasset.canton.util.CheckedT @@ -281,6 +281,16 @@ trait BaseTest self.leftOrFail(eitherT)(clue).onShutdown(fail(s"Shutdown during $clue")) } + implicit class EitherTUnlessShutdownSyntax[E, A]( + eitherT: EitherT[UnlessShutdown, E, A] + ) { + def valueOrFailShutdown(clue: String)(implicit pos: Position): A = + self.valueOrFail(eitherT)(clue).onShutdown(fail(s"Shutdown during $clue")) + + def leftOrFailShutdown(clue: String)(implicit pos: Position): E = + self.leftOrFail(eitherT)(clue).onShutdown(fail(s"Shutdown during $clue")) + } + implicit class FutureUnlessShutdownSyntax[A](fut: FutureUnlessShutdown[A]) { def failOnShutdown(clue: String)(implicit ec: ExecutionContext, pos: Position): Future[A] = fut.onShutdown(fail(s"Shutdown during $clue")) diff --git a/community/common/src/test/scala/com/digitalasset/canton/data/GenTransactionTreeTest.scala b/community/common/src/test/scala/com/digitalasset/canton/data/GenTransactionTreeTest.scala index 661fb3673..da7a5adff 100644 --- a/community/common/src/test/scala/com/digitalasset/canton/data/GenTransactionTreeTest.scala +++ b/community/common/src/test/scala/com/digitalasset/canton/data/GenTransactionTreeTest.scala @@ -3,14 +3,15 @@ package com.digitalasset.canton.data -import cats.data.{NonEmptyList, NonEmptySet} +import cats.data.NonEmptyList +import com.daml.nonempty.NonEmpty import com.digitalasset.canton.crypto.SecureRandomness import com.digitalasset.canton.{BaseTest, HasExecutionContext, LfPartyId} import com.digitalasset.canton.data.MerkleTree.RevealIfNeedBe import com.digitalasset.canton.topology.{ParticipantId, TestingIdentityFactory} import com.digitalasset.canton.topology.transaction.{ - ParticipantPermission, ParticipantAttributes, + ParticipantPermission, TrustLevel, } import com.digitalasset.canton.protocol._ @@ -446,7 +447,7 @@ class GenTransactionTreeTest extends AnyWordSpec with BaseTest with HasExecution import GenTransactionTreeTest._ "correctly compute 
recipients from witnesses" in { - def mkWitnesses(setup: List[Set[Int]]): Witnesses = Witnesses(setup.map(_.map(informee))) + def mkWitnesses(setup: Seq[Set[Int]]): Witnesses = Witnesses(setup.map(_.map(informee))) // Maps parties to participants; parties have IDs that start at 1, participants have IDs that start at 11 def topology = TestingIdentityFactory( @@ -470,7 +471,7 @@ class GenTransactionTreeTest extends AnyWordSpec with BaseTest with HasExecution }, ).topologySnapshot() val witnesses = mkWitnesses( - List( + Seq( Set(1, 2), Set(1, 3), Set(2, 4), @@ -483,20 +484,21 @@ class GenTransactionTreeTest extends AnyWordSpec with BaseTest with HasExecution .toRecipients(topology) .valueOr(err => fail(err.message)) .futureValue shouldBe Recipients( - NonEmptyList.of( + NonEmpty( + Seq, RecipientsTree( - NonEmptySet.of(participant(16)), - List( + NonEmpty.mk(Set, participant(16)), + Seq( RecipientsTree( - NonEmptySet.of(participant(11), Set(12, 13, 15).map(participant).toSeq: _*), - List( + NonEmpty(Set, 11, 12, 13, 15).map(participant), + Seq( RecipientsTree( - NonEmptySet.of(participant(12), participant(14)), - List( + NonEmpty.mk(Set, participant(12), participant(14)), + Seq( RecipientsTree( - NonEmptySet.of(participant(11), participant(13)), - List( - RecipientsTree(NonEmptySet.of(participant(11), participant(12)), List()) + NonEmpty.mk(Set, participant(11), participant(13)), + Seq( + RecipientsTree.leaf(NonEmpty.mk(Set, participant(11), participant(12))) ), ) ), @@ -504,7 +506,7 @@ class GenTransactionTreeTest extends AnyWordSpec with BaseTest with HasExecution ), ) ), - ) + ), ) ) } @@ -515,5 +517,4 @@ object GenTransactionTreeTest { def party(i: Int): LfPartyId = LfPartyId.assertFromString(s"party$i::1") def informee(i: Int): Informee = PlainInformee(party(i)) def participant(i: Int): ParticipantId = ParticipantId(s"participant$i") - } diff --git a/community/common/src/test/scala/com/digitalasset/canton/data/MerkleSeqTest.scala b/community/common/src/test/scala/com/digitalasset/canton/data/MerkleSeqTest.scala index 248fb3cfa..67eec1d0a 100644 --- a/community/common/src/test/scala/com/digitalasset/canton/data/MerkleSeqTest.scala +++ b/community/common/src/test/scala/com/digitalasset/canton/data/MerkleSeqTest.scala @@ -4,7 +4,7 @@ package com.digitalasset.canton.data import cats.syntax.either._ -import com.digitalasset.canton.BaseTest +import com.digitalasset.canton.{BaseTest, ProtoDeserializationError} import com.digitalasset.canton.crypto.{Hash, HashOps, TestHash} import com.digitalasset.canton.data.MerkleSeq.{Branch, MerkleSeqElement, Singleton} import com.digitalasset.canton.data.MerkleTree.{ @@ -110,9 +110,12 @@ class MerkleSeqTest extends AnyWordSpec with BaseTest { val merkleSeqP = merkleSeq.toProtoV0 val merkleSeqDeserialized = MerkleSeq - .fromProtoV0(hashOps, MerkleTreeTest.leafFromByteString(Leaf1)(_).leftMap(_.toString))( - merkleSeqP - ) + .fromProtoV0( + hashOps, + MerkleTreeTest + .leafFromByteString(Leaf1)(_) + .leftMap(ProtoDeserializationError.CryptoDeserializationError(_)), + )(merkleSeqP) .value merkleSeqDeserialized shouldEqual merkleSeq diff --git a/community/common/src/test/scala/com/digitalasset/canton/lifecycle/LifecycleTest.scala b/community/common/src/test/scala/com/digitalasset/canton/lifecycle/LifecycleTest.scala index 8f87247b6..0e020c068 100644 --- a/community/common/src/test/scala/com/digitalasset/canton/lifecycle/LifecycleTest.scala +++ b/community/common/src/test/scala/com/digitalasset/canton/lifecycle/LifecycleTest.scala @@ -60,7 +60,7 @@ class 
LifecycleTest extends AnyWordSpec with BaseTest { } ): _* ) - thrown.getMessage shouldBe "Unable to close 'component-1', 'component-2', 'component-3'." + thrown.getMessage shouldBe "Unable to close Seq('component-1', 'component-2', 'component-3')." } } } diff --git a/community/common/src/test/scala/com/digitalasset/canton/protobuf/ProtobufParsingAttackTest.scala b/community/common/src/test/scala/com/digitalasset/canton/protobuf/ProtobufParsingAttackTest.scala new file mode 100644 index 000000000..a583962f3 --- /dev/null +++ b/community/common/src/test/scala/com/digitalasset/canton/protobuf/ProtobufParsingAttackTest.scala @@ -0,0 +1,70 @@ +// Copyright (c) 2022 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. +// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.protobuf + +import com.digitalasset.canton.BaseTest +import com.digitalasset.canton.ProtoDeserializationError.BufferException +import com.digitalasset.canton.serialization.ProtoConverter +import com.google.protobuf.ByteString +import org.scalatest.wordspec.AnyWordSpec + +class ProtobufParsingAttackTest extends AnyWordSpec with BaseTest { + + "adding a field to a message definition" should { + "ignore wrong field types" in { + + val attackAddField = AttackAddField("foo", 123).toByteString + + // Base parser can parse this + ProtoConverter.protoParser(Base.parseFrom)(attackAddField) shouldBe + Right(Base(Base.Sum.One("foo"))) + + // Parser for the message after the field has been added will ignore the additional field + ProtoConverter.protoParser(AddField.parseFrom)(attackAddField) shouldBe + Right(AddField(AddField.Sum.One("foo"), None)) + } + + "explode when the field deserialization fails" in { + val attackAddField = + AttackAddFieldSameType("foo", ByteString.copyFromUtf8("BYTESTRING")).toByteString + + // Base parser can parse this + ProtoConverter.protoParser(Base.parseFrom)(attackAddField) shouldBe + Right(Base(Base.Sum.One("foo"))) + + // Parser for the message after the field has been added will explode + ProtoConverter + .protoParser(AddField.parseFrom)(attackAddField) + .left + .value shouldBe a[BufferException] + } + } + + "adding an alternative to a one-of" should { + "produce different parsing results" in { + + val dummyMessage = DummyMessage("dummy") + val attackAddVariant = AttackAddVariant("bar", dummyMessage.toByteString).toByteString + + ProtoConverter.protoParser(Base.parseFrom)(attackAddVariant) shouldBe + Right(Base(Base.Sum.One("bar"))) + + ProtoConverter.protoParser(AddVariant.parseFrom)(attackAddVariant) shouldBe + Right(AddVariant(AddVariant.Sum.Two(dummyMessage))) + } + + "explode when given bad alternatives" in { + val attackAddVariant = + AttackAddVariant("bar", ByteString.copyFromUtf8("BYTESTRING")).toByteString + + ProtoConverter.protoParser(Base.parseFrom)(attackAddVariant) shouldBe + Right(Base(Base.Sum.One("bar"))) + + ProtoConverter + .protoParser(AddVariant.parseFrom)(attackAddVariant) + .left + .value shouldBe a[BufferException] + } + } +} diff --git a/community/common/src/test/scala/com/digitalasset/canton/protocol/ExampleTransactionFactory.scala b/community/common/src/test/scala/com/digitalasset/canton/protocol/ExampleTransactionFactory.scala index a955e7bfa..bc0ce13c5 100644 --- a/community/common/src/test/scala/com/digitalasset/canton/protocol/ExampleTransactionFactory.scala +++ b/community/common/src/test/scala/com/digitalasset/canton/protocol/ExampleTransactionFactory.scala @@ -3,7 +3,6 @@ package com.digitalasset.canton.protocol -import 
cats.data.NonEmptyList import cats.syntax.option._ import java.util.UUID @@ -22,6 +21,7 @@ import com.daml.lf.value.Value.{ ValueUnit, VersionedValue, } +import com.daml.nonempty.NonEmpty import com.digitalasset.canton._ import com.digitalasset.canton.crypto._ import com.digitalasset.canton.crypto.provider.symbolic.SymbolicPureCrypto @@ -118,7 +118,6 @@ object ExampleTransactionFactory { stakeholders = signatories ++ observers, key = key, byKey = byKey, - byInterface = None, version = version, ) @@ -138,7 +137,6 @@ object ExampleTransactionFactory { signatories = signatories, stakeholders = signatories ++ observers, key, - byInterface = None, version = transactionVersion, ) } @@ -170,7 +168,6 @@ object ExampleTransactionFactory { exerciseResult = exerciseResult, key = key, byKey = byKey, - byInterface = None, version = transactionVersion, ) @@ -227,9 +224,9 @@ object ExampleTransactionFactory { }: _*) val version = CantonOnly.maxTransactionVersion( - NonEmptyList - .fromList(nodesMap.values.toList.mapFilter(_.optVersion)) - .getOrElse(NonEmptyList.one(transactionVersion)) + NonEmpty + .from(nodesMap.values.toSeq.mapFilter(_.optVersion)) + .getOrElse(NonEmpty(Seq, transactionVersion)) ) CantonOnly.lfVersionedTransaction(version, nodesMap, roots) diff --git a/community/common/src/test/scala/com/digitalasset/canton/protocol/WellFormedTransactionMergeTest.scala b/community/common/src/test/scala/com/digitalasset/canton/protocol/WellFormedTransactionMergeTest.scala index 4e3e48221..bf3e01a96 100644 --- a/community/common/src/test/scala/com/digitalasset/canton/protocol/WellFormedTransactionMergeTest.scala +++ b/community/common/src/test/scala/com/digitalasset/canton/protocol/WellFormedTransactionMergeTest.scala @@ -3,9 +3,9 @@ package com.digitalasset.canton.protocol -import cats.data.NonEmptyList import com.daml.ledger.client.binding import com.daml.lf.transaction.test.TransactionBuilder +import com.daml.nonempty.NonEmpty import com.digitalasset.canton.ComparesLfTransactions.TxTree import com.digitalasset.canton.data.CantonTimestamp import com.digitalasset.canton.examples.Iou @@ -292,8 +292,9 @@ class WellFormedTransactionMergeTest transactions: WithRollbackScope[WellFormedTransaction[WithSuffixes]]* ) = { valueOrFail( - WellFormedTransaction - .merge(NonEmptyList.of(transactions.head, transactions.tail: _*)) + WellFormedTransaction.merge( + NonEmpty.from(transactions).valueOrFail("Cannot merge empty list of transactions") + ) )("unexpectedly failed to merge").unwrap } diff --git a/community/common/src/test/scala/com/digitalasset/canton/protocol/WellFormedTransactionTest.scala b/community/common/src/test/scala/com/digitalasset/canton/protocol/WellFormedTransactionTest.scala index b1a5e45aa..cc86ddd7d 100644 --- a/community/common/src/test/scala/com/digitalasset/canton/protocol/WellFormedTransactionTest.scala +++ b/community/common/src/test/scala/com/digitalasset/canton/protocol/WellFormedTransactionTest.scala @@ -236,7 +236,6 @@ class WellFormedTransactionTest extends AnyWordSpec with BaseTest with HasExecut exerciseResult = None, key = None, byKey = false, - byInterface = None, version = ExampleTransactionFactory.transactionVersion, ), ), diff --git a/community/common/src/test/scala/com/digitalasset/canton/resource/DbStorageSingleTest.scala b/community/common/src/test/scala/com/digitalasset/canton/resource/DbStorageSingleTest.scala index bf6ca9c26..2379e0e19 100644 --- a/community/common/src/test/scala/com/digitalasset/canton/resource/DbStorageSingleTest.scala +++ 
b/community/common/src/test/scala/com/digitalasset/canton/resource/DbStorageSingleTest.scala @@ -40,7 +40,7 @@ trait DbStorageSingleTest extends AsyncWordSpec with BaseTest with CloseableTest DefaultProcessingTimeouts.testing, loggerFactory, ) - .value shouldBe a[DbStorageSingle] + .valueOrFailShutdown("storage create") shouldBe a[DbStorageSingle] } "fail on invalid credentials" in { @@ -55,8 +55,7 @@ trait DbStorageSingleTest extends AsyncWordSpec with BaseTest with CloseableTest DefaultProcessingTimeouts.testing, loggerFactory, ) - .left - .value shouldBe a[String] + .leftOrFailShutdown("storage create") shouldBe a[String] } } @@ -72,8 +71,7 @@ trait DbStorageSingleTest extends AsyncWordSpec with BaseTest with CloseableTest DefaultProcessingTimeouts.testing, loggerFactory, ) - .left - .value shouldBe a[String] + .leftOrFailShutdown("storage create") shouldBe a[String] } } @@ -89,8 +87,7 @@ trait DbStorageSingleTest extends AsyncWordSpec with BaseTest with CloseableTest DefaultProcessingTimeouts.testing, loggerFactory, ) - .left - .value shouldBe a[String] + .leftOrFailShutdown("storage create") shouldBe a[String] } } } diff --git a/community/common/src/test/scala/com/digitalasset/canton/store/db/DbStorageSetup.scala b/community/common/src/test/scala/com/digitalasset/canton/store/db/DbStorageSetup.scala index c8cf53652..f4621b86b 100644 --- a/community/common/src/test/scala/com/digitalasset/canton/store/db/DbStorageSetup.scala +++ b/community/common/src/test/scala/com/digitalasset/canton/store/db/DbStorageSetup.scala @@ -57,9 +57,9 @@ abstract class PostgresDbStorageSetup( val migrationResult = new CommunityDbMigrationsFactory(loggerFactory).create(config).migrateDatabase() // throw so the first part of the test that attempts to use storage will fail with an exception - migrationResult.left.foreach(err => - throw new RuntimeException(show"Failed to migrate database: $err") - ) + migrationResult + .valueOr(err => throw new RuntimeException(show"Failed to migrate database: $err")) + .onShutdown(throw new RuntimeException("Migration interrupted due to shutdown")) } s } @@ -180,8 +180,8 @@ class H2DbStorageSetup(override protected val loggerFactory: NamedLoggerFactory) migrations .migrateIfFresh() - .left - .foreach(err => throw new RuntimeException(show"Failed to migrate database: $err")) + .valueOr(err => throw new RuntimeException(show"Failed to migrate database: $err")) + .onShutdown(throw new RuntimeException("Shutdown during migration")) } object DbStorageSetup { diff --git a/community/common/src/test/scala/com/digitalasset/canton/topology/store/TopologyStoreTest.scala b/community/common/src/test/scala/com/digitalasset/canton/topology/store/TopologyStoreTest.scala index e009f4471..4f28e95e9 100644 --- a/community/common/src/test/scala/com/digitalasset/canton/topology/store/TopologyStoreTest.scala +++ b/community/common/src/test/scala/com/digitalasset/canton/topology/store/TopologyStoreTest.scala @@ -267,6 +267,24 @@ trait TopologyStoreTest } } + "bootstrap correctly updates timestamp" in { + val store = mk() + for { + _ <- store.bootstrap( + StoredTopologyTransactions( + Seq( + StoredTopologyTransaction(ts.plusSeconds(1), None, okm1), + StoredTopologyTransaction(ts.plusSeconds(2), None, ps1), + StoredTopologyTransaction(ts.plusSeconds(3), None, ps2), + StoredTopologyTransaction(ts.plusSeconds(4), None, ps3), + StoredTopologyTransaction(ts.plusSeconds(5), None, p2p1), + ) + ) + ) + currentTs <- store.timestamp + } yield currentTs shouldBe Some(ts.plusSeconds(5)) + } + "successfully 
append new items" in { val store = mk() for { diff --git a/community/common/src/test/scala/com/digitalasset/canton/util/BatchAggregatorTest.scala b/community/common/src/test/scala/com/digitalasset/canton/util/BatchAggregatorTest.scala index ddfcdc660..3c7103628 100644 --- a/community/common/src/test/scala/com/digitalasset/canton/util/BatchAggregatorTest.scala +++ b/community/common/src/test/scala/com/digitalasset/canton/util/BatchAggregatorTest.scala @@ -3,8 +3,8 @@ package com.digitalasset.canton.util -import cats.data.NonEmptyList import cats.syntax.traverse._ +import com.daml.nonempty.NonEmpty import com.digitalasset.canton.concurrent.Threading import com.digitalasset.canton.config.BatchAggregatorConfig import com.digitalasset.canton.config.RequireTypes.PositiveNumeric @@ -24,11 +24,11 @@ import scala.util.Random class BatchAggregatorTest extends AnyWordSpec with BaseTest with HasExecutionContext { type K = Int type V = String - type BatchGetterType = NonEmptyList[Traced[K]] => Future[Iterable[V]] + type BatchGetterType = NonEmpty[Seq[Traced[K]]] => Future[Iterable[V]] private val defaultKeyToValue: K => V = _.toString - private val defaultBatchGetter: NonEmptyList[Traced[K]] => Future[Iterable[V]] = keys => - Future(keys.toList.map(item => defaultKeyToValue(item.value))) + private val defaultBatchGetter: NonEmpty[Seq[Traced[K]]] => Future[Iterable[V]] = keys => + Future(keys.map(item => defaultKeyToValue(item.value))) private val defaultMaximumInFlight: Int = 5 private val defaultMaximumBatchSize: Int = 5 @@ -40,7 +40,7 @@ class BatchAggregatorTest extends AnyWordSpec with BaseTest with HasExecutionCon val processor = new BatchAggregator.Processor[K, V] { override def kind: String = "item" override def logger: TracedLogger = BatchAggregatorTest.this.logger - override def executeBatch(items: NonEmptyList[Traced[K]])(implicit + override def executeBatch(items: NonEmpty[Seq[Traced[K]]])(implicit traceContext: TraceContext ): Future[Iterable[V]] = batchGetter(items) override def prettyItem: Pretty[K] = implicitly diff --git a/community/common/src/test/scala/com/digitalasset/canton/util/LfTransactionBuilder.scala b/community/common/src/test/scala/com/digitalasset/canton/util/LfTransactionBuilder.scala index 1cf2bc897..f0c4fd87c 100644 --- a/community/common/src/test/scala/com/digitalasset/canton/util/LfTransactionBuilder.scala +++ b/community/common/src/test/scala/com/digitalasset/canton/util/LfTransactionBuilder.scala @@ -81,7 +81,6 @@ object LfTransactionBuilder { signatories = signatories, stakeholders = signatories ++ observers, key, - byInterface = None, ExampleTransactionFactory.transactionVersion, ) } yield ((nodeId, Map(nodeId -> node)), cid) @@ -128,7 +127,6 @@ object LfTransactionBuilder { exerciseResult = exerciseResult, key = key, byKey = key.nonEmpty, // Not true in general, but okay for tests - byInterface = None, ExampleTransactionFactory.transactionVersion, ), children, @@ -151,7 +149,6 @@ object LfTransactionBuilder { signatories = signatories, stakeholders = signatories ++ observers, key, - byInterface = None, ExampleTransactionFactory.transactionVersion, ) ) @@ -174,7 +171,6 @@ object LfTransactionBuilder { stakeholders = signatories ++ observers, key = key, byKey = key.nonEmpty, // Not true in general, but okay for tests - byInterface = None, ExampleTransactionFactory.transactionVersion, ) ) diff --git a/community/demo/src/main/daml/ai-analysis/AIAnalysis.daml b/community/demo/src/main/daml/ai-analysis/AIAnalysis.daml index fcab9ff77..3b1d4c317 100644 --- 
a/community/demo/src/main/daml/ai-analysis/AIAnalysis.daml +++ b/community/demo/src/main/daml/ai-analysis/AIAnalysis.daml @@ -27,9 +27,10 @@ template OfferAnalysis do registerData <- fetch registerId assert (registerData.owner == owner) + newRegisterId <- exercise registerId AddObserver with party = registry tm <- getTime let clientId = sha256 ((show tm) <> (show owner)) - pa <- create PendingAnalysis with registry, owner, analyser, clientId, registerId + pa <- create PendingAnalysis with registry, owner, analyser, clientId, registerId = newRegisterId an <- create AnonymizedRecords with registry, analyser, clientId, records = registerData.records return (an, pa) diff --git a/community/demo/src/main/daml/ai-analysis/daml.yaml b/community/demo/src/main/daml/ai-analysis/daml.yaml index c8c5f6940..ac6977563 100644 --- a/community/demo/src/main/daml/ai-analysis/daml.yaml +++ b/community/demo/src/main/daml/ai-analysis/daml.yaml @@ -1,4 +1,4 @@ -sdk-version: 2.1.0-snapshot.20220328.9630.0.66c37bad +sdk-version: 2.1.0-snapshot.20220407.9685.0.7ed507cf name: ai-analysis parties: - Alice @@ -8,7 +8,7 @@ exposed-modules: - AIAnalysis source: AIAnalysis.daml init-script: AIAnalysis:setup -version: 2.1.0 +version: 2.2.0 dependencies: - daml-prim - daml-stdlib diff --git a/community/demo/src/main/daml/bank/daml.yaml b/community/demo/src/main/daml/bank/daml.yaml index 9c89213f0..7a0bac000 100644 --- a/community/demo/src/main/daml/bank/daml.yaml +++ b/community/demo/src/main/daml/bank/daml.yaml @@ -1,4 +1,4 @@ -sdk-version: 2.1.0-snapshot.20220328.9630.0.66c37bad +sdk-version: 2.1.0-snapshot.20220407.9685.0.7ed507cf name: bank parties: - Alice @@ -7,7 +7,7 @@ exposed-modules: - Bank source: Bank.daml init-script: Bank:setup -version: 2.1.0 +version: 2.2.0 dependencies: - daml-prim - daml-stdlib diff --git a/community/demo/src/main/daml/doctor/daml.yaml b/community/demo/src/main/daml/doctor/daml.yaml index b35808209..78e9ed4bf 100644 --- a/community/demo/src/main/daml/doctor/daml.yaml +++ b/community/demo/src/main/daml/doctor/daml.yaml @@ -1,4 +1,4 @@ -sdk-version: 2.1.0-snapshot.20220328.9630.0.66c37bad +sdk-version: 2.1.0-snapshot.20220407.9685.0.7ed507cf name: doctor parties: - Alice @@ -7,7 +7,7 @@ exposed-modules: - Doctor source: Doctor.daml init-script: Doctor:setup -version: 2.1.0 +version: 2.2.0 dependencies: - daml-prim - daml-stdlib diff --git a/community/demo/src/main/daml/health-insurance/daml.yaml b/community/demo/src/main/daml/health-insurance/daml.yaml index cd2c6a76c..82471b719 100644 --- a/community/demo/src/main/daml/health-insurance/daml.yaml +++ b/community/demo/src/main/daml/health-insurance/daml.yaml @@ -1,4 +1,4 @@ -sdk-version: 2.1.0-snapshot.20220328.9630.0.66c37bad +sdk-version: 2.1.0-snapshot.20220407.9685.0.7ed507cf name: health-insurance parties: - Alice @@ -7,7 +7,7 @@ exposed-modules: - HealthInsurance source: HealthInsurance.daml init-script: HealthInsurance:setup -version: 2.1.0 +version: 2.2.0 dependencies: - daml-prim - daml-stdlib diff --git a/community/demo/src/main/daml/medical-records/daml.yaml b/community/demo/src/main/daml/medical-records/daml.yaml index cb5e6287b..170e9ac76 100644 --- a/community/demo/src/main/daml/medical-records/daml.yaml +++ b/community/demo/src/main/daml/medical-records/daml.yaml @@ -1,4 +1,4 @@ -sdk-version: 2.1.0-snapshot.20220328.9630.0.66c37bad +sdk-version: 2.1.0-snapshot.20220407.9685.0.7ed507cf name: medical-records parties: - Alice @@ -7,7 +7,7 @@ exposed-modules: - MedicalRecord source: MedicalRecord.daml init-script: 
MedicalRecord:setup -version: 2.1.0 +version: 2.2.0 dependencies: - daml-prim - daml-stdlib diff --git a/community/demo/src/main/scala/com/digitalasset/canton/demo/ReferenceDemoScript.scala b/community/demo/src/main/scala/com/digitalasset/canton/demo/ReferenceDemoScript.scala index 686dd76c0..ed3b5f16c 100644 --- a/community/demo/src/main/scala/com/digitalasset/canton/demo/ReferenceDemoScript.scala +++ b/community/demo/src/main/scala/com/digitalasset/canton/demo/ReferenceDemoScript.scala @@ -9,6 +9,7 @@ import com.daml.ledger.client.binding.{Contract, TemplateCompanion, Primitive => import com.digitalasset.canton.DiscardOps import com.digitalasset.canton.concurrent.Threading import com.digitalasset.canton.config.TimeoutDuration +import com.digitalasset.canton.console.commands.DomainChoice import com.digitalasset.canton.console.{ConsoleMacros, ParticipantReference} import com.digitalasset.canton.demo.Step.{Action, Noop} import com.digitalasset.canton.demo.model.{ai => ME, doctor => M} @@ -461,12 +462,6 @@ class ReferenceDemoScript( "admin-api", "participant parties.enable | domains.connect | upload_dar ai-analysis.dar", () => { - val registerPartyF = Future { - blocking { - val processorId = participant6.parties.enable("Processor") - partyIdCache.put("Processor", (processorId, participant6)) - } - } val registerDomainF = Future { blocking { registerDomain(participant6, "medical", medicalConnection) @@ -480,13 +475,20 @@ class ReferenceDemoScript( } } }) :+ Future { - blocking { - ConsoleMacros.utils.retry_until_true(lookupTimeout) { - participant6.parties.list(filterParty = "Processor").nonEmpty + blocking {} + } :+ registerDomainF + // once all dars are uploaded and we've connected the domain, register the party (as we can flush everything there ...) 
+ val sf = Future + .sequence(allF) + .flatMap(_ => + Future { + blocking { + val processorId = + participant6.parties.enable("Processor", waitForDomain = DomainChoice.All) + partyIdCache.put("Processor", (processorId, participant6)) + } } - } - } :+ registerPartyF :+ registerDomainF - val sf = Future.sequence(allF) + ) sf.foreach(_ => { val offer = ME.AIAnalysis .OfferAnalysis(registry = registry, owner = alice, analyser = processor) diff --git a/community/demo/src/pack/demo/demo.sc b/community/demo/src/pack/demo/demo.sc index 980ba7215..748f7de3c 100644 --- a/community/demo/src/pack/demo/demo.sc +++ b/community/demo/src/pack/demo/demo.sc @@ -16,6 +16,7 @@ val version = ReleaseVersion.current.fullVersion val (testScript, loadJar, adjustPath) = sys.props.getOrElseUpdate("demo-test", "0") match { case "1" => (true, false, true) case "2" => (true, true, false) + case "3" => (false, false, true) case _ => (false, true, false) } diff --git a/community/domain/src/main/scala/com/digitalasset/canton/domain/mediator/ConfirmationResponseProcessor.scala b/community/domain/src/main/scala/com/digitalasset/canton/domain/mediator/ConfirmationResponseProcessor.scala index c07fac3f0..5acb9274b 100644 --- a/community/domain/src/main/scala/com/digitalasset/canton/domain/mediator/ConfirmationResponseProcessor.scala +++ b/community/domain/src/main/scala/com/digitalasset/canton/domain/mediator/ConfirmationResponseProcessor.scala @@ -3,11 +3,12 @@ package com.digitalasset.canton.domain.mediator -import cats.data.{EitherT, NonEmptyList, NonEmptySet} +import cats.data.EitherT import cats.syntax.alternative._ import cats.syntax.functor._ import cats.syntax.functorFilter._ import cats.syntax.traverse._ +import com.daml.nonempty.NonEmpty import com.digitalasset.canton._ import com.digitalasset.canton.crypto.{DomainSyncCryptoClient, SyncCryptoError} import com.digitalasset.canton.data.{CantonTimestamp, ViewType} @@ -128,7 +129,7 @@ class ConfirmationResponseProcessor( requestId: RequestId, counter: SequencerCounter, request: MediatorRequest, - rootHashMessages: List[OpenEnvelope[RootHashMessage[SerializedRootHashMessagePayload]]], + rootHashMessages: Seq[OpenEnvelope[RootHashMessage[SerializedRootHashMessagePayload]]], )(implicit traceContext: TraceContext): Future[Unit] = { withSpan("ConfirmationResponseProcessor.processRequest") { implicit traceContext => span => span.setAttribute("request_id", requestId.toString) @@ -196,20 +197,20 @@ class ConfirmationResponseProcessor( private def checkRootHashMessages( request: MediatorRequest, - rootHashMessages: List[OpenEnvelope[RootHashMessage[SerializedRootHashMessagePayload]]], + rootHashMessages: Seq[OpenEnvelope[RootHashMessage[SerializedRootHashMessagePayload]]], topologySnapshot: TopologySnapshot, ): EitherT[Future, String, Unit] = { val (wrongRecipients, oneMemberRecipients) = rootHashMessages.flatMap { rhm => rhm.recipients.trees.toList.map { - case tree @ RecipientsTree(group, Nil) => - Either.cond(group.toSortedSet.size == 2 && group.contains(mediatorId), group, tree) + case tree @ RecipientsTree(group, Seq()) => + Either.cond(group.size == 2 && group.contains(mediatorId), group, tree) case badTree => Left(badTree) } }.separate val members = oneMemberRecipients.mapFilter(recipients => (recipients - mediatorId).headOption) - def repeatedMembers(members: List[Member]): List[Member] = { - val repeatedMembersB = List.newBuilder[Member] + def repeatedMembers(members: Seq[Member]): Seq[Member] = { + val repeatedMembersB = Seq.newBuilder[Member] val seen = new 
mutable.HashSet[Member]() members.foreach { member => val fresh = seen.add(member) @@ -218,7 +219,7 @@ class ConfirmationResponseProcessor( repeatedMembersB.result() } - def wrongRootHashes(expectedRootHash: RootHash): List[RootHash] = + def wrongRootHashes(expectedRootHash: RootHash): Seq[RootHash] = rootHashMessages.mapFilter { envelope => val rootHash = envelope.protocolMessage.rootHash if (rootHash == expectedRootHash) None else Some(rootHash) @@ -242,10 +243,10 @@ class ConfirmationResponseProcessor( } } - def distinctPayloads: List[SerializedRootHashMessagePayload] = + def distinctPayloads: Seq[SerializedRootHashMessagePayload] = rootHashMessages.map(_.protocolMessage.payload).distinct - def wrongViewType(expectedViewType: ViewType): List[ViewType] = + def wrongViewType(expectedViewType: ViewType): Seq[ViewType] = rootHashMessages.map(_.protocolMessage.viewType).filterNot(_ == expectedViewType).distinct for { @@ -296,7 +297,7 @@ class ConfirmationResponseProcessor( private def sendMalformedRejection( requestId: RequestId, - rootHashMessages: List[OpenEnvelope[RootHashMessage[SerializedRootHashMessagePayload]]], + rootHashMessages: Seq[OpenEnvelope[RootHashMessage[SerializedRootHashMessagePayload]]], rejectionReason: MediatorReject, domainParameters: DynamicDomainParameters, )(implicit traceContext: TraceContext): Future[Unit] = { @@ -307,19 +308,19 @@ class ConfirmationResponseProcessor( val recipientsByViewType = rootHashMessages.groupBy(_.protocolMessage.viewType).mapFilter { rhms => val recipients = rhms.flatMap(_.recipients.allRecipients).toSet - mediatorId - NonEmptyList.fromList(recipients.toList) + NonEmpty.from(recipients.toSeq) } if (recipientsByViewType.nonEmpty) { for { snapshot <- crypto.awaitSnapshot(requestId.unwrap) - envs <- recipientsByViewType.toList + envs <- recipientsByViewType.toSeq .traverse { case (viewType, recipients) => val rejection = MalformedMediatorRequestResult(requestId, domain, viewType, rejectionReason) SignedProtocolMessage .tryCreate(rejection, snapshot, crypto.pureCrypto) .map { signedRejection => - signedRejection -> Recipients.groups(recipients.map(r => NonEmptySet.one(r))) + signedRejection -> Recipients.groups(recipients.map(r => NonEmpty(Set, r))) } } batch = Batch.of(envs: _*) diff --git a/community/domain/src/main/scala/com/digitalasset/canton/domain/sequencing/sequencer/EventSignaller.scala b/community/domain/src/main/scala/com/digitalasset/canton/domain/sequencing/sequencer/EventSignaller.scala index 50600799b..cd1036198 100644 --- a/community/domain/src/main/scala/com/digitalasset/canton/domain/sequencing/sequencer/EventSignaller.scala +++ b/community/domain/src/main/scala/com/digitalasset/canton/domain/sequencing/sequencer/EventSignaller.scala @@ -5,7 +5,7 @@ package com.digitalasset.canton.domain.sequencing.sequencer import akka.NotUsed import akka.stream.scaladsl.Source -import cats.data.NonEmptyList +import com.daml.nonempty.NonEmpty import com.digitalasset.canton.domain.sequencing.sequencer.store.{Sequenced, SequencerMemberId} import com.digitalasset.canton.topology.Member import com.digitalasset.canton.tracing.TraceContext @@ -37,7 +37,7 @@ object WriteNotification { override def toString: String = s"Members(${memberIds.map(_.unwrap).mkString(",")})" } - def apply(events: NonEmptyList[Sequenced[_]]): WriteNotification = + def apply(events: NonEmpty[Seq[Sequenced[_]]]): WriteNotification = events .map(_.event.notifies) .reduceLeft(_ union _) diff --git 
a/community/domain/src/main/scala/com/digitalasset/canton/domain/sequencing/sequencer/SequencerWriter.scala b/community/domain/src/main/scala/com/digitalasset/canton/domain/sequencing/sequencer/SequencerWriter.scala index 057b5a75d..152633784 100644 --- a/community/domain/src/main/scala/com/digitalasset/canton/domain/sequencing/sequencer/SequencerWriter.scala +++ b/community/domain/src/main/scala/com/digitalasset/canton/domain/sequencing/sequencer/SequencerWriter.scala @@ -17,8 +17,10 @@ import com.digitalasset.canton.lifecycle.{ AsyncCloseable, AsyncOrSyncCloseable, FlagCloseableAsync, + FutureUnlessShutdown, SyncCloseable, } +import com.digitalasset.canton.lifecycle.FutureUnlessShutdown.syntax._ import com.digitalasset.canton.logging.{NamedLoggerFactory, NamedLogging, TracedLogger} import com.digitalasset.canton.resource.Storage import com.digitalasset.canton.sequencing.protocol.{SendAsyncError, SubmissionRequest} @@ -83,7 +85,7 @@ private[sequencer] class RunningSequencerWriterFlow( trait SequencerWriterStoreFactory extends AutoCloseable { def create(storage: Storage, generalStore: SequencerStore)(implicit traceContext: TraceContext - ): EitherT[Future, WriterStartupError, SequencerWriterStore] + ): EitherT[FutureUnlessShutdown, WriterStartupError, SequencerWriterStore] /** When the sequencer goes offline Exceptions may be thrown by the [[sequencer.store.SequencerStore]] and [[sequencer.SequencerWriterSource]]. * This allows callers to check whether the captured exception is expected when offline and indicates that the @@ -97,7 +99,7 @@ object SequencerWriterStoreFactory { new SequencerWriterStoreFactory { override def create(storage: Storage, generalStore: SequencerStore)(implicit traceContext: TraceContext - ): EitherT[Future, WriterStartupError, SequencerWriterStore] = + ): EitherT[FutureUnlessShutdown, WriterStartupError, SequencerWriterStore] = EitherT.pure(SequencerWriterStore.singleInstance(generalStore)) override def close(): Unit = () } @@ -184,39 +186,46 @@ class SequencerWriter( def start()(implicit traceContext: TraceContext): EitherT[Future, WriterStartupError, Unit] = performUnlessClosingEitherT[WriterStartupError, Unit](WriterStartupError.WriterShuttingDown) { def createStoreAndRunCrashRecovery() - : Future[Either[WriterStartupError, SequencerWriterStore]] = { + : EitherT[FutureUnlessShutdown, WriterStartupError, SequencerWriterStore] = { // only retry errors that are flagged as retryable - implicit val success: Success[Either[WriterStartupError, SequencerWriterStore]] = Success { - case Left(error) => !error.retryable - case Right(_) => true - } + implicit val success: Success[Either[WriterStartupError, SequencerWriterStore]] = + Success { + case Left(error) => !error.retryable + case Right(_) => true + } // continuously attempt to start the writer as we can't meaningfully proactively shutdown or crash // when this fails - Pause(logger, this, retry.Forever, 100.millis, "start-sequencer-writer").apply( - { - logger.debug("Starting sequencer writer") - for { - writerStore <- writerStoreFactory.create(storage, generalStore) - _ <- EitherTUtil.onErrorOrFailure(() => writerStore.close()) { - for { - // validate that the datastore has an appropriate commit mode set in order to run the writer - _ <- expectedCommitMode - .fold(EitherTUtil.unit[String])(writerStore.validateCommitMode) - .leftMap(WriterStartupError.BadCommitMode) - onlineTimestamp <- EitherT.right[WriterStartupError](runRecovery(writerStore)) - _ <- EitherT.right[WriterStartupError](waitForOnline(onlineTimestamp)) 
- } yield () - } - } yield writerStore - }.value, - AllExnRetryable, - ) + EitherT { + Pause(logger, this, retry.Forever, 100.millis, "start-sequencer-writer").unlessShutdown( + { + logger.debug("Starting sequencer writer") + for { + writerStore <- writerStoreFactory.create(storage, generalStore) + _ <- EitherTUtil + .onErrorOrFailure(() => writerStore.close()) { + for { + // validate that the datastore has an appropriate commit mode set in order to run the writer + _ <- expectedCommitMode + .fold(EitherTUtil.unit[String])(writerStore.validateCommitMode) + .leftMap(WriterStartupError.BadCommitMode) + onlineTimestamp <- EitherT.right[WriterStartupError]( + runRecovery(writerStore) + ) + _ <- EitherT.right[WriterStartupError](waitForOnline(onlineTimestamp)) + } yield () + } + .mapK(FutureUnlessShutdown.outcomeK) + } yield writerStore + }.value, + AllExnRetryable, + ) + } } - EitherT(createStoreAndRunCrashRecovery()) map { store => - startWriter(store) - } + createStoreAndRunCrashRecovery() + .map(startWriter) + .onShutdown(Left(WriterStartupError.WriterShuttingDown)) } def send( diff --git a/community/domain/src/main/scala/com/digitalasset/canton/domain/sequencing/sequencer/SequencerWriterSource.scala b/community/domain/src/main/scala/com/digitalasset/canton/domain/sequencing/sequencer/SequencerWriterSource.scala index 208a41f14..17353711c 100644 --- a/community/domain/src/main/scala/com/digitalasset/canton/domain/sequencing/sequencer/SequencerWriterSource.scala +++ b/community/domain/src/main/scala/com/digitalasset/canton/domain/sequencing/sequencer/SequencerWriterSource.scala @@ -8,9 +8,10 @@ import akka.stream._ import akka.stream.scaladsl.{Flow, GraphDSL, Keep, Merge, Source} import cats.data.{EitherT, NonEmptyList, OptionT, Validated, ValidatedNel} import cats.syntax.either._ -import cats.syntax.list._ +import cats.syntax.foldable._ import cats.syntax.option._ import cats.syntax.traverse._ +import com.daml.nonempty.NonEmpty import com.digitalasset.canton.config.RequireTypes.String256M import com.digitalasset.canton.crypto.DomainSyncCryptoClient import com.digitalasset.canton.data.CantonTimestamp @@ -19,7 +20,7 @@ import com.digitalasset.canton.logging.{NamedLoggerFactory, NamedLogging, Traced import com.digitalasset.canton.sequencing.protocol.{SendAsyncError, SubmissionRequest} import com.digitalasset.canton.time.{Clock, NonNegativeFiniteDuration} import com.digitalasset.canton.topology.Member -import com.digitalasset.canton.tracing.BatchTracing.withNelTracedBatch +import com.digitalasset.canton.tracing.BatchTracing.withTracedBatch import com.digitalasset.canton.tracing.{HasTraceContext, TraceContext, Traced} import com.digitalasset.canton.util.EitherTUtil import com.digitalasset.canton.version.ProtocolVersion @@ -59,10 +60,10 @@ case class BatchWritten(notifies: WriteNotification, latestTimestamp: CantonTime object BatchWritten { /** Assumes events are ordered by timestamp */ - def apply(events: NonEmptyList[Sequenced[_]]): BatchWritten = + def apply(events: NonEmpty[Seq[Sequenced[_]]]): BatchWritten = BatchWritten( notifies = WriteNotification(events), - latestTimestamp = events.last.timestamp, + latestTimestamp = events.last1.timestamp, ) } @@ -302,18 +303,20 @@ object SequenceWritesFlow { writes .traverse(sequenceWrite) .flatMap { - _.toList.toNel // due to the groupedWithin we should likely always have items + NonEmpty + .from(_) // due to the groupedWithin we should likely always have items .fold(Future.successful[Traced[Option[BatchWritten]]](Traced.empty(None))) { writes => - 
withNelTracedBatch(logger, writes) { implicit traceContext => writes => - { - val events = writes.collect { case SequencedWrite.Event(event) => event }.toNel - val notifies = - events.fold[WriteNotification](WriteNotification.None)(WriteNotification(_)) - for { - // if this write batch had any events then save them - _ <- events.fold(Future.unit)(store.saveEvents) - } yield Traced(BatchWritten(notifies, writes.last.timestamp).some) - } + withTracedBatch(logger, writes) { implicit traceContext => writes => + val events: Option[NonEmpty[Seq[Sequenced[PayloadId]]]] = + NonEmpty.from(writes.collect { case SequencedWrite.Event(event) => + event + }) + val notifies = + events.fold[WriteNotification](WriteNotification.None)(WriteNotification(_)) + for { + // if this write batch had any events then save them + _ <- events.fold(Future.unit)(store.saveEvents) + } yield Traced(BatchWritten(notifies, writes.last1.timestamp).some) } } } @@ -456,31 +459,31 @@ object WritePayloadsFlow { def writePayloads( events: Seq[Presequenced[StoreEvent[Payload]]] - ): Future[List[Presequenced[StoreEvent[PayloadId]]]] = - NonEmptyList - .fromList(events.toList) - .fold(Future.successful(List.empty[Presequenced[StoreEvent[PayloadId]]])) { events => - withNelTracedBatch(logger, events) { implicit traceContext => events => - // extract the payloads themselves for storing - val payloads = events.toList.map(_.event).flatMap(extractPayload(_).toList) - - // strip out the payloads and replace with their id as the content itself is not needed downstream - val eventsWithPayloadId = events.map(_.map(e => dropPayloadContent(e))).toList - logger.debug(s"Writing ${payloads.size} payloads from batch of ${events.size}") - - // save the payloads if there are any - EitherTUtil.toFuture { - payloads.toNel - .traverse(store.savePayloads(_, instanceDiscriminator)) - .leftMap { - case SavePayloadsError.ConflictingPayloadId(id, conflictingInstance) => - new ConflictingPayloadIdException(id, conflictingInstance) - case SavePayloadsError.PayloadMissing(id) => new PayloadMissingException(id) - } - .map(_ => eventsWithPayloadId) + ): Future[Seq[Presequenced[StoreEvent[PayloadId]]]] = { + if (events.isEmpty) Future.successful(Seq.empty[Presequenced[StoreEvent[PayloadId]]]) + else { + implicit val traceContext: TraceContext = TraceContext.ofBatch(events)(logger) + // extract the payloads themselves for storing + val payloads = events.map(_.event).flatMap(extractPayload(_).toList) + + // strip out the payloads and replace with their id as the content itself is not needed downstream + val eventsWithPayloadId = events.map(_.map(e => dropPayloadContent(e))) + logger.debug(s"Writing ${payloads.size} payloads from batch of ${events.size}") + + // save the payloads if there are any + EitherTUtil.toFuture { + NonEmpty + .from(payloads) + .traverse_(store.savePayloads(_, instanceDiscriminator)) + .leftMap { + case SavePayloadsError.ConflictingPayloadId(id, conflictingInstance) => + new ConflictingPayloadIdException(id, conflictingInstance) + case SavePayloadsError.PayloadMissing(id) => new PayloadMissingException(id) } - } + .map((_: Unit) => eventsWithPayloadId) } + } + } def extractPayload(event: StoreEvent[Payload]): Option[Payload] = event match { case DeliverStoreEvent(_, _, _, payload, _, _) => payload.some diff --git a/community/domain/src/main/scala/com/digitalasset/canton/domain/sequencing/sequencer/store/DbSequencerStore.scala b/community/domain/src/main/scala/com/digitalasset/canton/domain/sequencing/sequencer/store/DbSequencerStore.scala 
index cfa3bc357..9f53fce2f 100644 --- a/community/domain/src/main/scala/com/digitalasset/canton/domain/sequencing/sequencer/store/DbSequencerStore.scala +++ b/community/domain/src/main/scala/com/digitalasset/canton/domain/sequencing/sequencer/store/DbSequencerStore.scala @@ -3,11 +3,13 @@ package com.digitalasset.canton.domain.sequencing.sequencer.store -import cats.data.{EitherT, NonEmptyList, NonEmptySet} +import cats.data.{EitherT, NonEmptySet} import cats.syntax.bifunctor._ import cats.syntax.either._ import cats.syntax.foldable._ import cats.syntax.list._ +import com.daml.nonempty.NonEmpty +import com.daml.nonempty.catsinstances._ import com.digitalasset.canton.config.ProcessingTimeout import com.digitalasset.canton.config.RequireTypes.{PositiveNumeric, String256M} import com.digitalasset.canton.data.CantonTimestamp @@ -384,13 +386,13 @@ class DbSequencerStore( * - Finally we filter to payloads that haven't yet been successfully inserted and go back to the first step attempting * to reinsert just this subset. */ - override def savePayloads(payloads: NonEmptyList[Payload], instanceDiscriminator: UUID)(implicit + override def savePayloads(payloads: NonEmpty[Seq[Payload]], instanceDiscriminator: UUID)(implicit traceContext: TraceContext ): EitherT[Future, SavePayloadsError, Unit] = { // insert the provided payloads with the associated discriminator to the payload table. // we're intentionally using an insert that will fail with a primary key constraint violation if rows exist - def insert(payloadsToInsert: NonEmptyList[Payload]): Future[Boolean] = { + def insert(payloadsToInsert: NonEmpty[Seq[Payload]]): Future[Boolean] = { def isConstraintViolation(batchUpdateException: SQLException): Boolean = profile match { case Postgres(_) => batchUpdateException.getSQLState == PSQLState.UNIQUE_VIOLATION.getState case Oracle(_) => @@ -418,11 +420,10 @@ class DbSequencerStore( storage .queryAndUpdate( - DbStorage.bulkOperation(insertSql, payloadsToInsert.toList, storage.profile) { - pp => payload => - pp >> payload.id.unwrap - pp >> instanceDiscriminator - pp >> payload.content + DbStorage.bulkOperation(insertSql, payloadsToInsert, storage.profile) { pp => payload => + pp >> payload.id.unwrap + pp >> instanceDiscriminator + pp >> payload.content }, functionFullName, ) @@ -446,7 +447,7 @@ class DbSequencerStore( // and which are still missing. // will return an error if the payload exists but with a different uniquifier as this suggests another process // has inserted a conflicting value.
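// --------------------------------------------------------------------------------------------
// Illustrative sketch (not part of the patch): the control flow of savePayloads as described in
// the scaladoc above -- bulk-insert the batch, and on a key conflict retry only the payloads
// that are still missing. The DB calls are replaced by plain function parameters (insertAll,
// listMissing); only the loop structure mirrors the real code.
import com.daml.nonempty.NonEmpty
import scala.annotation.tailrec

object SavePayloadsRetrySketch {
  final case class Payload(id: String)

  @tailrec
  def go(
      remaining: NonEmpty[Seq[Payload]],
      insertAll: NonEmpty[Seq[Payload]] => Boolean, // false signals a primary-key conflict
      listMissing: () => Seq[Payload],              // payloads not yet stored with our discriminator
  ): Unit = {
    val missing = if (insertAll(remaining)) Seq.empty[Payload] else listMissing()
    NonEmpty.from(missing) match {
      case None               => () // everything is stored
      case Some(stillMissing) => go(stillMissing, insertAll, listMissing)
    }
  }
}
// --------------------------------------------------------------------------------------------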
- def listMissing(): EitherT[Future, SavePayloadsError, List[Payload]] = { + def listMissing(): EitherT[Future, SavePayloadsError, Seq[Payload]] = { val payloadIds = payloads.map(_.id) // the max default config for number of payloads is around 50 and the max number of clauses that oracle supports is around 1000 // so we're really unlikely to need to this IN clause splitting, but lets support it just in case as Matthias has @@ -464,11 +465,11 @@ class DbSequencerStore( } map (_.toMap) // take all payloads we were expecting and then look up from inserted whether they are present and if they have // a matching instance discriminator (meaning we put them there) - missing <- payloads.toList - .foldM(List.empty[Payload]) { (missing, payload) => + missing <- payloads.toNEF + .foldM(Seq.empty[Payload]) { (missing, payload) => inserted .get(payload.id) - .fold[Either[SavePayloadsError, List[Payload]]](Right(missing :+ payload)) { + .fold[Either[SavePayloadsError, Seq[Payload]]](Right(missing :+ payload)) { storedDiscriminator => // we expect the local and stored instance discriminators should match otherwise it suggests the payload // was inserted by another `savePayloads` call @@ -484,17 +485,17 @@ class DbSequencerStore( } def go( - remainingPayloadsToInsert: NonEmptyList[Payload] + remainingPayloadsToInsert: NonEmpty[Seq[Payload]] ): EitherT[Future, SavePayloadsError, Unit] = EitherT .right(insert(remainingPayloadsToInsert)) .flatMap { successful => if (!successful) listMissing() - else EitherT.pure[Future, SavePayloadsError](List.empty[Payload]) + else EitherT.pure[Future, SavePayloadsError](Seq.empty[Payload]) } .flatMap { missing => // do we have any remaining to insert - NonEmptyList.fromList(missing).fold(EitherTUtil.unit[SavePayloadsError]) { missing => + NonEmpty.from(missing).fold(EitherTUtil.unit[SavePayloadsError]) { missing => logger.debug( s"Retrying to insert ${missing.size} missing of ${remainingPayloadsToInsert.size} payloads" ) @@ -505,7 +506,7 @@ class DbSequencerStore( go(payloads) } - override def saveEvents(instanceIndex: Int, events: NonEmptyList[Sequenced[PayloadId]])(implicit + override def saveEvents(instanceIndex: Int, events: NonEmpty[Seq[Sequenced[PayloadId]]])(implicit traceContext: TraceContext ): Future[Unit] = { // support dropping in the correct syntax to make this insert idempotent regardless of db @@ -523,7 +524,7 @@ class DbSequencerStore( |""".stripMargin storage.queryAndUpdate( - DbStorage.bulkOperation_(saveSql, events.toList, storage.profile) { pp => event => + DbStorage.bulkOperation_(saveSql, events, storage.profile) { pp => event => val DeliverStoreEventRow( timestamp, sequencerInstanceIndex, diff --git a/community/domain/src/main/scala/com/digitalasset/canton/domain/sequencing/sequencer/store/InMemorySequencerStore.scala b/community/domain/src/main/scala/com/digitalasset/canton/domain/sequencing/sequencer/store/InMemorySequencerStore.scala index 3aef69950..5a3e97bd8 100644 --- a/community/domain/src/main/scala/com/digitalasset/canton/domain/sequencing/sequencer/store/InMemorySequencerStore.scala +++ b/community/domain/src/main/scala/com/digitalasset/canton/domain/sequencing/sequencer/store/InMemorySequencerStore.scala @@ -8,6 +8,8 @@ import cats.syntax.either._ import cats.syntax.functor._ import cats.syntax.option._ import cats.syntax.traverse._ +import com.daml.nonempty.NonEmpty +import com.daml.nonempty.catsinstances._ import com.digitalasset.canton.SequencerCounter import com.digitalasset.canton.data.CantonTimestamp import 
com.digitalasset.canton.domain.sequencing.sequencer._ @@ -76,10 +78,10 @@ class InMemorySequencerStore(protected val loggerFactory: NamedLoggerFactory)(im members.get(member) ) - override def savePayloads(payloadsToInsert: NonEmptyList[Payload], instanceDiscriminator: UUID)( + override def savePayloads(payloadsToInsert: NonEmpty[Seq[Payload]], instanceDiscriminator: UUID)( implicit traceContext: TraceContext ): EitherT[Future, SavePayloadsError, Unit] = - payloadsToInsert.toList.traverse { case Payload(id, content) => + payloadsToInsert.toNEF.traverse { case Payload(id, content) => Option(payloads.putIfAbsent(id.unwrap, StoredPayload(instanceDiscriminator, content))) .flatMap { existingPayload => // if we found an existing payload it must have a matching instance discriminator @@ -92,11 +94,11 @@ class InMemorySequencerStore(protected val loggerFactory: NamedLoggerFactory)(im .toEitherT[Future] }.void - override def saveEvents(instanceIndex: Int, eventsToInsert: NonEmptyList[Sequenced[PayloadId]])( + override def saveEvents(instanceIndex: Int, eventsToInsert: NonEmpty[Seq[Sequenced[PayloadId]]])( implicit traceContext: TraceContext ): Future[Unit] = Future.successful( - eventsToInsert.toList.foreach { event => + eventsToInsert.foreach { event => Option(events.putIfAbsent(event.timestamp, event.event)) .foreach(_ => throw new UniqueKeyViolationException( diff --git a/community/domain/src/main/scala/com/digitalasset/canton/domain/sequencing/sequencer/store/SequencerStore.scala b/community/domain/src/main/scala/com/digitalasset/canton/domain/sequencing/sequencer/store/SequencerStore.scala index bc02bcfd7..dc7522c95 100644 --- a/community/domain/src/main/scala/com/digitalasset/canton/domain/sequencing/sequencer/store/SequencerStore.scala +++ b/community/domain/src/main/scala/com/digitalasset/canton/domain/sequencing/sequencer/store/SequencerStore.scala @@ -5,9 +5,10 @@ package com.digitalasset.canton.domain.sequencing.sequencer.store import cats.Order._ import cats.Show -import cats.data.{EitherT, NonEmptyList, NonEmptySet} +import cats.data.{EitherT, NonEmptySet} import cats.syntax.either._ import cats.syntax.traverse._ +import com.daml.nonempty.NonEmpty import com.digitalasset.canton.SequencerCounter import com.digitalasset.canton.config.ProcessingTimeout import com.digitalasset.canton.config.RequireTypes.{PositiveNumeric, String256M} @@ -330,7 +331,7 @@ trait SequencerStore extends NamedLogging with AutoCloseable { * @param instanceDiscriminator a unique ephemeral value to ensure that no other sequencer instances are writing * conflicting payloads without having to check the payload body */ - def savePayloads(payloads: NonEmptyList[Payload], instanceDiscriminator: UUID)(implicit + def savePayloads(payloads: NonEmpty[Seq[Payload]], instanceDiscriminator: UUID)(implicit traceContext: TraceContext ): EitherT[Future, SavePayloadsError, Unit] @@ -339,7 +340,7 @@ trait SequencerStore extends NamedLogging with AutoCloseable { * Callers MUST ensure that event-ids are unique as no errors will be returned if a duplicate is present (for * the sequencer writer see [[sequencer.PartitionedTimestampGenerator]] for use with their instance index). 
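// --------------------------------------------------------------------------------------------
// Illustrative sketch (not part of the patch): the putIfAbsent-based idempotent insert used by
// the InMemorySequencerStore hunk above. A payload may already be present, but only if it was
// written with the same instance discriminator; otherwise the conflicting discriminator is
// reported. Names are simplified stand-ins for the real store types.
import java.util.UUID
import java.util.concurrent.ConcurrentHashMap

object PutIfAbsentSketch {
  final case class StoredPayload(instanceDiscriminator: UUID, content: String)
  private val payloads = new ConcurrentHashMap[String, StoredPayload]()

  // Left(existing discriminator) if another writer already stored this id with different provenance.
  def save(id: String, content: String, discriminator: UUID): Either[UUID, Unit] =
    Option(payloads.putIfAbsent(id, StoredPayload(discriminator, content))) match {
      case Some(existing) if existing.instanceDiscriminator != discriminator =>
        Left(existing.instanceDiscriminator)
      case _ => Right(()) // freshly inserted, or an earlier write by this same instance
    }
}
// --------------------------------------------------------------------------------------------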
*/ - def saveEvents(instanceIndex: Int, events: NonEmptyList[Sequenced[PayloadId]])(implicit + def saveEvents(instanceIndex: Int, events: NonEmpty[Seq[Sequenced[PayloadId]]])(implicit traceContext: TraceContext ): Future[Unit] diff --git a/community/domain/src/main/scala/com/digitalasset/canton/domain/sequencing/sequencer/store/SequencerWriterStore.scala b/community/domain/src/main/scala/com/digitalasset/canton/domain/sequencing/sequencer/store/SequencerWriterStore.scala index 7388c9de5..d8e3f2f94 100644 --- a/community/domain/src/main/scala/com/digitalasset/canton/domain/sequencing/sequencer/store/SequencerWriterStore.scala +++ b/community/domain/src/main/scala/com/digitalasset/canton/domain/sequencing/sequencer/store/SequencerWriterStore.scala @@ -3,7 +3,8 @@ package com.digitalasset.canton.domain.sequencing.sequencer.store -import cats.data.{EitherT, NonEmptyList} +import cats.data.EitherT +import com.daml.nonempty.NonEmpty import com.digitalasset.canton.data.CantonTimestamp import com.digitalasset.canton.domain.sequencing.sequencer.CommitMode import com.digitalasset.canton.topology.Member @@ -39,7 +40,7 @@ trait SequencerWriterStore extends AutoCloseable { /** Save a series of payloads to the store. * Is up to the caller to determine a reasonable batch size and no batching is done within the store. */ - def savePayloads(payloads: NonEmptyList[Payload], instanceDiscriminator: UUID)(implicit + def savePayloads(payloads: NonEmpty[Seq[Payload]], instanceDiscriminator: UUID)(implicit traceContext: TraceContext ): EitherT[Future, SavePayloadsError, Unit] = store.savePayloads(payloads, instanceDiscriminator) @@ -48,7 +49,7 @@ trait SequencerWriterStore extends AutoCloseable { * Callers should determine batch size. No batching is done within the store. * Callers MUST ensure that event-ids are unique otherwise stores will throw (likely a constraint violation). */ - def saveEvents(events: NonEmptyList[Sequenced[PayloadId]])(implicit + def saveEvents(events: NonEmpty[Seq[Sequenced[PayloadId]]])(implicit traceContext: TraceContext ): Future[Unit] = store.saveEvents(instanceIndex, events) diff --git a/community/domain/src/main/scala/com/digitalasset/canton/domain/topology/DomainTopologyDispatcher.scala b/community/domain/src/main/scala/com/digitalasset/canton/domain/topology/DomainTopologyDispatcher.scala index 3c1f01482..a03abd288 100644 --- a/community/domain/src/main/scala/com/digitalasset/canton/domain/topology/DomainTopologyDispatcher.scala +++ b/community/domain/src/main/scala/com/digitalasset/canton/domain/topology/DomainTopologyDispatcher.scala @@ -3,10 +3,11 @@ package com.digitalasset.canton.domain.topology -import cats.data.{EitherT, NonEmptyList} +import cats.data.EitherT import cats.syntax.foldable._ import cats.syntax.traverseFilter._ import com.daml.error.{ErrorCategory, ErrorCode, Explanation, Resolution} +import com.daml.nonempty.NonEmpty import com.digitalasset.canton.config.ProcessingTimeout import com.digitalasset.canton.crypto._ import com.digitalasset.canton.data.CantonTimestamp @@ -247,28 +248,28 @@ class DomainTopologyDispatcher( * We send the entire queue up and including of the first `ParticipantState` update at once for efficiency reasons. 
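// --------------------------------------------------------------------------------------------
// Illustrative sketch (not part of the patch): the batching rule described in the scaladoc
// above, reduced to plain strings standing in for topology transactions. Everything before the
// first ParticipantState-like update is dequeued, and that update itself is included in the
// batch. Only the dequeueWhile-plus-one shape mirrors determineBatchFromQueue.
import scala.collection.mutable

object DequeueBatchSketch {
  def determineBatch(queue: mutable.Queue[String], isBreakpoint: String => Boolean): Seq[String] = {
    val items = queue.dequeueWhile(tx => !isBreakpoint(tx))
    // include the breakpoint element itself, if the queue is not already exhausted
    (if (queue.nonEmpty) items :+ queue.dequeue() else items).toSeq
  }
}
// --------------------------------------------------------------------------------------------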
*/ private def determineBatchFromQueue() - : List[Traced[StoredTopologyTransaction[TopologyChangeOp]]] = { + : Seq[Traced[StoredTopologyTransaction[TopologyChangeOp]]] = { val items = queue.dequeueWhile(_.value.transaction.transaction.element.mapping match { case _: ParticipantState | _: DomainParametersChange => false case _ => true }) (if (queue.nonEmpty) items :+ queue.dequeue() - else items).toList + else items).toSeq } /** wait an epsilon if the effective time is reduced with this change */ private def waitIfEffectiveTimeIsReduced( - transactions: Traced[NonEmptyList[StoredTopologyTransaction[TopologyChangeOp]]] + transactions: Traced[NonEmpty[Seq[StoredTopologyTransaction[TopologyChangeOp]]]] ): EitherT[FutureUnlessShutdown, String, Unit] = transactions.withTraceContext { implicit traceContext => txs => val empty = EitherT.rightT[FutureUnlessShutdown, String](()) - txs.last.transaction.transaction.element.mapping match { + txs.last1.transaction.transaction.element.mapping match { case mapping: DomainParametersChange => EitherT .right( performUnlessClosingF( - authorizedStoreSnapshot(txs.last.validFrom).findDynamicDomainParameters + authorizedStoreSnapshot(txs.last1.validFrom).findDynamicDomainParameters ) ) .flatMap(_.fold(empty) { param => @@ -299,8 +300,8 @@ class DomainTopologyDispatcher( } } })) - tracedTxO = NonEmptyList.fromList(pending).map { tracedNel => - BatchTracing.withNelTracedBatch(logger, tracedNel)(implicit traceContext => + tracedTxO = NonEmpty.from(pending).map { tracedNE => + BatchTracing.withTracedBatch(logger, tracedNE)(implicit traceContext => txs => Traced(txs.map(_.value)) ) } @@ -353,20 +354,20 @@ class DomainTopologyDispatcher( private def bootstrapAndDispatch( tracedTransaction: Traced[ - NonEmptyList[StoredTopologyTransaction[TopologyChangeOp]] + NonEmpty[Seq[StoredTopologyTransaction[TopologyChangeOp]]] ] ): EitherT[FutureUnlessShutdown, String, Unit] = { tracedTransaction withTraceContext { implicit traceContext => transactions => val flushToParticipantET: EitherT[FutureUnlessShutdown, String, Option[ParticipantId]] = - transactions.last.transaction.transaction.element.mapping match { + transactions.last1.transaction.transaction.element.mapping match { case ParticipantState(_, _, participant, _, _) => for { catchupForParticipant <- EitherT.right( performUnlessClosingF( catchup .determineCatchupForParticipant( - transactions.head.validFrom, - transactions.last.validFrom, + transactions.head1.validFrom, + transactions.last1.validFrom, participant, ) ) @@ -379,7 +380,7 @@ class DomainTopologyDispatcher( _ <- catchupForParticipant.fold(EitherT.rightT[FutureUnlessShutdown, String](())) { txs => sender.sendTransactions( - authorizedCryptoSnapshot(transactions.head.validFrom), + authorizedCryptoSnapshot(transactions.head1.validFrom), txs.toDomainTopologyTransactions, Set(participant), ) @@ -394,17 +395,19 @@ class DomainTopologyDispatcher( // update watermark, which we can as we successfully registered all transactions with the domain // we don't need to wait until they are processed _ <- EitherT.right( - performUnlessClosingF(targetStore.updateDispatchingWatermark(transactions.last.validFrom)) + performUnlessClosingF( + targetStore.updateDispatchingWatermark(transactions.last1.validFrom) + ) ) } yield () } } private def sendTransactions( - transactions: NonEmptyList[StoredTopologyTransaction[TopologyChangeOp]], + transactions: NonEmpty[Seq[StoredTopologyTransaction[TopologyChangeOp]]], add: Option[ParticipantId], )(implicit traceContext: TraceContext): 
EitherT[FutureUnlessShutdown, String, Unit] = { - val headSnapshot = authorizedStoreSnapshot(transactions.head.validFrom) + val headSnapshot = authorizedStoreSnapshot(transactions.head1.validFrom) val receivingParticipantsF = performUnlessClosingF( headSnapshot .participants() @@ -417,7 +420,7 @@ class DomainTopologyDispatcher( receivingParticipants <- EitherT.right(receivingParticipantsF) mediators <- EitherT.right(mediatorsF) _ <- sender.sendTransactions( - authorizedCryptoSnapshot(transactions.head.validFrom), + authorizedCryptoSnapshot(transactions.head1.validFrom), transactions.map(_.transaction).toList, (receivingParticipants ++ staticDomainMembers ++ mediators ++ add.toList).toSet, ) diff --git a/community/domain/src/test/scala/com/digitalasset/canton/domain/mediator/ConfirmationResponseProcessorTest.scala b/community/domain/src/test/scala/com/digitalasset/canton/domain/mediator/ConfirmationResponseProcessorTest.scala index 770abc75d..ae4cabf60 100644 --- a/community/domain/src/test/scala/com/digitalasset/canton/domain/mediator/ConfirmationResponseProcessorTest.scala +++ b/community/domain/src/test/scala/com/digitalasset/canton/domain/mediator/ConfirmationResponseProcessorTest.scala @@ -3,8 +3,9 @@ package com.digitalasset.canton.domain.mediator -import cats.data.{EitherT, NonEmptyList, NonEmptySet} +import cats.data.EitherT import cats.syntax.option._ +import com.daml.nonempty.NonEmpty import com.digitalasset.canton._ import com.digitalasset.canton.config.CachingConfigs import com.digitalasset.canton.crypto._ @@ -451,7 +452,7 @@ class ConfirmationResponseProcessorTest extends AsyncWordSpec with BaseTest { override def rootHash: Option[RootHash] = correctRootHash.some } - val allParticipants = NonEmptyList.of(participant1, participant2, participant3) + val allParticipants = NonEmpty(Seq, participant1, participant2, participant3) val correctViewType = informeeMessage.viewType val rootHashMessage = @@ -462,16 +463,17 @@ class ConfirmationResponseProcessorTest extends AsyncWordSpec with BaseTest { SerializedRootHashMessagePayload.empty, ) - val tests = List( - "individual messages" -> allParticipants.toList.map(p => Recipients.cc(mediatorId, p)), - "just one message" -> List( - Recipients.groups(allParticipants.map(p => NonEmptySet.of(p, mediatorId))) + val tests = List[(String, Seq[Recipients])]( + "individual messages" -> allParticipants.map(p => Recipients.cc(mediatorId, p)), + "just one message" -> Seq( + Recipients.groups(allParticipants.map(p => NonEmpty.mk(Set, p, mediatorId))) ), - "mixed" -> List( + "mixed" -> Seq( Recipients.groups( - NonEmptyList.of( - NonEmptySet.of(participant1, mediatorId), - NonEmptySet.of(participant2, mediatorId), + NonEmpty.mk( + Seq, + NonEmpty.mk(Set, participant1, mediatorId), + NonEmpty.mk(Set, participant2, mediatorId), ) ), Recipients.cc(participant3, mediatorId), @@ -575,10 +577,10 @@ class ConfirmationResponseProcessorTest extends AsyncWordSpec with BaseTest { List(Set[Member](participant) -> correctViewType, Set[Member](otherMember) -> wrongViewType), (batchWithRootHashMessageWithTooManyRecipients -> - show"Root hash messages with wrong recipients tree: RecipientsTree(value = Seq($otherMember, $mediatorId, $participant), children = Seq())") -> + show"Root hash messages with wrong recipients tree: RecipientsTree(recipient group = Seq($mediatorId, $participant, $otherMember), children = Seq())") -> List(Set[Member](participant, otherMember) -> correctViewType), - (batchWithRootHashMessageWithTooFewRecipients -> show"Root hash messages with 
wrong recipients tree: RecipientsTree(value = $mediatorId, children = Seq())") -> List.empty, + (batchWithRootHashMessageWithTooFewRecipients -> show"Root hash messages with wrong recipients tree: RecipientsTree(recipient group = $mediatorId, children = Seq())") -> List.empty, (batchWithRepeatedRootHashMessage -> show"Several root hash messages for members: $participant") -> List(Set[Member](participant) -> correctViewType), @@ -648,7 +650,7 @@ class ConfirmationResponseProcessorTest extends AsyncWordSpec with BaseTest { val expectedResults = expectedRecipientsAndViewTypes._2.toSet val expected = expectedResults.flatMap { case (recipients, viewType) => recipients.map { member: Member => - RecipientsTree(NonEmptySet.one(member), Nil) -> Some(viewType) + RecipientsTree.leaf(NonEmpty(Set, member)) -> Some(viewType) } } withClue(s"Test case: ${expectedRecipientsAndViewTypes._1}") { diff --git a/community/domain/src/test/scala/com/digitalasset/canton/domain/sequencing/sequencer/SequencerReaderTest.scala b/community/domain/src/test/scala/com/digitalasset/canton/domain/sequencing/sequencer/SequencerReaderTest.scala index 24bb8d81f..e7cc33351 100644 --- a/community/domain/src/test/scala/com/digitalasset/canton/domain/sequencing/sequencer/SequencerReaderTest.scala +++ b/community/domain/src/test/scala/com/digitalasset/canton/domain/sequencing/sequencer/SequencerReaderTest.scala @@ -7,7 +7,7 @@ import akka.NotUsed import akka.actor.ActorSystem import akka.stream.scaladsl.{Sink, SinkQueueWithCancel, Source} import akka.stream.{Materializer, OverflowStrategy, QueueOfferResult} -import cats.data.NonEmptyList +import com.daml.nonempty.NonEmptyUtil import com.digitalasset.canton.config.ProcessingTimeout import com.digitalasset.canton.domain.sequencing.sequencer.DomainSequencingTestUtils import com.digitalasset.canton.domain.sequencing.sequencer.DomainSequencingTestUtils._ @@ -141,16 +141,16 @@ class SequencerReaderTest extends FixtureAsyncWordSpec with BaseTest { } def storeAndWatermark(events: Seq[Sequenced[PayloadId]]): Future[Unit] = { - val eventsNel = NonEmptyList.fromListUnsafe(events.toList) + val eventsNE = NonEmptyUtil.fromUnsafe(events) val payloads = DomainSequencingTestUtils.payloadsForEvents(events) for { _ <- store - .savePayloads(NonEmptyList.fromListUnsafe(payloads), instanceDiscriminator) + .savePayloads(NonEmptyUtil.fromUnsafe(payloads), instanceDiscriminator) .valueOrFail(s"Save payloads") - _ <- store.saveEvents(instanceIndex, eventsNel) + _ <- store.saveEvents(instanceIndex, eventsNE) _ <- store - .saveWatermark(instanceIndex, eventsNel.last.timestamp) + .saveWatermark(instanceIndex, eventsNE.last1.timestamp) .valueOrFail("saveWatermark") } yield { // update the event signaller if auto signalling is enabled diff --git a/community/domain/src/test/scala/com/digitalasset/canton/domain/sequencing/sequencer/SequencerWriterSourceTest.scala b/community/domain/src/test/scala/com/digitalasset/canton/domain/sequencing/sequencer/SequencerWriterSourceTest.scala index d571fae01..f9078b76f 100644 --- a/community/domain/src/test/scala/com/digitalasset/canton/domain/sequencing/sequencer/SequencerWriterSourceTest.scala +++ b/community/domain/src/test/scala/com/digitalasset/canton/domain/sequencing/sequencer/SequencerWriterSourceTest.scala @@ -9,6 +9,7 @@ import akka.stream.QueueOfferResult import akka.stream.scaladsl.{Keep, Sink, Source} import cats.data.{EitherT, NonEmptyList} import cats.syntax.functor._ +import com.daml.nonempty.NonEmpty import com.digitalasset.canton.data.CantonTimestamp import 
com.digitalasset.canton.domain.sequencing.sequencer.store._ import com.digitalasset.canton.lifecycle.{ @@ -88,7 +89,7 @@ class SequencerWriterSourceTest extends AsyncWordSpec with BaseTest with HasExec timeAdvancement.set(duration) override def savePayloads( - payloadsToInsert: NonEmptyList[Payload], + payloadsToInsert: NonEmpty[Seq[Payload]], instanceDiscriminator: UUID, )(implicit traceContext: TraceContext): EitherT[Future, SavePayloadsError, Unit] = { clock.advance(timeAdvancement.get()) diff --git a/community/domain/src/test/scala/com/digitalasset/canton/domain/sequencing/sequencer/SequencerWriterTest.scala b/community/domain/src/test/scala/com/digitalasset/canton/domain/sequencing/sequencer/SequencerWriterTest.scala index 201050dd2..ec5075a2b 100644 --- a/community/domain/src/test/scala/com/digitalasset/canton/domain/sequencing/sequencer/SequencerWriterTest.scala +++ b/community/domain/src/test/scala/com/digitalasset/canton/domain/sequencing/sequencer/SequencerWriterTest.scala @@ -12,6 +12,7 @@ import com.digitalasset.canton.domain.sequencing.sequencer.store.{ SequencerStore, SequencerWriterStore, } +import com.digitalasset.canton.lifecycle.FutureUnlessShutdown import com.digitalasset.canton.resource.{MemoryStorage, Storage} import com.digitalasset.canton.sequencing.protocol.{SendAsyncError, SubmissionRequest} import com.digitalasset.canton.time.SimClock @@ -41,7 +42,7 @@ class SequencerWriterTest extends FixtureAsyncWordSpec with BaseTest { override def create(storage: Storage, generalStore: SequencerStore)(implicit traceContext: TraceContext - ): EitherT[Future, WriterStartupError, SequencerWriterStore] = + ): EitherT[FutureUnlessShutdown, WriterStartupError, SequencerWriterStore] = EitherT.pure( SequencerWriterStore.singleInstance(generalStore) ) diff --git a/community/domain/src/test/scala/com/digitalasset/canton/domain/sequencing/sequencer/store/MultiTenantedSequencerStoreTest.scala b/community/domain/src/test/scala/com/digitalasset/canton/domain/sequencing/sequencer/store/MultiTenantedSequencerStoreTest.scala index 94eef1382..945aa9c8c 100644 --- a/community/domain/src/test/scala/com/digitalasset/canton/domain/sequencing/sequencer/store/MultiTenantedSequencerStoreTest.scala +++ b/community/domain/src/test/scala/com/digitalasset/canton/domain/sequencing/sequencer/store/MultiTenantedSequencerStoreTest.scala @@ -3,8 +3,8 @@ package com.digitalasset.canton.domain.sequencing.sequencer.store -import cats.data.NonEmptyList import cats.syntax.option._ +import com.daml.nonempty.NonEmptyUtil import com.digitalasset.canton.BaseTest import com.digitalasset.canton.data.CantonTimestamp import com.digitalasset.canton.domain.sequencing.sequencer.DomainSequencingTestUtils @@ -46,9 +46,9 @@ trait MultiTenantedSequencerStoreTest { val payloads = DomainSequencingTestUtils.payloadsForEvents(delivers) for { _unit <- store - .savePayloads(NonEmptyList.fromListUnsafe(payloads), instanceDiscriminator) + .savePayloads(NonEmptyUtil.fromUnsafe(payloads), instanceDiscriminator) .valueOrFail(s"Save payloads") - _unit <- store.saveEvents(NonEmptyList.fromListUnsafe(delivers)) + _unit <- store.saveEvents(NonEmptyUtil.fromUnsafe(delivers)) } yield { () } } diff --git a/community/domain/src/test/scala/com/digitalasset/canton/domain/sequencing/sequencer/store/SequencerStoreTest.scala b/community/domain/src/test/scala/com/digitalasset/canton/domain/sequencing/sequencer/store/SequencerStoreTest.scala index 660bcc117..9f681a80a 100644 --- 
a/community/domain/src/test/scala/com/digitalasset/canton/domain/sequencing/sequencer/store/SequencerStoreTest.scala +++ b/community/domain/src/test/scala/com/digitalasset/canton/domain/sequencing/sequencer/store/SequencerStoreTest.scala @@ -3,9 +3,11 @@ package com.digitalasset.canton.domain.sequencing.sequencer.store -import cats.data.{EitherT, NonEmptyList, NonEmptySet} +import cats.data.{EitherT, NonEmptySet} import cats.syntax.option._ import cats.syntax.traverse._ +import com.daml.nonempty.NonEmptyUtil +import com.daml.nonempty.NonEmpty import com.digitalasset.canton.BaseTest import com.digitalasset.canton.config.RequireTypes.String256M import com.digitalasset.canton.data.CantonTimestamp @@ -146,7 +148,7 @@ trait SequencerStoreTest extends AsyncWordSpec with BaseTest { } /** Save payloads using the default `instanceDiscriminator1` and expecting it to succeed */ - def savePayloads(payloads: NonEmptyList[Payload]): Future[Unit] = + def savePayloads(payloads: NonEmpty[Seq[Payload]]): Future[Unit] = valueOrFail(store.savePayloads(payloads, instanceDiscriminator1))("savePayloads") def saveWatermark(ts: CantonTimestamp): EitherT[Future, SaveWatermarkError, Unit] = @@ -188,10 +190,10 @@ trait SequencerStoreTest extends AsyncWordSpec with BaseTest { val env = Env() for { - _ <- env.savePayloads(NonEmptyList.of(payload1, payload2)) + _ <- env.savePayloads(NonEmpty(Seq, payload1, payload2)) deliverEvent1 <- env.deliverEvent(ts1, alice, messageId1, payload1.id) deliverEvent2 <- env.deliverEvent(ts2, alice, messageId2, payload2.id) - _ <- env.store.saveEvents(instanceIndex, NonEmptyList.of(deliverEvent1, deliverEvent2)) + _ <- env.store.saveEvents(instanceIndex, NonEmpty(Seq, deliverEvent1, deliverEvent2)) _ <- env.saveWatermark(deliverEvent2.timestamp).valueOrFail("saveWatermark") events <- env.readEvents(alice) _ = events should have size 2 @@ -205,11 +207,11 @@ trait SequencerStoreTest extends AsyncWordSpec with BaseTest { val env = Env() for { - _ <- env.savePayloads(NonEmptyList.of(payload1, payload2)) + _ <- env.savePayloads(NonEmpty(Seq, payload1, payload2)) // the first event is for alice, and the second for bob deliverEvent1 <- env.deliverEvent(ts1, alice, messageId1, payload1.id) deliverEvent2 <- env.deliverEvent(ts2, bob, messageId2, payload2.id) - _ <- env.store.saveEvents(instanceIndex, NonEmptyList.of(deliverEvent1, deliverEvent2)) + _ <- env.store.saveEvents(instanceIndex, NonEmpty(Seq, deliverEvent1, deliverEvent2)) _ <- env.saveWatermark(deliverEvent2.timestamp).valueOrFail("saveWatermark") aliceEvents <- env.readEvents(alice) bobEvents <- env.readEvents(bob) @@ -238,7 +240,7 @@ trait SequencerStoreTest extends AsyncWordSpec with BaseTest { val env = Env() for { - _ <- env.savePayloads(NonEmptyList.of(payload1, payload2, payload3)) + _ <- env.savePayloads(NonEmpty(Seq, payload1, payload2, payload3)) // the first event is for alice, and the second for bob deliverEventAlice <- env.deliverEvent(ts1, alice, messageId1, payload1.id) deliverEventAll <- env.deliverEvent( @@ -251,7 +253,7 @@ trait SequencerStoreTest extends AsyncWordSpec with BaseTest { deliverEventBob <- env.deliverEvent(ts3, bob, messageId3, payload3.id) _ <- env.store.saveEvents( instanceIndex, - NonEmptyList.of(deliverEventAlice, deliverEventAll, deliverEventBob), + NonEmpty(Seq, deliverEventAlice, deliverEventAll, deliverEventBob), ) _ <- env.saveWatermark(deliverEventBob.timestamp).valueOrFail("saveWatermark") aliceEvents <- env.readEvents(alice) @@ -292,7 +294,7 @@ trait SequencerStoreTest extends AsyncWordSpec 
with BaseTest { traceContext, ) timestampedError: Sequenced[Nothing] = Sequenced(ts1, error) - _ <- env.store.saveEvents(instanceIndex, NonEmptyList.of(timestampedError)) + _ <- env.store.saveEvents(instanceIndex, NonEmpty(Seq, timestampedError)) _ <- env.saveWatermark(timestampedError.timestamp).valueOrFail("saveWatermark") aliceEvents <- env.readEvents(alice) bobEvents <- env.readEvents(bob) @@ -308,19 +310,19 @@ trait SequencerStoreTest extends AsyncWordSpec with BaseTest { for { aliceId <- env.store.registerMember(alice, ts1) // lets write 20 deliver events - offsetting the second timestamp that is at epoch second 1 - events = NonEmptyList.fromListUnsafe( + events = NonEmptyUtil.fromUnsafe( (0L until 20L) .map(n => { env.deliverEventWithDefaults(ts1.plusSeconds(n), sender = aliceId)() }) - .toList + .toSeq ) - payloads = DomainSequencingTestUtils.payloadsForEvents(events.toList) + payloads = DomainSequencingTestUtils.payloadsForEvents(events) _ <- env.store - .savePayloads(NonEmptyList.fromListUnsafe(payloads), instanceDiscriminator1) + .savePayloads(NonEmptyUtil.fromUnsafe(payloads), instanceDiscriminator1) .valueOrFail(s"Save payloads") _ <- env.store.saveEvents(instanceIndex, events) - _ <- env.saveWatermark(events.last.timestamp).valueOrFail("saveWatermark") + _ <- env.saveWatermark(events.last1.timestamp).valueOrFail("saveWatermark") // read from the beginning (None) firstPage <- env.readEvents(alice, None, 10) // read from the ts of the last event of the prior page (read should be non-inclusive) @@ -347,9 +349,9 @@ trait SequencerStoreTest extends AsyncWordSpec with BaseTest { }) payloads = DomainSequencingTestUtils.payloadsForEvents(events) _ <- env.store - .savePayloads(NonEmptyList.fromListUnsafe(payloads), instanceDiscriminator1) + .savePayloads(NonEmptyUtil.fromUnsafe(payloads), instanceDiscriminator1) .valueOrFail(s"Save payloads") - _ <- env.store.saveEvents(instanceIndex, NonEmptyList.fromListUnsafe(events.toList)) + _ <- env.store.saveEvents(instanceIndex, NonEmptyUtil.fromUnsafe(events)) // put a watermark only a bit into our events _ <- env.saveWatermark(ts2.plusSeconds(5)).valueOrFail("saveWatermark") firstPage <- env.readEvents(alice, None, 10) @@ -381,11 +383,11 @@ trait SequencerStoreTest extends AsyncWordSpec with BaseTest { // we'll first write p1 and p2 that should work // then write p2 and p3 with a separate instance discriminator which should fail due to a conflicting id for { - _ <- valueOrFail(env.store.savePayloads(NonEmptyList.of(p1, p2), instanceDiscriminator1))( + _ <- valueOrFail(env.store.savePayloads(NonEmpty(Seq, p1, p2), instanceDiscriminator1))( "savePayloads1" ) error <- leftOrFail( - env.store.savePayloads(NonEmptyList.of(p2, p3), instanceDiscriminator2) + env.store.savePayloads(NonEmpty(Seq, p2, p3), instanceDiscriminator2) )("savePayloads2") } yield error shouldBe SavePayloadsError.ConflictingPayloadId(p2.id, instanceDiscriminator1) } @@ -564,14 +566,15 @@ trait SequencerStoreTest extends AsyncWordSpec with BaseTest { for { aliceId <- store.registerMember(alice, ts1) - _ <- store.saveEvents(0, NonEmptyList.of(deliverEventWithDefaults(ts2)())) + _ <- store.saveEvents(0, NonEmpty(Seq, deliverEventWithDefaults(ts2)())) bobId <- store.registerMember(bob, ts3) - _ <- env.savePayloads(NonEmptyList.of(payload1)) + _ <- env.savePayloads(NonEmpty(Seq, payload1)) // store a deliver events at ts4, ts5 & ts6 // (hopefully resulting in the earlier two deliver events being pruned) _ <- store.saveEvents( instanceIndex, - NonEmptyList.of( + NonEmpty( + Seq, 
Sequenced( ts(4), DeliverStoreEvent( diff --git a/community/domain/src/test/scala/com/digitalasset/canton/domain/sequencing/service/GrpcSequencerServiceTest.scala b/community/domain/src/test/scala/com/digitalasset/canton/domain/sequencing/service/GrpcSequencerServiceTest.scala index 24f6445ba..58a9f8147 100644 --- a/community/domain/src/test/scala/com/digitalasset/canton/domain/sequencing/service/GrpcSequencerServiceTest.scala +++ b/community/domain/src/test/scala/com/digitalasset/canton/domain/sequencing/service/GrpcSequencerServiceTest.scala @@ -3,9 +3,10 @@ package com.digitalasset.canton.domain.sequencing.service -import cats.data.{EitherT, NonEmptyList, NonEmptySet} +import cats.data.EitherT import cats.syntax.foldable._ import cats.syntax.option._ +import com.daml.nonempty.NonEmpty import com.digitalasset.canton.concurrent.Threading import com.digitalasset.canton.config.RequireTypes.NonNegativeInt import com.digitalasset.canton.crypto.DomainSyncCryptoClient @@ -403,14 +404,15 @@ class GrpcSequencerServiceTest extends FixtureAsyncWordSpec with BaseTest { ClosedEnvelope( ByteString.copyFromUtf8("message to two mediators and the participant"), Recipients( - NonEmptyList.of( + NonEmpty( + Seq, RecipientsTree( - NonEmptySet.of(participant), - List( - RecipientsTree(NonEmptySet.of(mediator1), List.empty), - RecipientsTree(NonEmptySet.of(mediator2), List.empty), + NonEmpty.mk(Set, participant), + Seq( + RecipientsTree.leaf(NonEmpty.mk(Set, mediator1)), + RecipientsTree.leaf(NonEmpty.mk(Set, mediator2)), ), - ) + ), ) ), ) diff --git a/community/participant/src/main/daml/daml.yaml b/community/participant/src/main/daml/daml.yaml index 1dffcf133..f9144fee8 100644 --- a/community/participant/src/main/daml/daml.yaml +++ b/community/participant/src/main/daml/daml.yaml @@ -1,7 +1,7 @@ -sdk-version: 2.1.0-snapshot.20220328.9630.0.66c37bad +sdk-version: 2.1.0-snapshot.20220407.9685.0.7ed507cf name: AdminWorkflows source: AdminWorkflows.daml -version: 2.1.0 +version: 2.2.0 dependencies: - daml-prim - daml-stdlib diff --git a/community/participant/src/main/scala/com/digitalasset/canton/participant/admin/RepairService.scala b/community/participant/src/main/scala/com/digitalasset/canton/participant/admin/RepairService.scala index ed4d06a91..4bcc1cb5d 100644 --- a/community/participant/src/main/scala/com/digitalasset/canton/participant/admin/RepairService.scala +++ b/community/participant/src/main/scala/com/digitalasset/canton/participant/admin/RepairService.scala @@ -187,7 +187,6 @@ class RepairService( signatories = contractWithMetadata.signatories, stakeholders = contractWithMetadata.stakeholders, key = contractWithMetadata.keyWithMaintainers, - byInterface = None, version = contractWithMetadata.instance.version, ) } @@ -245,7 +244,6 @@ class RepairService( // If the contract keys were needed, we'd have to reinterpret the contract to look up the key. 
key = None, byKey = false, - byInterface = None, version = contract.rawContractInstance.contractInstance.version, ) ) diff --git a/community/participant/src/main/scala/com/digitalasset/canton/participant/ledger/api/CantonLedgerApiServerWrapper.scala b/community/participant/src/main/scala/com/digitalasset/canton/participant/ledger/api/CantonLedgerApiServerWrapper.scala index 781ac2f64..e492bb07b 100644 --- a/community/participant/src/main/scala/com/digitalasset/canton/participant/ledger/api/CantonLedgerApiServerWrapper.scala +++ b/community/participant/src/main/scala/com/digitalasset/canton/participant/ledger/api/CantonLedgerApiServerWrapper.scala @@ -58,7 +58,7 @@ import com.digitalasset.canton.participant.util.LoggingContextUtil import com.digitalasset.canton.tracing.{NoTracing, TracerProvider} import com.digitalasset.canton.util.EitherTUtil import com.digitalasset.canton.{LedgerParticipantId, checked} -import io.opentelemetry.instrumentation.grpc.v1_5.GrpcTracing +import io.opentelemetry.instrumentation.grpc.v1_6.GrpcTracing import java.time.{Duration => JDuration} import java.util.UUID.randomUUID @@ -338,7 +338,7 @@ object CantonLedgerApiServerWrapper extends NoTracing { config.cantonParameterConfig.loggingConfig.api, ), GrpcTracing - .newBuilder(config.tracerProvider.openTelemetry) + .builder(config.tracerProvider.openTelemetry) .build() .newServerInterceptor(), ), diff --git a/community/participant/src/main/scala/com/digitalasset/canton/participant/protocol/MessageDispatcher.scala b/community/participant/src/main/scala/com/digitalasset/canton/participant/protocol/MessageDispatcher.scala index 568442e14..95e1f55c9 100644 --- a/community/participant/src/main/scala/com/digitalasset/canton/participant/protocol/MessageDispatcher.scala +++ b/community/participant/src/main/scala/com/digitalasset/canton/participant/protocol/MessageDispatcher.scala @@ -3,10 +3,11 @@ package com.digitalasset.canton.participant.protocol -import cats.data.{Chain, NonEmptyList, NonEmptySet} +import cats.data.{Chain, NonEmptyList} import cats.syntax.alternative._ import cats.syntax.functorFilter._ import cats.{Foldable, Monoid} +import com.daml.nonempty.NonEmpty import com.digitalasset.canton.data.ViewType.{ TransactionViewType, TransferInViewType, @@ -450,7 +451,7 @@ trait MessageDispatcher { this: NamedLogging => if (furtherRHMs.isEmpty) { val validRecipients = rootHashMessage.recipients.asSingleGroup.contains( - NonEmptySet.of[Member](participantId, mediatorId) + NonEmpty.mk(Set, participantId, mediatorId) ) if (validRecipients) { goodRootHashMessage(rootHashMessage.protocolMessage, mediatorId) diff --git a/community/participant/src/main/scala/com/digitalasset/canton/participant/protocol/TransactionProcessingSteps.scala b/community/participant/src/main/scala/com/digitalasset/canton/participant/protocol/TransactionProcessingSteps.scala index 8e66d4783..ddf577756 100644 --- a/community/participant/src/main/scala/com/digitalasset/canton/participant/protocol/TransactionProcessingSteps.scala +++ b/community/participant/src/main/scala/com/digitalasset/canton/participant/protocol/TransactionProcessingSteps.scala @@ -24,6 +24,8 @@ import com.digitalasset.canton.data.ViewPosition.ListIndex import com.digitalasset.canton.data._ import com.digitalasset.canton.error.TransactionError import com.daml.error.ErrorCode +import com.daml.nonempty.NonEmptyUtil +import com.daml.nonempty.NonEmpty import com.digitalasset.canton.data.ViewType.TransactionViewType import com.digitalasset.canton.lifecycle.FutureUnlessShutdown import 
com.digitalasset.canton.topology.{DomainId, MediatorId, ParticipantId} @@ -483,7 +485,7 @@ class TransactionProcessingSteps( ): Either[DeserializationError, LightTransactionViewTree] = LightTransactionViewTree .fromByteString(pureCrypto)(bytes) - .leftMap(DeserializationError(_, bytes)) + .leftMap(err => DeserializationError(err.message, bytes)) type DecryptionError = EncryptedViewMessageDecryptionError[TransactionViewType] @@ -749,8 +751,8 @@ class TransactionProcessingSteps( ): TransactionValidationResult = { val viewResults = SortedMap.newBuilder[ViewHash, ViewValidationResult] - enrichedTransaction.rootViewsWithUsedAndCreated.rootViewsWithContractKeys.toList - .flatMap(_._1.flatten.toList) + enrichedTransaction.rootViewsWithUsedAndCreated.rootViewsWithContractKeys.forgetNE + .flatMap(_._1.flatten) .foreach { view => val viewParticipantData = view.viewParticipantData val createdCore = viewParticipantData.createdCore.map(_.contract.contractId).toSet @@ -1475,7 +1477,7 @@ class TransactionProcessingSteps( consumedInputsOfHostedStakeholders -- maybeCreatedResult.keySet, maybeCreated = maybeCreatedResult, transient = transientResult, - rootViewsWithContractKeys = NonEmptyList.fromListUnsafe(perRootViewInputKeysB.result()), + rootViewsWithContractKeys = NonEmptyUtil.fromUnsafe(perRootViewInputKeysB.result()), uckFreeKeysOfHostedMaintainers = freeKeys, uckUpdatedKeysOfHostedMaintainers = updatedKeys, //TODO(i5352): Unit test this @@ -1512,9 +1514,9 @@ object TransactionProcessingSteps { ) { def transactionId: TransactionId = - rootViewsWithUsedAndCreated.rootViewsWithContractKeys.head._1.transactionId + rootViewsWithUsedAndCreated.rootViewsWithContractKeys.head1._1.transactionId def ledgerTime: CantonTimestamp = - rootViewsWithUsedAndCreated.rootViewsWithContractKeys.head._1.ledgerTime + rootViewsWithUsedAndCreated.rootViewsWithContractKeys.head1._1.ledgerTime } case class ParallelChecksResult( @@ -1536,17 +1538,16 @@ object TransactionProcessingSteps { /** @throws java.lang.IllegalArgumentException if `receivedViewTrees` contains views with different transaction root hashes */ - def tryCommonData(receivedViewTrees: NonEmptyList[TransactionViewTree]): CommonData = - receivedViewTrees.toList + def tryCommonData(receivedViewTrees: NonEmpty[Seq[TransactionViewTree]]): CommonData = { + val distinctCommonData = receivedViewTrees .map(v => CommonData(v.transactionId, v.ledgerTime, v.submissionTime, v.confirmationPolicy)) - .distinct match { - case List() => throw new IllegalStateException("Found non-empty list with no element.") - case List(commonData) => commonData - case ress => - throw new IllegalArgumentException( - s"Found several different transaction IDs, LETs or confirmation policies: $ress" - ) - } + .distinct + if (distinctCommonData.lengthCompare(1) == 0) distinctCommonData.head1 + else + throw new IllegalArgumentException( + s"Found several different transaction IDs, LETs or confirmation policies: $distinctCommonData" + ) + } case class CommonData( transactionId: TransactionId, diff --git a/community/participant/src/main/scala/com/digitalasset/canton/participant/protocol/transfer/TransferInProcessingSteps.scala b/community/participant/src/main/scala/com/digitalasset/canton/participant/protocol/transfer/TransferInProcessingSteps.scala index 5273e3313..a241c299b 100644 --- a/community/participant/src/main/scala/com/digitalasset/canton/participant/protocol/transfer/TransferInProcessingSteps.scala +++ 
b/community/participant/src/main/scala/com/digitalasset/canton/participant/protocol/transfer/TransferInProcessingSteps.scala @@ -3,7 +3,7 @@ package com.digitalasset.canton.participant.protocol.transfer -import cats.data.{EitherT, NonEmptyList, NonEmptySet} +import cats.data.{EitherT, NonEmptyList} import cats.syntax.alternative._ import cats.syntax.either._ import cats.syntax.functor._ @@ -14,6 +14,8 @@ import com.daml.lf.CantonOnly import com.daml.lf.data.ImmArray import com.daml.lf.engine.{Error => LfError} import com.daml.lf.interpretation.{Error => LfInterpretationError} +import com.daml.nonempty.NonEmptyUtil +import com.daml.nonempty.NonEmpty import com.digitalasset.canton.crypto.{DecryptionError => _, EncryptionError => _, _} import com.digitalasset.canton.data.ViewType.TransferInViewType import com.digitalasset.canton.data._ @@ -183,9 +185,9 @@ class TransferInProcessingSteps( mediatorMessage = fullTree.mediatorMessage recipientsSet <- { import cats.syntax.traverse._ - stakeholders.toList + stakeholders.toSeq .traverse(activeParticipantsOfParty) - .map(_.foldLeft[Set[Member]](Set.empty[Member])(_ ++ _)) + .map(_.foldLeft(Set.empty[Member])(_ ++ _)) } recipients <- EitherT.fromEither[Future]( Recipients @@ -208,8 +210,8 @@ class TransferInProcessingSteps( val rootHashRecipients = Recipients.groups( checked( - NonEmptyList.fromListUnsafe( - recipientsSet.toList.map(participant => NonEmptySet.of(mediatorId, participant)) + NonEmptyUtil.fromUnsafe( + recipientsSet.toSeq.map(participant => NonEmpty(Set, mediatorId, participant)) ) ) ) @@ -772,7 +774,6 @@ object TransferInProcessingSteps { contract.metadata.signatories, contract.metadata.stakeholders, key = None, - byInterface = None, contract.contractInstance.version, ) val committedTransaction = LfCommittedTransaction( diff --git a/community/participant/src/main/scala/com/digitalasset/canton/participant/protocol/transfer/TransferOutProcessingSteps.scala b/community/participant/src/main/scala/com/digitalasset/canton/participant/protocol/transfer/TransferOutProcessingSteps.scala index ee5b807ed..c0790f9a6 100644 --- a/community/participant/src/main/scala/com/digitalasset/canton/participant/protocol/transfer/TransferOutProcessingSteps.scala +++ b/community/participant/src/main/scala/com/digitalasset/canton/participant/protocol/transfer/TransferOutProcessingSteps.scala @@ -10,6 +10,8 @@ import cats.syntax.foldable._ import cats.syntax.traverse._ import cats.syntax.traverseFilter._ import cats.{Applicative, MonoidK} +import com.daml.nonempty.NonEmptyUtil +import com.daml.nonempty.NonEmpty import com.digitalasset.canton.crypto.{DomainSnapshotSyncCryptoApi, HashOps} import com.digitalasset.canton.data.ViewType.TransferOutViewType import com.digitalasset.canton.data.{CantonTimestamp, FullTransferOutTree, ViewType} @@ -190,8 +192,8 @@ class TransferOutProcessingSteps( val rootHashRecipients = Recipients.groups( checked( - NonEmptyList.fromListUnsafe( - recipients.toList.map(participant => NonEmptySet.of(mediatorId, participant)) + NonEmptyUtil.fromUnsafe( + recipients.toSeq.map(participant => NonEmpty(Set, mediatorId, participant)) ) ) ) diff --git a/community/participant/src/main/scala/com/digitalasset/canton/participant/protocol/validation/ModelConformanceChecker.scala b/community/participant/src/main/scala/com/digitalasset/canton/participant/protocol/validation/ModelConformanceChecker.scala index 3240f97ff..c6c72ec60 100644 --- 
a/community/participant/src/main/scala/com/digitalasset/canton/participant/protocol/validation/ModelConformanceChecker.scala +++ b/community/participant/src/main/scala/com/digitalasset/canton/participant/protocol/validation/ModelConformanceChecker.scala @@ -3,9 +3,12 @@ package com.digitalasset.canton.participant.protocol.validation -import cats.data.{EitherT, NonEmptyList} +import cats.data.EitherT import cats.syntax.bifunctor._ +import cats.syntax.traverse._ import com.daml.lf.engine +import com.daml.nonempty.NonEmpty +import com.daml.nonempty.catsinstances._ import com.digitalasset.canton.data.ViewParticipantData.RootAction import com.digitalasset.canton.data.{ CantonTimestamp, @@ -67,9 +70,9 @@ class ModelConformanceChecker( * @return the resulting LfTransaction with [[com.digitalasset.canton.protocol.LfContractId]]s only */ def check( - rootViewsWithInputKeys: NonEmptyList[ + rootViewsWithInputKeys: NonEmpty[Seq[ (TransactionViewTree, Map[LfGlobalKey, Option[LfContractId]]) - ], + ]], requestCounter: RequestCounter, topologySnapshot: TopologySnapshot, commonData: CommonData, @@ -77,7 +80,7 @@ class ModelConformanceChecker( val CommonData(transactionId, ledgerTime, submissionTime, confirmationPolicy) = commonData for { - suffixedTxs <- rootViewsWithInputKeys.traverse { case (v, keys) => + suffixedTxs <- rootViewsWithInputKeys.toNEF.traverse { case (v, keys) => checkView( v, keys, diff --git a/community/participant/src/main/scala/com/digitalasset/canton/participant/protocol/validation/UsedAndCreated.scala b/community/participant/src/main/scala/com/digitalasset/canton/participant/protocol/validation/UsedAndCreated.scala index 3b990c86d..29c2f2cf7 100644 --- a/community/participant/src/main/scala/com/digitalasset/canton/participant/protocol/validation/UsedAndCreated.scala +++ b/community/participant/src/main/scala/com/digitalasset/canton/participant/protocol/validation/UsedAndCreated.scala @@ -3,7 +3,7 @@ package com.digitalasset.canton.participant.protocol.validation -import cats.data.NonEmptyList +import com.daml.nonempty.NonEmpty import com.digitalasset.canton.LfPartyId import com.digitalasset.canton.data.TransactionViewTree import com.digitalasset.canton.participant.protocol.conflictdetection.{ @@ -24,9 +24,9 @@ case class UsedAndCreated( consumedInputsOfHostedStakeholders: Map[LfContractId, WithContractHash[Set[LfPartyId]]], maybeCreated: Map[LfContractId, Option[SerializableContract]], transient: Map[LfContractId, WithContractHash[Set[LfPartyId]]], - rootViewsWithContractKeys: NonEmptyList[ + rootViewsWithContractKeys: NonEmpty[Seq[ (TransactionViewTree, Map[LfGlobalKey, Option[LfContractId]]) - ], + ]], uckFreeKeysOfHostedMaintainers: Set[LfGlobalKey], uckUpdatedKeysOfHostedMaintainers: Map[LfGlobalKey, ContractKeyJournal.Status], hostedInformeeStakeholders: Set[LfPartyId], diff --git a/community/participant/src/main/scala/com/digitalasset/canton/participant/store/DamlPackageStore.scala b/community/participant/src/main/scala/com/digitalasset/canton/participant/store/DamlPackageStore.scala index a949ce7d0..a851e4c38 100644 --- a/community/participant/src/main/scala/com/digitalasset/canton/participant/store/DamlPackageStore.scala +++ b/community/participant/src/main/scala/com/digitalasset/canton/participant/store/DamlPackageStore.scala @@ -76,7 +76,7 @@ trait DamlPackageStore extends AutoCloseable { this: NamedLogging => * This checks whether a DAR containing `packages` can be safely removed -- if there's any package that would be * left without a DAR then we won't remove the DAR. 
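// --------------------------------------------------------------------------------------------
// Illustrative sketch (not part of the patch): the safety check described in the scaladoc above,
// reduced to in-memory data. darsContaining maps each package id to the DARs that contain it; a
// DAR may only be removed if every one of its packages is also covered by some other DAR. Names
// are hypothetical simplifications of the real store query.
object DarRemovalCheckSketch {
  // Returns the first package that would be left without a DAR if removeDar were deleted, if any.
  def anyPackagePreventsDarRemoval(
      packages: Seq[String],
      removeDar: String,
      darsContaining: String => Set[String],
  ): Option[String] =
    packages.find(pkg => (darsContaining(pkg) - removeDar).isEmpty)
}
// --------------------------------------------------------------------------------------------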
     */
-  def anyPackagePreventsDarRemoval(packages: List[PackageId], removeDar: DarDescriptor)(implicit
+  def anyPackagePreventsDarRemoval(packages: Seq[PackageId], removeDar: DarDescriptor)(implicit
       tc: TraceContext
   ): OptionT[Future, PackageId]
 }
diff --git a/community/participant/src/main/scala/com/digitalasset/canton/participant/store/db/DbActiveContractStore.scala b/community/participant/src/main/scala/com/digitalasset/canton/participant/store/db/DbActiveContractStore.scala
index 46736e593..7568754e6 100644
--- a/community/participant/src/main/scala/com/digitalasset/canton/participant/store/db/DbActiveContractStore.scala
+++ b/community/participant/src/main/scala/com/digitalasset/canton/participant/store/db/DbActiveContractStore.scala
@@ -3,12 +3,12 @@
 package com.digitalasset.canton.participant.store.db
-import cats.data.{Chain, EitherT, NonEmptyList}
+import cats.data.{Chain, EitherT}
 import cats.syntax.foldable._
-import cats.syntax.list._
 import cats.syntax.traverse._
 import cats.syntax.traverseFilter._
 import com.daml.lf.data.Ref.PackageId
+import com.daml.nonempty.NonEmpty
 import com.digitalasset.canton.config.ProcessingTimeout
 import com.digitalasset.canton.config.RequireTypes.{PositiveNumeric, String100}
 import com.digitalasset.canton.data.CantonTimestamp
@@ -212,7 +212,7 @@ class DbActiveContractStore(
           }
           .map(_.toMap)
       case _: DbStorage.Profile.Postgres =>
-        contractIds.toList.toNel match {
+        NonEmpty.from(contractIds.toSeq) match {
           case None => Future.successful(Map.empty)
           case Some(contractIdsNel) =>
             import DbStorage.Implicits.BuilderChain._
@@ -666,7 +666,7 @@ class DbActiveContractStore(
     }
     def checkIdempotence(
-        idsToCheck: NonEmptyList[LfContractId]
+        idsToCheck: NonEmpty[Seq[LfContractId]]
     ): CheckedT[Future, AcsError, AcsWarning, Unit] = {
       import DbStorage.Implicits.BuilderChain._
       val contractIdsNotInsertedInClauses =
@@ -731,7 +731,7 @@ class DbActiveContractStore(
       // Check all contracts whether they have been inserted or are already there
       // We don't analyze the update counts
       // so that we can use the fast IGNORE_ROW_ON_DUPKEY_INDEX directive in Oracle
-      contractIds.toList.toNel.map(checkIdempotence).getOrElse(CheckedT.pure(()))
+      NonEmpty.from(contractIds).map(checkIdempotence).getOrElse(CheckedT.pure(()))
     } else CheckedT.pure(())
   }
 }
diff --git a/community/participant/src/main/scala/com/digitalasset/canton/participant/store/db/DbContractKeyJournal.scala b/community/participant/src/main/scala/com/digitalasset/canton/participant/store/db/DbContractKeyJournal.scala
index 233ee75da..7f34effab 100644
--- a/community/participant/src/main/scala/com/digitalasset/canton/participant/store/db/DbContractKeyJournal.scala
+++ b/community/participant/src/main/scala/com/digitalasset/canton/participant/store/db/DbContractKeyJournal.scala
@@ -7,8 +7,8 @@
 import cats.Monad
 import cats.data.EitherT
 import cats.syntax.either._
 import cats.syntax.functorFilter._
-import cats.syntax.list._
 import cats.syntax.traverse._
+import com.daml.nonempty.NonEmpty
 import com.digitalasset.canton.data.CantonTimestamp
 import com.digitalasset.canton.logging.NamedLoggerFactory
 import com.digitalasset.canton.metrics.MetricHandle.GaugeM
@@ -57,7 +57,7 @@ class DbContractKeyJournal(
     else {
       processingTime.metric.event {
         import DbStorage.Implicits.BuilderChain._
-        keys.toList.toNel match {
+        NonEmpty.from(keys.toSeq) match {
           case None => Future.successful(Map.empty)
           case Some(keysNel) =>
             val inClauses = DbStorage.toInClauses_(
diff --git a/community/participant/src/main/scala/com/digitalasset/canton/participant/store/db/DbContractStore.scala b/community/participant/src/main/scala/com/digitalasset/canton/participant/store/db/DbContractStore.scala
index fa565f88a..555126b70 100644
--- a/community/participant/src/main/scala/com/digitalasset/canton/participant/store/db/DbContractStore.scala
+++ b/community/participant/src/main/scala/com/digitalasset/canton/participant/store/db/DbContractStore.scala
@@ -3,10 +3,11 @@
 package com.digitalasset.canton.participant.store.db
-import cats.data.{EitherT, NonEmptyList, OptionT}
+import cats.data.{EitherT, OptionT}
 import cats.syntax.foldable._
 import cats.syntax.list._
 import cats.syntax.traverse._
+import com.daml.nonempty.NonEmpty
 import com.digitalasset.canton.{LfPartyId, checked}
 import com.digitalasset.canton.config.RequireTypes.{PositiveNumeric, String2066}
 import com.digitalasset.canton.config.{BatchAggregatorConfig, CacheConfig, ProcessingTimeout}
@@ -77,7 +78,7 @@ class DbContractStore(
       override val kind: String = "request"
       override def logger: TracedLogger = DbContractStore.this.logger
-      override def executeBatch(ids: NonEmptyList[Traced[LfContractId]])(implicit
+      override def executeBatch(ids: NonEmpty[Seq[Traced[LfContractId]]])(implicit
           traceContext: TraceContext
       ): Future[Iterable[Option[StoredContract]]] = {
         processingTime.metric.event {
@@ -98,7 +99,7 @@ class DbContractStore(
   }
   private def lookupQueries(
-      ids: NonEmptyList[LfContractId]
+      ids: NonEmpty[Seq[LfContractId]]
   ): Iterable[DbAction.ReadOnly[Seq[Option[StoredContract]]]] = {
     import DbStorage.Implicits.BuilderChain._
@@ -300,7 +301,8 @@ class DbContractStore(
   )(implicit traceContext: TraceContext): Future[Unit] = {
     import DbStorage.Implicits.BuilderChain._
-    contractIds.toList.toNel
+    NonEmpty
+      .from(contractIds.toSeq)
       .map { contractIds =>
         val inClauses =
           DbStorage.toInClauses_("contract_id", contractIds, maxContractIdSqlInListSize)
diff --git a/community/participant/src/main/scala/com/digitalasset/canton/participant/store/db/DbDamlPackageStore.scala b/community/participant/src/main/scala/com/digitalasset/canton/participant/store/db/DbDamlPackageStore.scala
index 98d9c552d..3ce61bd6c 100644
--- a/community/participant/src/main/scala/com/digitalasset/canton/participant/store/db/DbDamlPackageStore.scala
+++ b/community/participant/src/main/scala/com/digitalasset/canton/participant/store/db/DbDamlPackageStore.scala
@@ -3,9 +3,10 @@
 package com.digitalasset.canton.participant.store.db
-import cats.data.{NonEmptyList, OptionT}
+import cats.data.OptionT
 import com.daml.daml_lf_dev.DamlLf
 import com.daml.lf.data.Ref.PackageId
+import com.daml.nonempty.NonEmpty
 import com.digitalasset.canton.LfPackageId
 import com.digitalasset.canton.config.ProcessingTimeout
 import com.digitalasset.canton.config.RequireTypes.LengthLimitedString.DarName
@@ -191,7 +192,7 @@ class DbDamlPackageStore(
     )
   }
-  override def anyPackagePreventsDarRemoval(packages: List[PackageId], removeDar: DarDescriptor)(
+  override def anyPackagePreventsDarRemoval(packages: Seq[PackageId], removeDar: DarDescriptor)(
       implicit tc: TraceContext
   ): OptionT[Future, PackageId] = {
@@ -201,7 +202,7 @@ class DbDamlPackageStore(
     val darHex = removeDar.hash.toLengthLimitedHexString
     def packagesWithoutDar(
-        nonEmptyPackages: NonEmptyList[PackageId]
+        nonEmptyPackages: NonEmpty[Seq[PackageId]]
     ) = {
       val queryActions = DbStorage
         .toInClauses(
@@ -235,8 +236,8 @@ class DbDamlPackageStore(
       OptionT(resultF)
     }
-    NonEmptyList
-      .fromList(packages)
+    NonEmpty
+      .from(packages)
       .fold(OptionT.none[Future, PackageId])(packagesWithoutDar)
   }
diff --git a/community/participant/src/main/scala/com/digitalasset/canton/participant/store/db/DbInFlightSubmissionStore.scala b/community/participant/src/main/scala/com/digitalasset/canton/participant/store/db/DbInFlightSubmissionStore.scala
index 312d98d81..658a2ce7c 100644
--- a/community/participant/src/main/scala/com/digitalasset/canton/participant/store/db/DbInFlightSubmissionStore.scala
+++ b/community/participant/src/main/scala/com/digitalasset/canton/participant/store/db/DbInFlightSubmissionStore.scala
@@ -3,9 +3,10 @@
 package com.digitalasset.canton.participant.store.db
-import cats.data.{EitherT, NonEmptyList, OptionT}
+import cats.data.{EitherT, OptionT}
 import cats.syntax.alternative._
 import cats.syntax.option._
+import com.daml.nonempty.NonEmpty
 import com.digitalasset.canton.config.{BatchAggregatorConfig, ProcessingTimeout}
 import com.digitalasset.canton.config.RequireTypes.PositiveNumeric
 import com.digitalasset.canton.data.CantonTimestamp
@@ -285,7 +286,7 @@ object DbInFlightSubmissionStore {
     override def kind: String = "in-flight submission"
     override def executeBatch(
-        submissions: NonEmptyList[Traced[InFlightSubmission[UnsequencedSubmission]]]
+        submissions: NonEmpty[Seq[Traced[InFlightSubmission[UnsequencedSubmission]]]]
     )(implicit traceContext: TraceContext): Future[Iterable[Try[Result]]] = {
       type SubmissionAndCell =
@@ -354,7 +355,7 @@ object DbInFlightSubmissionStore {
     }
     override protected def bulkUpdateAction(
-        submissions: NonEmptyList[Traced[InFlightSubmission[UnsequencedSubmission]]]
+        submissions: NonEmpty[Seq[Traced[InFlightSubmission[UnsequencedSubmission]]]]
     )(implicit
         batchTraceContext: TraceContext
     ): DBIOAction[Array[Int], NoStream, Effect.All] = {
@@ -399,7 +400,7 @@ object DbInFlightSubmissionStore {
         ErrorLoggingContext.fromTracedLogger(logger)
       DbStorage.bulkOperation(
         insertQuery,
-        submissions.map(_.value).toList,
+        submissions.map(_.value),
         storage.profile,
         useTransactionForOracle = true,
       ) { pp => submission =>
@@ -428,7 +429,7 @@ object DbInFlightSubmissionStore {
       submission.changeIdHash
     /** A list of queries for the items that we want to check for */
-    override protected def checkQuery(submissionsToCheck: NonEmptyList[ChangeIdHash])(implicit
+    override protected def checkQuery(submissionsToCheck: NonEmpty[Seq[ChangeIdHash]])(implicit
        batchTraceContext: TraceContext
     ): Iterable[ReadOnly[Iterable[CheckData]]] = {
       DbStorage.toInClauses_("change_id_hash", submissionsToCheck, maxItemsInSqlInClause).map {
diff --git a/community/participant/src/main/scala/com/digitalasset/canton/participant/store/db/DbMultiDomainEventLog.scala b/community/participant/src/main/scala/com/digitalasset/canton/participant/store/db/DbMultiDomainEventLog.scala
index 11c3f0be8..7bd412c83 100644
--- a/community/participant/src/main/scala/com/digitalasset/canton/participant/store/db/DbMultiDomainEventLog.scala
+++ b/community/participant/src/main/scala/com/digitalasset/canton/participant/store/db/DbMultiDomainEventLog.scala
@@ -6,13 +6,14 @@
 package com.digitalasset.canton.participant.store.db
 import akka.NotUsed
 import akka.stream._
 import akka.stream.scaladsl.{Keep, Sink, Source}
-import cats.data.{NonEmptyList, OptionT}
+import cats.data.OptionT
 import cats.syntax.foldable._
 import cats.syntax.functorFilter._
 import cats.syntax.option._
 import cats.syntax.traverseFilter._
 import com.daml.platform.akkastreams.dispatcher.Dispatcher
 import com.daml.platform.akkastreams.dispatcher.SubSource.RangeSource
+import com.daml.nonempty.NonEmpty
 import com.digitalasset.canton.config.ProcessingTimeout
 import com.digitalasset.canton.config.RequireTypes.PositiveNumeric
 import com.digitalasset.canton.data.CantonTimestamp
@@ -470,44 +471,41 @@ class DbMultiDomainEventLog private[db] (
       eventIds: Seq[TimestampedEvent.EventId]
   )(implicit traceContext: TraceContext): Future[
     Map[TimestampedEvent.EventId, (GlobalOffset, TimestampedEventAndCausalChange, CantonTimestamp)]
-  ] = {
-    NonEmptyList.fromList(eventIds.toList) match {
-      case None => Future.successful(Map.empty)
-      case Some(nonEmptyEventIds) =>
-        val inClauses =
-          DbStorage.toInClauses_(
-            "el.event_id",
-            nonEmptyEventIds,
-            PositiveNumeric.tryCreate(maxBatchSize),
-          )
-        val queries = inClauses.map { inClause =>
-          import DbStorage.Implicits.BuilderChain._
-          (sql"""
+  ] = eventIds match {
+    case NonEmpty(nonEmptyEventIds) =>
+      val inClauses = DbStorage.toInClauses_(
+        "el.event_id",
+        nonEmptyEventIds,
+        PositiveNumeric.tryCreate(maxBatchSize),
+      )
+      val queries = inClauses.map { inClause =>
+        import DbStorage.Implicits.BuilderChain._
+        (sql"""
          select global_offset, el.local_offset, request_sequencer_counter, el.event_id, content, trace_context, causality_update, publication_time
          from linearized_event_log lel join event_log el on lel.log_id = el.log_id and lel.local_offset = el.local_offset
          where
          """ ++ inClause).as[(GlobalOffset, TimestampedEventAndCausalChange, CantonTimestamp)]
-        }
-        storage.sequentialQueryAndCombine(queries, functionFullName).map { events =>
-          events.map {
-            case data @ (
-                  globalOffset,
-                  TimestampedEventAndCausalChange(event, _causalChange),
-                  _publicationTime,
-                ) =>
-              val eventId = event.eventId.getOrElse(
-                ErrorUtil.internalError(
-                  new DbDeserializationException(
-                    s"Event $event at global offset $globalOffset does not have an event ID."
+      }
+      storage.sequentialQueryAndCombine(queries, functionFullName).map { events =>
+        events.map {
+          case data @ (
+                globalOffset,
+                TimestampedEventAndCausalChange(event, _causalChange),
+                _publicationTime,
+              ) =>
+            val eventId = event.eventId.getOrElse(
+              ErrorUtil.internalError(
+                new DbDeserializationException(
+                  s"Event $event at global offset $globalOffset does not have an event ID."
                 )
               )
-            eventId -> data
-          }.toMap
-        }
-    }
+            )
+            eventId -> data
+        }.toMap
+      }
+    case _ => Future.successful(Map.empty)
   }
   override def lookupTransactionDomain(transactionId: LedgerTransactionId)(implicit
diff --git a/community/participant/src/main/scala/com/digitalasset/canton/participant/store/db/DbRequestJournalStore.scala b/community/participant/src/main/scala/com/digitalasset/canton/participant/store/db/DbRequestJournalStore.scala
index 40baada0c..f7a7f68ff 100644
--- a/community/participant/src/main/scala/com/digitalasset/canton/participant/store/db/DbRequestJournalStore.scala
+++ b/community/participant/src/main/scala/com/digitalasset/canton/participant/store/db/DbRequestJournalStore.scala
@@ -3,8 +3,9 @@
 package com.digitalasset.canton.participant.store.db
-import cats.data.{EitherT, NonEmptyList, OptionT}
+import cats.data.{EitherT, OptionT}
 import cats.syntax.option._
+import com.daml.nonempty.NonEmpty
 import com.digitalasset.canton.config.{BatchAggregatorConfig, ProcessingTimeout}
 import com.digitalasset.canton.config.RequireTypes.PositiveNumeric
 import com.digitalasset.canton.data.CantonTimestamp
@@ -96,11 +97,11 @@ class DbRequestJournalStore(
       override def kind: String = "request"
       override def logger: TracedLogger = DbRequestJournalStore.this.logger
-      override def executeBatch(items: NonEmptyList[Traced[RequestData]])(implicit
+      override def executeBatch(items: NonEmpty[Seq[Traced[RequestData]]])(implicit
          traceContext: TraceContext
       ): Future[Iterable[Try[Unit]]] = bulkUpdateWithCheck(items, "DbRequestJournalStore.insert")
-      override protected def bulkUpdateAction(items: NonEmptyList[Traced[RequestData]])(implicit
+      override protected def bulkUpdateAction(items: NonEmpty[Seq[Traced[RequestData]]])(implicit
          batchTraceContext: TraceContext
       ): DBIOAction[Array[Int], NoStream, Effect.All] = {
         def setData(pp: PositionedParameters)(item: RequestData): Unit = {
@@ -146,7 +147,7 @@ class DbRequestJournalStore(
       override protected def itemIdentifier(item: RequestData): ItemIdentifier = item.rc
       override protected def dataIdentifier(state: CheckData): ItemIdentifier = state.rc
-      override protected def checkQuery(itemsToCheck: NonEmptyList[ItemIdentifier])(implicit
+      override protected def checkQuery(itemsToCheck: NonEmpty[Seq[ItemIdentifier]])(implicit
          batchTraceContext: TraceContext
       ): Iterable[ReadOnly[Iterable[CheckData]]] = bulkQueryDbio(itemsToCheck)
@@ -187,7 +188,7 @@ class DbRequestJournalStore(
   }
   private def bulkQueryDbio(
-      rcs: NonEmptyList[RequestCounter]
+      rcs: NonEmpty[Seq[RequestCounter]]
   ): Iterable[DbAction.ReadOnly[Iterable[RequestData]]] =
     DbStorage.toInClauses_("request_counter", rcs, maxItemsInSqlInClause).map { inClause =>
       import DbStorage.Implicits.BuilderChain._
@@ -242,11 +243,11 @@ class DbRequestJournalStore(
       override def kind: String = "request"
       override def logger: TracedLogger = DbRequestJournalStore.this.logger
-      override def executeBatch(items: NonEmptyList[Traced[DbRequestJournalStore.ReplaceRequest]])(
+      override def executeBatch(items: NonEmpty[Seq[Traced[DbRequestJournalStore.ReplaceRequest]]])(
          implicit traceContext: TraceContext
       ): Future[Iterable[Try[Result]]] = bulkUpdateWithCheck(items, "DbRequestJournalStore.replace")
-      override protected def bulkUpdateAction(items: NonEmptyList[Traced[ReplaceRequest]])(implicit
+      override protected def bulkUpdateAction(items: NonEmpty[Seq[Traced[ReplaceRequest]]])(implicit
          batchTraceContext: TraceContext
       ): DBIOAction[Array[Int], NoStream, Effect.All] = {
         val updateQuery =
@@ -274,7 +275,7 @@ class DbRequestJournalStore(
       override protected def itemIdentifier(item: ReplaceRequest): RequestCounter = item.rc
       override protected def dataIdentifier(state: RequestData): RequestCounter = state.rc
-      override protected def checkQuery(itemsToCheck: NonEmptyList[RequestCounter])(implicit
+      override protected def checkQuery(itemsToCheck: NonEmpty[Seq[RequestCounter]])(implicit
          batchTraceContext: TraceContext
       ): Iterable[ReadOnly[Iterable[RequestData]]] = bulkQueryDbio(itemsToCheck)
diff --git a/community/participant/src/main/scala/com/digitalasset/canton/participant/store/memory/InMemoryDamlPackageStore.scala b/community/participant/src/main/scala/com/digitalasset/canton/participant/store/memory/InMemoryDamlPackageStore.scala
index 372bebd4f..9be120672 100644
--- a/community/participant/src/main/scala/com/digitalasset/canton/participant/store/memory/InMemoryDamlPackageStore.scala
+++ b/community/participant/src/main/scala/com/digitalasset/canton/participant/store/memory/InMemoryDamlPackageStore.scala
@@ -128,7 +128,7 @@ class InMemoryDamlPackageStore(override protected val loggerFactory: NamedLogger
         .to(Seq)
     )
-  override def anyPackagePreventsDarRemoval(packages: List[PackageId], removeDar: DarDescriptor)(
+  override def anyPackagePreventsDarRemoval(packages: Seq[PackageId], removeDar: DarDescriptor)(
      implicit tc: TraceContext
   ): OptionT[Future, PackageId] = {
     val known = packages.toSet.intersect(Monoid.combineAll(darPackages.toMap.values))
diff --git a/community/participant/src/main/scala/com/digitalasset/canton/participant/util/DAMLe.scala b/community/participant/src/main/scala/com/digitalasset/canton/participant/util/DAMLe.scala
index bea37b9e2..bcf29e3c0 100644
--- a/community/participant/src/main/scala/com/digitalasset/canton/participant/util/DAMLe.scala
+++ b/community/participant/src/main/scala/com/digitalasset/canton/participant/util/DAMLe.scala
@@ -193,7 +193,7 @@ class DAMLe(
           signatories,
           stakeholders,
           key,
-          _byInterface,
           version,
         ) =>
       ContractWithMetadata(
diff --git a/community/participant/src/test/scala/com/digitalasset/canton/participant/protocol/MessageDispatcherTest.scala b/community/participant/src/test/scala/com/digitalasset/canton/participant/protocol/MessageDispatcherTest.scala
index 69392f682..66cb7df6a 100644
--- a/community/participant/src/test/scala/com/digitalasset/canton/participant/protocol/MessageDispatcherTest.scala
+++ b/community/participant/src/test/scala/com/digitalasset/canton/participant/protocol/MessageDispatcherTest.scala
@@ -3,9 +3,9 @@
 package com.digitalasset.canton.participant.protocol
-import cats.data.{NonEmptyList, NonEmptySet}
 import cats.syntax.flatMap._
 import cats.syntax.option._
+import com.daml.nonempty.NonEmpty
 import com.digitalasset.canton.crypto.provider.symbolic.SymbolicCrypto
 import com.digitalasset.canton.time.TimeProof
 import com.digitalasset.canton.crypto.{Encrypted, HashPurpose, HashPurposeTest, TestHash}
@@ -834,9 +834,10 @@ trait MessageDispatcherTest { this: AsyncWordSpecLike with BaseTest =>
         Batch.of[ProtocolMessage](
           view -> Recipients.cc(participantId),
           rootHashMessage -> Recipients.groups(
-            NonEmptyList.of(
-              NonEmptySet.of(participantId, mediatorId),
-              NonEmptySet.of(participantId, mediatorId2),
+            NonEmpty.mk(
+              Seq,
+              NonEmpty.mk(Set, participantId, mediatorId),
+              NonEmpty.mk(Set, participantId, mediatorId2),
             )
           ),
         ),
diff --git a/community/participant/src/test/scala/com/digitalasset/canton/participant/protocol/validation/ModelConformanceCheckerTest.scala b/community/participant/src/test/scala/com/digitalasset/canton/participant/protocol/validation/ModelConformanceCheckerTest.scala
index a6b6ce77c..30f80cd00 100644
--- a/community/participant/src/test/scala/com/digitalasset/canton/participant/protocol/validation/ModelConformanceCheckerTest.scala
+++ b/community/participant/src/test/scala/com/digitalasset/canton/participant/protocol/validation/ModelConformanceCheckerTest.scala
@@ -4,11 +4,12 @@
 package com.digitalasset.canton.participant.protocol.validation
 import java.time.Duration
-
-import cats.data.{EitherT, NonEmptyList}
+import cats.data.EitherT
 import cats.implicits._
 import com.daml.lf.data.ImmArray
 import com.daml.lf.engine
+import com.daml.nonempty.NonEmptyUtil
+import com.daml.nonempty.NonEmpty
 import com.digitalasset.canton.data.{CantonTimestamp, TransactionViewTree}
 import com.digitalasset.canton.topology.client.TopologySnapshot
 import com.digitalasset.canton.participant.protocol.TransactionProcessingSteps
@@ -60,8 +61,8 @@ class ModelConformanceCheckerTest extends AsyncWordSpec with BaseTest {
   def viewsWithNoInputKeys(
       rootViews: Seq[TransactionViewTree]
-  ): NonEmptyList[(TransactionViewTree, Map[LfGlobalKey, Option[LfContractId]])] =
-    NonEmptyList.fromListUnsafe(
+  ): NonEmpty[Seq[(TransactionViewTree, Map[LfGlobalKey, Option[LfContractId]])]] =
+    NonEmptyUtil.fromUnsafe(
       rootViews.map(_ -> Map.empty[LfGlobalKey, Option[LfContractId]]).toList
     )
@@ -77,7 +78,7 @@ class ModelConformanceCheckerTest extends AsyncWordSpec with BaseTest {
   def check(
       mcc: ModelConformanceChecker,
-      views: NonEmptyList[(TransactionViewTree, Map[LfGlobalKey, Option[LfContractId]])],
+      views: NonEmpty[Seq[(TransactionViewTree, Map[LfGlobalKey, Option[LfContractId]])]],
       ips: TopologySnapshot = factory.topologySnapshot,
   ): EitherT[Future, Error, Result] = {
     val rootViewTrees = views.map(_._1)
diff --git a/project/BuildCommon.scala b/project/BuildCommon.scala
index 9054ade1a..f665d1310 100644
--- a/project/BuildCommon.scala
+++ b/project/BuildCommon.scala
@@ -323,8 +323,8 @@ object BuildCommon {
             "Log4j2Plugins.dat",
           ) =>
         MergeStrategy.first
-      case "module-info.class" => MergeStrategy.discard
       case "META-INF/versions/9/module-info.class" => MergeStrategy.discard
+      case path if path.contains("module-info.class") => MergeStrategy.discard
       case PathList("org", "jline", _ @_*) => MergeStrategy.first
       case x => oldStrategy(x)
     }
@@ -346,9 +346,17 @@ object BuildCommon {
   // applies to all Canton-based sub-projects (descendants of community-common)
   lazy val sharedCantonSettings = sharedSettings ++ cantonWarts ++ Seq(
-    // // Enable logging of begin and end of test cases, test suites, and test runs.
-    Test / testOptions += Tests.Argument("-C", "com.digitalasset.canton.LogReporter")
+    Test / testOptions += Tests.Argument("-C", "com.digitalasset.canton.LogReporter"),
+    // Ignore daml codegen generated files from code coverage
+    coverageExcludedFiles := formatCoverageExcludes(
+      """
+        |
+        |.*sbt-buildinfo.BuildInfo
+        |.*daml-codegen.*
       """
+    ),
+    scalacOptions += "-Wconf:src=src_managed/.*:silent",
   )
   // applies to all app sub-projects
@@ -459,6 +467,7 @@ object BuildCommon {
       daml_participant_state, //needed for ReadService/Update classes by PrettyInstances
       daml_ledger_api_common,
       daml_ledger_api_client,
+      daml_nonempty_cats,
       logback_classic,
       logback_core,
       scala_logging,
@@ -518,6 +527,7 @@ object BuildCommon {
     Compile / PB.targets := Seq(
       scalapb.gen(flatPackage = true) -> (Compile / sourceManaged).value / "protobuf"
     ),
+    Compile / PB.protoSources ++= (Test / PB.protoSources).value,
     buildInfoKeys := Seq[BuildInfoKey](
      version,
      scalaVersion,
@@ -525,7 +535,7 @@ object BuildCommon {
      BuildInfoKey("damlLibrariesVersion" -> Dependencies.daml_libraries_version),
      BuildInfoKey("vmbc" -> Dependencies.daml_libraries_version),
      // For now, the release version is the same as the protocol version
-     BuildInfoKey("protocolVersion" -> version.value),
+     BuildInfoKey("protocolVersion" -> "2.0.0"),
    ),
    buildInfoPackage := "com.digitalasset.canton.buildinfo",
    buildInfoObject := "BuildInfo",
@@ -539,13 +549,7 @@ object BuildCommon {
        |com\.digitalasset\.canton\.identity\.admin\.v0\..*
        |com\.digitalasset\.canton\.domain\.api\.v0\..*
        |com\.digitalasset\.canton\.v0\..*
-        """
-    ),
-    coverageExcludedFiles := formatCoverageExcludes(
-      """
-        |
-        |.*sbt-buildinfo.BuildInfo
-        |.*daml-codegen.*
+        |com\.digitalasset\.canton\.protobuf\..*
        """
    ),
    Compile / damlCodeGeneration := Seq(
      (
        (Compile / sourceDirectory).value / "daml" / "CantonExamples",
        (Compile / damlDarOutput).value / "CantonExamples.dar",
        "com.digitalasset.canton.examples",
      )
    ),
-    scalacOptions += "-Wconf:src=src_managed/.*:silent",
    addProtobufFilesToHeaderCheck(Compile),
    addFilesToHeaderCheck("*.daml", "daml", Compile),
    JvmRulesPlugin.damlRepoHeaderSettings,
@@ -622,13 +625,6 @@ object BuildCommon {
        |
        |com\.digitalasset\.canton\.participant\.admin\.v0\..*
        |com\.digitalasset\.canton\.participant\.protocol\.v0\..*
-        """
-    ),
-    // Ignore daml codegen generated files from code coverage
-    coverageExcludedFiles := formatCoverageExcludes(
-      """
-        |
-        |.*daml-codegen.*
        """
    ),
    Compile / damlCodeGeneration := Seq(
      (
        (Compile / sourceDirectory).value / "daml" / "AdminWorkflows",
        (Compile / damlDarOutput).value / "AdminWorkflows.dar",
        "com.digitalasset.canton.participant.admin.workflows",
      )
    ),
    damlFixedDars := Seq("AdminWorkflows.dar"),
-    scalacOptions += "-Wconf:src=src_managed/.*:silent",
    addProtobufFilesToHeaderCheck(Compile),
    addFilesToHeaderCheck("*.daml", "daml", Compile),
    JvmRulesPlugin.damlRepoHeaderSettings,
@@ -767,14 +762,6 @@ object BuildCommon {
      "doctor",
      "ai-analysis",
    ),
-    // Ignore daml codegen generated files from code coverage
-    coverageExcludedFiles := formatCoverageExcludes(
-      """
-        |
-        |.*daml-codegen.*
-      """
-    ),
-    scalacOptions += "-Wconf:src=src_managed/.*:silent",
    addProtobufFilesToHeaderCheck(Compile),
    addFilesToHeaderCheck("*.sh", "../pack", Compile),
    addFilesToHeaderCheck("*.daml", "daml", Compile),
diff --git a/project/Dependencies.scala b/project/Dependencies.scala
index 803879cb9..f98089dec 100644
--- a/project/Dependencies.scala
+++ b/project/Dependencies.scala
@@ -73,6 +73,7 @@ object Dependencies {
   lazy val daml_lf_transaction_test_lib =
     "com.daml" %% "daml-lf-transaction-test-lib" % daml_libraries_version
+  lazy val daml_nonempty_cats = "com.daml" %% "nonempty-cats" % daml_libraries_version
   lazy val daml_ledger_api_common = "com.daml" %% "ledger-api-common" % daml_libraries_version
   lazy val daml_ledger_api_client = "com.daml" %% "ledger-api-client" % daml_libraries_version
   lazy val daml_participant_integration_api =
@@ -172,7 +173,7 @@ object Dependencies {
   lazy val prometheus_httpserver = "io.prometheus" % "simpleclient_httpserver" % "0.12.0"
   lazy val prometheus_hotspot = "io.prometheus" % "simpleclient_hotspot" % "0.12.0"
-  lazy val opentelemetry_version = "1.1.0"
+  lazy val opentelemetry_version = "1.12.0"
   lazy val opentelemetry_api = "io.opentelemetry" % "opentelemetry-api" % opentelemetry_version
   lazy val opentelemetry_sdk = "io.opentelemetry" % "opentelemetry-sdk" % opentelemetry_version
   lazy val opentelemetry_sdk_autoconfigure =
@@ -182,7 +183,7 @@ object Dependencies {
   lazy val opentelemetry_jaeger =
     "io.opentelemetry" % "opentelemetry-exporter-jaeger" % opentelemetry_version
   lazy val opentelemetry_instrumentation_grpc =
-    "io.opentelemetry.instrumentation" % "opentelemetry-grpc-1.5" % s"$opentelemetry_version-alpha"
+    "io.opentelemetry.instrumentation" % "opentelemetry-grpc-1.6" % s"$opentelemetry_version-alpha"
   lazy val better_files = "com.github.pathikrit" %% "better-files" % "3.8.0"
@@ -199,10 +200,6 @@ object Dependencies {
   lazy val sttp_circe = "com.softwaremill.sttp.client3" %% "circe" % sttp_version
   lazy val sttp_slf4j = "com.softwaremill.sttp.client3" %% "slf4j-backend" % sttp_version
-  /*lazy val scalafx = "org.scalafx" %% "scalafx" % "16.0.0-R25"
-  lazy val javafx_all = Seq("controls", "base", "fxml", "media", "web", "graphics").map { x =>
-    "org.openjfx" % s"javafx-$x" % "16"
-  }*/
   // demo dependencies (you also need to update demo.sc)
   lazy val scalafx = "org.scalafx" %% "scalafx" % "17.0.1-R26"
   // TODO(i8460) Don't upgrade until https://github.com/sbt/sbt/issues/6564 is fixed
@@ -212,7 +209,7 @@ object Dependencies {
   lazy val toxiproxy_java = "eu.rekawek.toxiproxy" % "toxiproxy-java" % "2.1.4"
-  lazy val fabric_sdk = "org.hyperledger.fabric-sdk-java" % "fabric-sdk-java" % "2.2.4"
+  lazy val fabric_sdk = "org.hyperledger.fabric-sdk-java" % "fabric-sdk-java" % "2.2.13"
   lazy val web3j = "org.web3j" % "core" % "4.8.9"
@@ -229,4 +226,6 @@ object Dependencies {
   lazy val wartremover_dep =
     "org.wartremover" % "wartremover" % wartremover.Wart.PluginVersion cross CrossVersion.full
+
+  lazy val scala_csv = "com.github.tototoshi" %% "scala-csv" % "1.3.10"
 }
diff --git a/project/project/DamlVersions.scala b/project/project/DamlVersions.scala
index 37621bb47..cce69247a 100644
--- a/project/project/DamlVersions.scala
+++ b/project/project/DamlVersions.scala
@@ -7,7 +7,7 @@ object DamlVersions {
   /** The version of the daml compiler (and in most cases of the daml libraries as well).
     */
-  val version: String = "2.1.0-snapshot.20220328.9630.0.66c37bad"
+  val version: String = "2.1.0-snapshot.20220407.9685.0.7ed507cf"
   /** Custom Daml artifacts override version.
     */
diff --git a/version.sbt b/version.sbt
index 75bf8c98c..551dbd9ad 100644
--- a/version.sbt
+++ b/version.sbt
@@ -1 +1 @@
-ThisBuild / version := "2.1.0-SNAPSHOT"
+ThisBuild / version := "2.2.0-SNAPSHOT"