diff --git a/sdk/canton/community/admin-api/src/main/protobuf/com/digitalasset/canton/admin/participant/v30/participant_repair_service.proto b/sdk/canton/community/admin-api/src/main/protobuf/com/digitalasset/canton/admin/participant/v30/participant_repair_service.proto
index e19a3efbddd7..35f4472cf187 100644
--- a/sdk/canton/community/admin-api/src/main/protobuf/com/digitalasset/canton/admin/participant/v30/participant_repair_service.proto
+++ b/sdk/canton/community/admin-api/src/main/protobuf/com/digitalasset/canton/admin/participant/v30/participant_repair_service.proto
@@ -111,13 +111,6 @@ message MigrateSynchronizerRequest {
 message MigrateSynchronizerResponse {}

 message ExportAcsOldRequest {
-  message TargetSynchronizer {
-    // The ID of the synchronizer where the contract is supposed to be assigned when the export is being imported
-    string synchronizer_id = 1;
-    // The protocol version associated to the synchronizer where the contract is supposed to be assigned when the contracts snapshot is being imported
-    int32 protocol_version = 2;
-  }
-
   // The parties for which the ACS should be exported
   // Required
   repeated string parties = 1;
@@ -136,7 +129,7 @@ message ExportAcsOldRequest {
   // ID in the key will be assigned to the synchronizer id and protocol version in the value. This is not a proper synchronizer
   // migration of contracts and it's supposed to be used only in exceptional cases.
   // Optional, if not provided the contracts will be exported with the same synchronizer id as they are currently assigned
-  map<string, TargetSynchronizer> contract_synchronizer_renames = 4;
+  reserved 4; // was map<string, TargetSynchronizer> contract_synchronizer_renames

   // If true, do not check whether the provided timestamp is clean (see `timestamp` field).
   // NOT FOR PRODUCTION USE.
diff --git a/sdk/canton/community/app-base/src/main/scala/com/digitalasset/canton/admin/api/client/commands/LedgerApiCommands.scala b/sdk/canton/community/app-base/src/main/scala/com/digitalasset/canton/admin/api/client/commands/LedgerApiCommands.scala
index 3dcafafdab04..36fd660d0327 100644
--- a/sdk/canton/community/app-base/src/main/scala/com/digitalasset/canton/admin/api/client/commands/LedgerApiCommands.scala
+++ b/sdk/canton/community/app-base/src/main/scala/com/digitalasset/canton/admin/api/client/commands/LedgerApiCommands.scala
@@ -70,6 +70,7 @@ import com.daml.ledger.api.v2.command_submission_service.{
 }
 import com.daml.ledger.api.v2.commands.{Command, Commands, DisclosedContract, PrefetchContractKey}
 import com.daml.ledger.api.v2.completion.Completion
+import com.daml.ledger.api.v2.crypto as lapicrypto
 import com.daml.ledger.api.v2.event.CreatedEvent
 import com.daml.ledger.api.v2.event_query_service.EventQueryServiceGrpc.EventQueryServiceStub
 import com.daml.ledger.api.v2.event_query_service.{
@@ -77,7 +78,6 @@ import com.daml.ledger.api.v2.event_query_service.{
   GetEventsByContractIdRequest,
   GetEventsByContractIdResponse,
 }
-import com.daml.ledger.api.v2.interactive.interactive_submission_service as iss
 import com.daml.ledger.api.v2.interactive.interactive_submission_service.InteractiveSubmissionServiceGrpc.InteractiveSubmissionServiceStub
 import com.daml.ledger.api.v2.interactive.interactive_submission_service.{
   ExecuteSubmissionAndWaitForTransactionRequest,
@@ -308,11 +308,11 @@ object LedgerApiCommands {
           onboardingTransactions = transactions.map { case (transaction, signatures) =>
             AllocateExternalPartyRequest.SignedTransaction(
               transaction.getCryptographicEvidence,
-              signatures.map(_.toProtoV30.transformInto[iss.Signature]),
+
signatures.map(_.toProtoV30.transformInto[lapicrypto.Signature]), ) }, multiHashSignatures = - multiHashSignatures.map(_.toProtoV30.transformInto[iss.Signature]), + multiHashSignatures.map(_.toProtoV30.transformInto[lapicrypto.Signature]), identityProviderId = "", ) ) @@ -1581,6 +1581,7 @@ object LedgerApiCommands { packageIdSelectionPreference: Seq[LfPackageId], verboseHashing: Boolean, prefetchContractKeys: Seq[PrefetchContractKey], + maxRecordTime: Option[CantonTimestamp], ) extends BaseCommand[ PrepareSubmissionRequest, PrepareSubmissionResponse, @@ -1604,6 +1605,7 @@ object LedgerApiCommands { packageIdSelectionPreference = packageIdSelectionPreference, verboseHashing = verboseHashing, prefetchContractKeys = prefetchContractKeys, + maxRecordTime = maxRecordTime.map(_.toProtoTimestamp), ) ) @@ -1638,13 +1640,12 @@ object LedgerApiCommands { import com.digitalasset.canton.crypto.LedgerApiCryptoConversions.* import io.scalaland.chimney.dsl.* - import com.daml.ledger.api.v2.interactive.interactive_submission_service as iss private def makePartySignatures: PartySignatures = PartySignatures( transactionSignatures.map { case (party, signatures) => SinglePartySignatures( party = party.toProtoPrimitive, - signatures = signatures.map(_.toProtoV30.transformInto[iss.Signature]), + signatures = signatures.map(_.toProtoV30.transformInto[lapicrypto.Signature]), ) }.toSeq ) diff --git a/sdk/canton/community/app-base/src/main/scala/com/digitalasset/canton/admin/api/client/commands/ParticipantAdminCommands.scala b/sdk/canton/community/app-base/src/main/scala/com/digitalasset/canton/admin/api/client/commands/ParticipantAdminCommands.scala index 7bbb9e4c1dd1..dfcd2725d854 100644 --- a/sdk/canton/community/app-base/src/main/scala/com/digitalasset/canton/admin/api/client/commands/ParticipantAdminCommands.scala +++ b/sdk/canton/community/app-base/src/main/scala/com/digitalasset/canton/admin/api/client/commands/ParticipantAdminCommands.scala @@ -71,7 +71,6 @@ import com.digitalasset.canton.topology.{ } import com.digitalasset.canton.tracing.TraceContext import com.digitalasset.canton.util.{BinaryFileUtil, GrpcStreamingUtils, OptionUtil, PathUtils} -import com.digitalasset.canton.version.ProtocolVersion import com.digitalasset.canton.{ReassignmentCounter, SequencerCounter, SynchronizerAlias, config} import com.google.protobuf.ByteString import com.google.protobuf.timestamp.Timestamp @@ -727,7 +726,6 @@ object ParticipantAdminCommands { filterSynchronizerId: Option[SynchronizerId], timestamp: Option[Instant], observer: StreamObserver[v30.ExportAcsOldResponse], - contractSynchronizerRenames: Map[SynchronizerId, (SynchronizerId, ProtocolVersion)], force: Boolean, ) extends GrpcAdminCommand[ v30.ExportAcsOldRequest, @@ -746,15 +744,6 @@ object ParticipantAdminCommands { parties.map(_.toLf).toSeq, filterSynchronizerId.map(_.toProtoPrimitive).getOrElse(""), timestamp.map(Timestamp.apply), - contractSynchronizerRenames.map { - case (source, (targetSynchronizerId, targetProtocolVersion)) => - val targetSynchronizer = v30.ExportAcsOldRequest.TargetSynchronizer( - synchronizerId = targetSynchronizerId.toProtoPrimitive, - protocolVersion = targetProtocolVersion.toProtoPrimitive, - ) - - (source.toProtoPrimitive, targetSynchronizer) - }, force = force, partiesOffboarding = partiesOffboarding, ) diff --git a/sdk/canton/community/app-base/src/main/scala/com/digitalasset/canton/config/CantonConfig.scala b/sdk/canton/community/app-base/src/main/scala/com/digitalasset/canton/config/CantonConfig.scala index 
9663ad80bdbe..14337b24097c 100644 --- a/sdk/canton/community/app-base/src/main/scala/com/digitalasset/canton/config/CantonConfig.scala +++ b/sdk/canton/community/app-base/src/main/scala/com/digitalasset/canton/config/CantonConfig.scala @@ -484,6 +484,8 @@ final case class CantonConfig( automaticallyPerformLogicalSynchronizerUpgrade = participantParameters.automaticallyPerformLogicalSynchronizerUpgrade, reassignmentsConfig = participantParameters.reassignmentsConfig, + doNotAwaitOnCheckingIncomingCommitments = + participantParameters.doNotAwaitOnCheckingIncomingCommitments, ) } diff --git a/sdk/canton/community/app-base/src/main/scala/com/digitalasset/canton/console/commands/LedgerApiAdministration.scala b/sdk/canton/community/app-base/src/main/scala/com/digitalasset/canton/console/commands/LedgerApiAdministration.scala index 89a47a85e602..8905ca448e2a 100644 --- a/sdk/canton/community/app-base/src/main/scala/com/digitalasset/canton/console/commands/LedgerApiAdministration.scala +++ b/sdk/canton/community/app-base/src/main/scala/com/digitalasset/canton/console/commands/LedgerApiAdministration.scala @@ -673,6 +673,7 @@ trait BaseLedgerApiAdministration extends NoTracing with StreamingCommandHelper userPackageSelectionPreference: Seq[LfPackageId] = Seq.empty, verboseHashing: Boolean = false, prefetchContractKeys: Seq[PrefetchContractKey] = Seq.empty, + maxRecordTime: Option[CantonTimestamp] = None, ): PrepareResponseProto = consoleEnvironment.run { ledgerApiCommand( @@ -688,6 +689,7 @@ trait BaseLedgerApiAdministration extends NoTracing with StreamingCommandHelper userPackageSelectionPreference, verboseHashing, prefetchContractKeys, + maxRecordTime, ) ) } @@ -2465,6 +2467,7 @@ trait BaseLedgerApiAdministration extends NoTracing with StreamingCommandHelper userPackageSelectionPreference: Seq[LfPackageId] = Seq.empty, verboseHashing: Boolean = false, prefetchContractKeys: Seq[javab.data.PrefetchContractKey] = Seq.empty, + maxRecordTime: Option[CantonTimestamp] = None, ): PrepareResponseProto = consoleEnvironment.run { ledgerApiCommand( @@ -2480,6 +2483,7 @@ trait BaseLedgerApiAdministration extends NoTracing with StreamingCommandHelper userPackageSelectionPreference, verboseHashing, prefetchContractKeys.map(k => PrefetchContractKey.fromJavaProto(k.toProto)), + maxRecordTime, ) ) } diff --git a/sdk/canton/community/app-base/src/main/scala/com/digitalasset/canton/console/commands/ParticipantRepairAdministration.scala b/sdk/canton/community/app-base/src/main/scala/com/digitalasset/canton/console/commands/ParticipantRepairAdministration.scala index 7d13c85c2409..94de88066596 100644 --- a/sdk/canton/community/app-base/src/main/scala/com/digitalasset/canton/console/commands/ParticipantRepairAdministration.scala +++ b/sdk/canton/community/app-base/src/main/scala/com/digitalasset/canton/console/commands/ParticipantRepairAdministration.scala @@ -192,8 +192,6 @@ class ParticipantRepairAdministration( outputFile: String = ParticipantRepairAdministration.ExportAcsDefaultFile, filterSynchronizerId: Option[SynchronizerId] = None, timestamp: Option[Instant] = None, - contractSynchronizerRenames: Map[SynchronizerId, (SynchronizerId, ProtocolVersion)] = - Map.empty, force: Boolean = false, timeout: NonNegativeDuration = timeouts.unbounded, ): Unit = @@ -211,7 +209,6 @@ class ParticipantRepairAdministration( filterSynchronizerId, timestamp, responseObserver, - contractSynchronizerRenames, force = force, ) ) diff --git 
a/sdk/canton/community/app-base/src/main/scala/com/digitalasset/canton/console/commands/PartiesAdministration.scala b/sdk/canton/community/app-base/src/main/scala/com/digitalasset/canton/console/commands/PartiesAdministration.scala index e6c6800626d5..4b5479f03529 100644 --- a/sdk/canton/community/app-base/src/main/scala/com/digitalasset/canton/console/commands/PartiesAdministration.scala +++ b/sdk/canton/community/app-base/src/main/scala/com/digitalasset/canton/console/commands/PartiesAdministration.scala @@ -23,7 +23,6 @@ import com.digitalasset.canton.admin.api.client.data.{AddPartyStatus, ListPartie import com.digitalasset.canton.admin.participant.v30.ExportPartyAcsResponse import com.digitalasset.canton.config.RequireTypes.{NonNegativeLong, PositiveInt} import com.digitalasset.canton.config.{ConsoleCommandTimeout, NonNegativeDuration} -import com.digitalasset.canton.console.ConsoleMacros.utils import com.digitalasset.canton.console.commands.TopologyTxFiltering.{AddedFilter, RevokedFilter} import com.digitalasset.canton.console.{ AdminCommandRunner, @@ -220,7 +219,6 @@ class ParticipantPartiesAdministrationGroup( synchronizeParticipants: Seq[ParticipantReference] = consoleEnvironment.participants.all, synchronize: Option[config.NonNegativeDuration] = Some(timeouts.unbounded), // External party specifics - confirmationThreshold: PositiveInt = PositiveInt.one, keysCount: PositiveInt = PositiveInt.one, keysThreshold: PositiveInt = PositiveInt.one, ): ExternalParty = { @@ -233,34 +231,15 @@ class ParticipantPartiesAdministrationGroup( onboardingData <- onboarding_transactions( name, synchronizer, - confirmationThreshold = confirmationThreshold, keysCount = keysCount, keysThreshold = keysThreshold, ) (onboardingTxs, externalParty) = onboardingData - _ = reference.topology.transactions.load( - onboardingTxs.toSeq, - psid, - synchronize = synchronize, - ) - - // Wait until the proposal is known - _ = utils.retry_until_true( - reference.topology.party_to_participant_mappings - .list( - psid, - proposals = true, - filterParticipant = reference.id.filterString, - filterParty = externalParty.filterString, - ) - .nonEmpty - ) - - _ = reference.topology.transactions.authorize[PartyToParticipant]( - txHash = onboardingTxs.partyToParticipant.hash, - mustBeFullyAuthorized = true, - store = psid, + _ = reference.ledger_api.parties.allocate_external( + psid.logical, + onboardingTxs.transactionsWithSingleSignature, + onboardingTxs.multiTransactionSignatures, ) _ <- EitherT.fromEither[FutureUnlessShutdown]( @@ -278,6 +257,128 @@ class ParticipantPartiesAdministrationGroup( consoleEnvironment.run(ConsoleCommandResult.fromEitherTUS(onboardingET)) } + /** Generate the party id and namespace transaction for a centralized namespace party. Creates + * the namespace key in the global crypto store. 
+    */
+  private def build_centralized_namespace(
+      name: String,
+      protocolVersion: ProtocolVersion,
+  ): EitherT[
+    FutureUnlessShutdown,
+    String,
+    (PartyId, TopologyTransaction[TopologyChangeOp.Replace, TopologyMapping]),
+  ] = for {
+    namespaceKey <- consoleEnvironment.tryGlobalCrypto
+      .generateSigningKey(usage = SigningKeyUsage.NamespaceOnly)
+      .leftMap(_.toString)
+    partyId = PartyId.tryCreate(name, namespaceKey.fingerprint)
+    mapping <- EitherT.fromEither[FutureUnlessShutdown](
+      NamespaceDelegation.create(
+        namespace = partyId.namespace,
+        target = namespaceKey,
+        CanSignAllMappings,
+      )
+    )
+    namespaceTx = TopologyTransaction(
+      TopologyChangeOp.Replace,
+      serial = PositiveInt.one,
+      mapping,
+      protocolVersion,
+    )
+  } yield (partyId, namespaceTx)
+
+  /** Generate the party id and namespace transaction for a decentralized namespace party from a
+    * set of existing namespaces. The namespaces must already exist and be authorized in the
+    * topology of the target synchronizer.
+    */
+  private def build_decentralized_namespace(
+      name: String,
+      protocolVersion: ProtocolVersion,
+      namespaceOwners: NonEmpty[Set[Namespace]],
+      namespaceThreshold: PositiveInt,
+  ): (
+      PartyId,
+      TopologyTransaction[TopologyChangeOp.Replace, TopologyMapping],
+  ) = {
+    val decentralizedNamespace =
+      DecentralizedNamespaceDefinition.computeNamespace(namespaceOwners.forgetNE)
+    val partyId = PartyId.tryCreate(name, decentralizedNamespace.fingerprint)
+    val namespaceTx = TopologyTransaction(
+      TopologyChangeOp.Replace,
+      serial = PositiveInt.one,
+      DecentralizedNamespaceDefinition.tryCreate(
+        decentralizedNamespace,
+        namespaceThreshold,
+        namespaceOwners,
+      ),
+      protocolVersion,
+    )
+    (partyId, namespaceTx)
+  }
+
+  /** Utility method to create a namespace delegation controlled by an external key. Use this,
+    * for instance, to create namespaces prior to allocating an external party controlled by a
+    * decentralized namespace.
+    * @param synchronizer
+    *   Optional target synchronizer; if not provided, the synchronizer is auto-detected
+    * @return
+    *   The namespace controlled by the newly created key
+    */
+  @VisibleForTesting // Ensures this is only used in testing
+  def create_external_namespace(
+      synchronizer: Option[SynchronizerAlias] = None,
+      synchronize: Option[config.NonNegativeDuration] = Some(timeouts.unbounded),
+  ): Namespace = {
+    val res = for {
+      psid <- EitherT
+        .fromEither[FutureUnlessShutdown](
+          lookupOrDetectSynchronizerId(synchronizer)
+        )
+        .leftMap(err => s"Cannot find protocol version: $err")
+
+      namespaceKey <- consoleEnvironment.tryGlobalCrypto
+        .generateSigningKey(usage = SigningKeyUsage.NamespaceOnly)
+        .leftMap(_.toString)
+
+      namespace = Namespace(namespaceKey.fingerprint)
+
+      namespaceTx = TopologyTransaction(
+        TopologyChangeOp.Replace,
+        serial = PositiveInt.one,
+        NamespaceDelegation.tryCreate(
+          namespace = namespace,
+          target = namespaceKey,
+          CanSignAllMappings,
+        ),
+        psid.protocolVersion,
+      )
+
+      signature <- consoleEnvironment.tryGlobalCrypto.privateCrypto
+        .sign(
+          namespaceTx.hash.hash,
+          namespaceKey.fingerprint,
+          NonEmpty.mk(Set, SigningKeyUsage.Namespace),
+        )
+        .leftMap(_.toString)
+
+      signedNamespace = SignedTopologyTransaction
+        .withTopologySignatures(
+          namespaceTx,
+          NonEmpty.mk(Seq, SingleTransactionSignature(namespaceTx.hash, signature)),
+          isProposal = false,
+          psid.protocolVersion,
+        )
+
+      _ = reference.topology.transactions.load(
+        Seq(signedNamespace),
+        psid,
+        synchronize = synchronize,
+      )
+    } yield Namespace(namespaceKey.fingerprint)
+
+    consoleEnvironment.run(ConsoleCommandResult.fromEitherTUS(res))
+  }
+
   /** Compute the onboarding transaction to enable party `name`
     * @param name
     *   Name of the party to be enabled
@@ -287,6 +388,12 @@
     *   Other confirming participants
     * @param observing
     *   Observing participants
+    * @param decentralizedNamespaceOwners
+    *   Set when creating a party controlled by a decentralized namespace. The namespaces must
+    *   already exist and be authorized in the topology of the target synchronizer.
+    * @param namespaceThreshold
+    *   Threshold of the decentralized namespace. Only used when decentralizedNamespaceOwners is
+    *   non-empty.
*/ @VisibleForTesting // Ensures external parties are created only in tests def onboarding_transactions( @@ -297,6 +404,8 @@ class ParticipantPartiesAdministrationGroup( confirmationThreshold: PositiveInt = PositiveInt.one, keysCount: PositiveInt = PositiveInt.one, keysThreshold: PositiveInt = PositiveInt.one, + decentralizedNamespaceOwners: Set[Namespace] = Set.empty, + namespaceThreshold: PositiveInt = PositiveInt.one, ): EitherT[FutureUnlessShutdown, String, (OnboardingTransactions, ExternalParty)] = for { protocolVersion <- EitherT @@ -305,21 +414,22 @@ class ParticipantPartiesAdministrationGroup( ) .leftMap(err => s"Cannot find protocol version: $err") - namespaceKey <- consoleEnvironment.tryGlobalCrypto - .generateSigningKey(usage = SigningKeyUsage.NamespaceOnly) - .leftMap(_.toString) - partyId = PartyId.tryCreate(name, namespaceKey.fingerprint) + decentralizedOwnersNEO = NonEmpty.from(decentralizedNamespaceOwners) - namespaceDelegationTx = TopologyTransaction( - TopologyChangeOp.Replace, - serial = PositiveInt.one, - NamespaceDelegation.tryCreate( - namespace = partyId.namespace, - target = namespaceKey, - CanSignAllMappings, - ), - protocolVersion, - ) + partyIdAndNamespaceTx <- decentralizedOwnersNEO + .map(namespaceOwnersNE => + EitherT.pure[FutureUnlessShutdown, String]( + build_decentralized_namespace( + name, + protocolVersion, + namespaceOwnersNE, + namespaceThreshold, + ) + ) + ) + .getOrElse(build_centralized_namespace(name, protocolVersion)) + + (partyId, namespaceTx) = partyIdAndNamespaceTx protocolSigningKeys = consoleEnvironment.global_secret.keys.secret .generate_keys(keysCount, usage = SigningKeyUsage.ProtocolOnly) @@ -373,9 +483,10 @@ class ParticipantPartiesAdministrationGroup( partyId = partyId, protocolSigningKeys = protocolSigningKeys.map(_.fingerprint), protocolVersion = protocolVersion, - namespaceDelegationTx = namespaceDelegationTx, + namespaceTx = namespaceTx, partyToKeyTx = partyToKeyTx, partyToParticipantTx = partyToParticipantTx, + decentralizedNamespaceOwners = decentralizedNamespaceOwners, ) } yield ( onboardingTransactions, @@ -402,8 +513,6 @@ class ParticipantPartiesAdministrationGroup( synchronizer: SynchronizerAlias, synchronizeParticipants: Seq[ParticipantReference] = consoleEnvironment.participants.all, synchronize: Option[config.NonNegativeDuration] = Some(timeouts.unbounded), - // External party specifics - confirmationThreshold: PositiveInt = PositiveInt.one, ): Unit = { val onboardingET = for { @@ -414,31 +523,13 @@ class ParticipantPartiesAdministrationGroup( onboardingTxs <- onboarding_transactions_for_existing( party, synchronizer, - confirmationThreshold = confirmationThreshold, + confirmationThreshold = PositiveInt.one, ) - _ = reference.topology.transactions.load( - onboardingTxs.toSeq, - psid, - synchronize = synchronize, - ) - - // Wait until the proposal is known - _ = utils.retry_until_true( - reference.topology.party_to_participant_mappings - .list( - psid, - proposals = true, - filterParticipant = reference.id.filterString, - filterParty = party.filterString, - ) - .nonEmpty - ) - - _ = reference.topology.transactions.authorize[PartyToParticipant]( - txHash = onboardingTxs.partyToParticipant.hash, - mustBeFullyAuthorized = true, - store = psid, + _ = reference.ledger_api.parties.allocate_external( + psid.logical, + onboardingTxs.transactionsWithSingleSignature, + onboardingTxs.multiTransactionSignatures, ) _ <- EitherT.fromEither[FutureUnlessShutdown]( @@ -566,9 +657,10 @@ class ParticipantPartiesAdministrationGroup( partyId = 
party.partyId, protocolSigningKeys = party.signingFingerprints, protocolVersion = protocolVersion, - namespaceDelegationTx = namespaceDelegationTx, + namespaceTx = namespaceDelegationTx, partyToKeyTx = partyToKeyTx, partyToParticipantTx = partyToParticipantTx, + decentralizedNamespaceOwners = Set.empty, ) } yield onboardingTransactions @@ -580,13 +672,14 @@ class ParticipantPartiesAdministrationGroup( partyId: PartyId, protocolSigningKeys: NonEmpty[Seq[Fingerprint]], protocolVersion: ProtocolVersion, - namespaceDelegationTx: TopologyTransaction[TopologyChangeOp.Replace, NamespaceDelegation], + namespaceTx: TopologyTransaction[TopologyChangeOp.Replace, TopologyMapping], partyToKeyTx: TopologyTransaction[TopologyChangeOp.Replace, PartyToKeyMapping], partyToParticipantTx: TopologyTransaction[TopologyChangeOp.Replace, PartyToParticipant], + decentralizedNamespaceOwners: Set[Namespace], ): EitherT[FutureUnlessShutdown, String, OnboardingTransactions] = { val transactionHashes = NonEmpty.mk( Set, - namespaceDelegationTx.hash, + namespaceTx.hash, partyToParticipantTx.hash, partyToKeyTx.hash, ) @@ -596,11 +689,19 @@ class ParticipantPartiesAdministrationGroup( consoleEnvironment.tryGlobalCrypto.pureCrypto, ) - // Sign the multi hash with the namespace key, as it is needed to authorize all transactions - val namespaceSignature = consoleEnvironment.global_secret.sign( - combinedMultiTxHash.getCryptographicEvidence, - partyId.fingerprint, - NonEmpty.mk(Set, SigningKeyUsage.Namespace: SigningKeyUsage), + val decentralizedOwnersNEO = NonEmpty.from(decentralizedNamespaceOwners) + + val namespaceFingerprints = decentralizedOwnersNEO + .map(_.map(_.fingerprint)) + .getOrElse(NonEmpty.mk(Set, partyId.fingerprint)) + + // Sign the multi hash with the namespace keys, as it is needed to authorize all transactions + val namespaceSignatures = namespaceFingerprints.toSeq.map( + consoleEnvironment.global_secret.sign( + combinedMultiTxHash.getCryptographicEvidence, + _, + NonEmpty.mk(Set, SigningKeyUsage.Namespace: SigningKeyUsage), + ) ) for { @@ -617,14 +718,13 @@ class ParticipantPartiesAdministrationGroup( .leftMap(_.toString) .map(_.toSeq) - multiTxSignatures = NonEmpty.mk( - Seq, - MultiTransactionSignature(transactionHashes, namespaceSignature), + multiTxSignatures = namespaceSignatures.map(namespaceSignature => + MultiTransactionSignature(transactionHashes, namespaceSignature) ) - signedNamespaceDelegation = SignedTopologyTransaction + signedNamespace = SignedTopologyTransaction .withTopologySignatures( - namespaceDelegationTx, + namespaceTx, multiTxSignatures, isProposal = false, protocolVersion, @@ -649,7 +749,7 @@ class ParticipantPartiesAdministrationGroup( .addSingleSignatures(protocolSignatures.toSet) } yield { val keys = Map( - "namespace-delegation" -> signedNamespaceDelegation, + "namespace" -> signedNamespace, "party-to-participant" -> signedPartyToParticipant, "party-to-key" -> signedPartyToKey, ).view.mapValues(_.signatures.map(_.authorizingLongTermKey).mkString(", ")) @@ -659,7 +759,7 @@ class ParticipantPartiesAdministrationGroup( ) OnboardingTransactions( - signedNamespaceDelegation, + signedNamespace, signedPartyToParticipant, signedPartyToKey, ) diff --git a/sdk/canton/community/app-base/src/main/scala/com/digitalasset/canton/console/commands/TopologyAdministrationGroup.scala b/sdk/canton/community/app-base/src/main/scala/com/digitalasset/canton/console/commands/TopologyAdministrationGroup.scala index 0125ba9be6dd..a169ca0c5902 100644 --- 
a/sdk/canton/community/app-base/src/main/scala/com/digitalasset/canton/console/commands/TopologyAdministrationGroup.scala +++ b/sdk/canton/community/app-base/src/main/scala/com/digitalasset/canton/console/commands/TopologyAdministrationGroup.scala @@ -2237,11 +2237,13 @@ class TopologyAdministrationGroup( mustFullyAuthorize: Boolean = true, serial: Option[PositiveInt] = None, change: TopologyChangeOp = TopologyChangeOp.Replace, + featureFlags: Seq[SynchronizerTrustCertificate.ParticipantTopologyFeatureFlag] = Seq.empty, ): SignedTopologyTransaction[TopologyChangeOp, SynchronizerTrustCertificate] = { val cmd = TopologyAdminCommands.Write.Propose( mapping = SynchronizerTrustCertificate( participantId, synchronizerId, + featureFlags, ), signedBy = Seq.empty, store = store.getOrElse(synchronizerId), @@ -2644,10 +2646,10 @@ class TopologyAdministrationGroup( runAdminCommand(command).discard } - @Help.Summary("List mediator synchronizer topology state") + @Help.Summary("List vetted packages") @Help.Description( """ - synchronizerId: the optional target synchronizer + store: the optional topology store to query from proposals: if true then proposals are shown, otherwise actual validated state """ ) @@ -2679,6 +2681,14 @@ class TopologyAdministrationGroup( @Help.Summary("Inspect mediator synchronizer state") @Help.Group("Mediator Synchronizer State") object mediators extends Helpful { + + @Help.Summary("List mediator synchronizer topology state") + @Help.Description( + """ + synchronizerId: the optional target synchronizer + proposals: if true then proposals are shown, otherwise actual validated state + """ + ) def list( synchronizerId: Option[SynchronizerId] = None, proposals: Boolean = false, diff --git a/sdk/canton/community/app/src/pack/config/participant.conf b/sdk/canton/community/app/src/pack/config/participant.conf index 90dc521a11f6..af741b8feb67 100644 --- a/sdk/canton/community/app/src/pack/config/participant.conf +++ b/sdk/canton/community/app/src/pack/config/participant.conf @@ -69,6 +69,11 @@ canton.participants.participant { keep-alive-server = ${?_shared.admin-api.keep-alive-server} } + http-ledger-api.server { + address = localhost + port = 10005 + } + // Configure GRPC / HTTP Health Server for monitoring // See https://docs.daml.com/canton/usermanual/monitoring.html#grpc-health-check-service monitoring { diff --git a/sdk/canton/community/app/src/pack/config/sandbox.conf b/sdk/canton/community/app/src/pack/config/sandbox.conf index 98bcc23a8c85..c4704940bf36 100644 --- a/sdk/canton/community/app/src/pack/config/sandbox.conf +++ b/sdk/canton/community/app/src/pack/config/sandbox.conf @@ -20,6 +20,10 @@ canton { address = localhost port = 10022 } + http-ledger-api.server { + address = localhost + port = 10023 + } } sequencers.local { public-api { diff --git a/sdk/canton/community/app/src/pack/examples/08-interactive-submission/interactive_submission.py b/sdk/canton/community/app/src/pack/examples/08-interactive-submission/interactive_submission.py index c039ca4d87ed..2d705280d75d 100644 --- a/sdk/canton/community/app/src/pack/examples/08-interactive-submission/interactive_submission.py +++ b/sdk/canton/community/app/src/pack/examples/08-interactive-submission/interactive_submission.py @@ -12,7 +12,7 @@ from google.protobuf.json_format import MessageToJson from com.daml.ledger.api.v2.interactive import interactive_submission_service_pb2_grpc from com.daml.ledger.api.v2.interactive import interactive_submission_service_pb2 -from com.daml.ledger.api.v2 import commands_pb2, value_pb2, 
completion_pb2 +from com.daml.ledger.api.v2 import commands_pb2, value_pb2, completion_pb2, crypto_pb2 from external_party_onboarding_admin_api import onboard_external_party from cryptography.hazmat.primitives import serialization from cryptography.hazmat.primitives.asymmetric.ec import EllipticCurvePrivateKey @@ -164,11 +164,11 @@ def execute_and_get_contract_id( interactive_submission_service_pb2.SinglePartySignatures( party=party, signatures=[ - interactive_submission_service_pb2.Signature( - format=interactive_submission_service_pb2.SignatureFormat.SIGNATURE_FORMAT_DER, + crypto_pb2.Signature( + format=crypto_pb2.SignatureFormat.SIGNATURE_FORMAT_DER, signature=signature, signed_by=pub_fingerprint, - signing_algorithm_spec=interactive_submission_service_pb2.SigningAlgorithmSpec.SIGNING_ALGORITHM_SPEC_EC_DSA_SHA_256, + signing_algorithm_spec=crypto_pb2.SigningAlgorithmSpec.SIGNING_ALGORITHM_SPEC_EC_DSA_SHA_256, ) ], ) diff --git a/sdk/canton/community/app/src/pack/examples/08-interactive-submission/setup.sh b/sdk/canton/community/app/src/pack/examples/08-interactive-submission/setup.sh index ed8b0d96ef05..821750bfc9f4 100755 --- a/sdk/canton/community/app/src/pack/examples/08-interactive-submission/setup.sh +++ b/sdk/canton/community/app/src/pack/examples/08-interactive-submission/setup.sh @@ -76,6 +76,7 @@ generate_grpc_service() { echo "Generating python code from protobuf definitions" generate_grpc_code "$LEDGER_API_PROTO_PATH" "$LEDGER_API_V2_PATH/commands.proto" generate_grpc_code "$LEDGER_API_PROTO_PATH" "$LEDGER_API_V2_PATH/completion.proto" +generate_grpc_code "$LEDGER_API_PROTO_PATH" "$LEDGER_API_V2_PATH/crypto.proto" generate_grpc_code "$LEDGER_API_PROTO_PATH" "$LEDGER_API_V2_PATH/event.proto" generate_grpc_code "$LEDGER_API_PROTO_PATH" "$LEDGER_API_V2_PATH/offset_checkpoint.proto" generate_grpc_code "$LEDGER_API_PROTO_PATH" "$LEDGER_API_V2_PATH/reassignment.proto" diff --git a/sdk/canton/community/app/src/pack/examples/09-json-api/typescript/openapi.yaml b/sdk/canton/community/app/src/pack/examples/09-json-api/typescript/openapi.yaml index 33080af0baf5..48658572a9b1 100644 --- a/sdk/canton/community/app/src/pack/examples/09-json-api/typescript/openapi.yaml +++ b/sdk/canton/community/app/src/pack/examples/09-json-api/typescript/openapi.yaml @@ -2072,7 +2072,15 @@ components: onboardingTransactions: description: |- TopologyTransactions to onboard the external party - Must contain 3 signed transactions: NamespaceDelegation, PartyToKeyMapping, PartyToParticipant + Can contain: + - A namespace for the party. + This can be either a single NamespaceDelegation, + or DecentralizedNamespaceDefinition along with its authorized namespace owners in the form of NamespaceDelegations. + May be provided, if so it must be fully authorized by the signatures in this request combined with the existing topology state. + - A PartyToKeyMapping to register the party's signing keys. + May be provided, if so it must be fully authorized by the signatures in this request combined with the existing topology state. + - A PartyToParticipant to register the hosting relationship of the party. + Must be provided. Required type: array items: @@ -4841,6 +4849,17 @@ components: type: array items: $ref: '#/components/schemas/PrefetchContractKey' + maxRecordTime: + description: |- + Maximum timestamp at which the transaction can be recorded onto the ledger via the synchronizer specified in the `PrepareSubmissionResponse`. 
+          If submitted after this time, the transaction will be rejected even if otherwise valid,
+          in which case it needs to be prepared and signed again with a new valid max_record_time.
+          Use this to limit the time-to-live of a prepared transaction: once the limit has passed,
+          the transaction can definitely no longer be accepted, and preparing another transaction
+          for the same intent is safe again.
+          Optional
+        type: string
  JsPrepareSubmissionResponse:
    title: JsPrepareSubmissionResponse
    description: '[docs-entry-end: HashingSchemeVersion]'
diff --git a/sdk/canton/community/app/src/test/daml/CantonLfDev/daml.yaml b/sdk/canton/community/app/src/test/daml/CantonLfDev/daml.yaml
index fc02a0bb9de9..f06fedbfa714 100644
--- a/sdk/canton/community/app/src/test/daml/CantonLfDev/daml.yaml
+++ b/sdk/canton/community/app/src/test/daml/CantonLfDev/daml.yaml
@@ -1,4 +1,4 @@
-sdk-version: 3.4.0-snapshot.20251002.14252.0.v9bd7e7ad
+sdk-version: 3.4.0-snapshot.20251007.14274.0.ve2024cd6
 build-options:
 - --target=2.dev
 name: CantonLfDev
diff --git a/sdk/canton/community/app/src/test/daml/CantonLfV21/daml.yaml b/sdk/canton/community/app/src/test/daml/CantonLfV21/daml.yaml
index c97d656f9bce..814c566b2460 100644
--- a/sdk/canton/community/app/src/test/daml/CantonLfV21/daml.yaml
+++ b/sdk/canton/community/app/src/test/daml/CantonLfV21/daml.yaml
@@ -1,4 +1,4 @@
-sdk-version: 3.4.0-snapshot.20251002.14252.0.v9bd7e7ad
+sdk-version: 3.4.0-snapshot.20251007.14274.0.ve2024cd6
 build-options:
 - --target=2.1
 - --enable-interfaces=yes
diff --git a/sdk/canton/community/app/src/test/daml/CantonTest/CantonTests.daml b/sdk/canton/community/app/src/test/daml/CantonTest/CantonTests.daml
index 72d1809c5a3d..137c61581b12 100644
--- a/sdk/canton/community/app/src/test/daml/CantonTest/CantonTests.daml
+++ b/sdk/canton/community/app/src/test/daml/CantonTest/CantonTests.daml
@@ -25,3 +25,4 @@ import Test()
 import TransientContracts()
 import WitnessCreate()
 import Universal()
+import LocalContract()
diff --git a/sdk/canton/community/app/src/test/daml/CantonTest/LocalContract.daml b/sdk/canton/community/app/src/test/daml/CantonTest/LocalContract.daml
new file mode 100644
index 000000000000..44c0f6a1ab46
--- /dev/null
+++ b/sdk/canton/community/app/src/test/daml/CantonTest/LocalContract.daml
@@ -0,0 +1,35 @@
+-- Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
+-- SPDX-License-Identifier: Apache-2.0 + +module LocalContract where + +template Local + with + owner: Party + witness: Party + where + signatory owner + observer witness + + choice GetName : () + controller owner + do pure () + +template Holder + with + owner: Party + holderWitness: Party + where + signatory owner + observer holderWitness + + nonconsuming choice CreateAndUse : () + with + witness: Party + observer holderWitness + controller owner + do + localCid <- create $ Local owner witness + _ <- exercise localCid GetName + pure () + diff --git a/sdk/canton/community/app/src/test/daml/CantonTest/daml.yaml b/sdk/canton/community/app/src/test/daml/CantonTest/daml.yaml index f9b0f18a5eab..cb5700918992 100644 --- a/sdk/canton/community/app/src/test/daml/CantonTest/daml.yaml +++ b/sdk/canton/community/app/src/test/daml/CantonTest/daml.yaml @@ -1,4 +1,4 @@ -sdk-version: 3.4.0-snapshot.20251002.14252.0.v9bd7e7ad +sdk-version: 3.4.0-snapshot.20251007.14274.0.ve2024cd6 build-options: - --target=2.1 name: CantonTests diff --git a/sdk/canton/community/app/src/test/daml/CantonTestDev/daml.yaml b/sdk/canton/community/app/src/test/daml/CantonTestDev/daml.yaml index 4fef9a73d526..b9183f777a84 100644 --- a/sdk/canton/community/app/src/test/daml/CantonTestDev/daml.yaml +++ b/sdk/canton/community/app/src/test/daml/CantonTestDev/daml.yaml @@ -1,4 +1,4 @@ -sdk-version: 3.4.0-snapshot.20251002.14252.0.v9bd7e7ad +sdk-version: 3.4.0-snapshot.20251007.14274.0.ve2024cd6 build-options: - --target=2.dev name: CantonTestsDev diff --git a/sdk/canton/community/app/src/test/daml/JsonApiTest/Account/daml.yaml b/sdk/canton/community/app/src/test/daml/JsonApiTest/Account/daml.yaml index 7e927f651958..2a3a416fc206 100644 --- a/sdk/canton/community/app/src/test/daml/JsonApiTest/Account/daml.yaml +++ b/sdk/canton/community/app/src/test/daml/JsonApiTest/Account/daml.yaml @@ -1,4 +1,4 @@ -sdk-version: 3.4.0-snapshot.20251002.14252.0.v9bd7e7ad +sdk-version: 3.4.0-snapshot.20251007.14274.0.ve2024cd6 build-options: - --target=2.1 - --enable-interfaces=yes diff --git a/sdk/canton/community/app/src/test/daml/JsonApiTest/CIou/daml.yaml b/sdk/canton/community/app/src/test/daml/JsonApiTest/CIou/daml.yaml index 3c277f85a8dd..fc1b396a8f1e 100644 --- a/sdk/canton/community/app/src/test/daml/JsonApiTest/CIou/daml.yaml +++ b/sdk/canton/community/app/src/test/daml/JsonApiTest/CIou/daml.yaml @@ -1,4 +1,4 @@ -sdk-version: 3.4.0-snapshot.20251002.14252.0.v9bd7e7ad +sdk-version: 3.4.0-snapshot.20251007.14274.0.ve2024cd6 build-options: - --target=2.1 - --enable-interfaces=yes diff --git a/sdk/canton/community/app/src/test/daml/JsonApiTest/Upgrades/Iface/daml.yaml b/sdk/canton/community/app/src/test/daml/JsonApiTest/Upgrades/Iface/daml.yaml index cc1d58770cd7..e26b5a150245 100644 --- a/sdk/canton/community/app/src/test/daml/JsonApiTest/Upgrades/Iface/daml.yaml +++ b/sdk/canton/community/app/src/test/daml/JsonApiTest/Upgrades/Iface/daml.yaml @@ -1,4 +1,4 @@ -sdk-version: 3.4.0-snapshot.20251002.14252.0.v9bd7e7ad +sdk-version: 3.4.0-snapshot.20251007.14274.0.ve2024cd6 name: ifoo source: IFoo.daml version: 0.0.1 diff --git a/sdk/canton/community/app/src/test/daml/JsonApiTest/Upgrades/IncompatibleV3/daml.yaml b/sdk/canton/community/app/src/test/daml/JsonApiTest/Upgrades/IncompatibleV3/daml.yaml index 686019e953ac..84fd3659f9d4 100644 --- a/sdk/canton/community/app/src/test/daml/JsonApiTest/Upgrades/IncompatibleV3/daml.yaml +++ b/sdk/canton/community/app/src/test/daml/JsonApiTest/Upgrades/IncompatibleV3/daml.yaml @@ -1,6 +1,6 @@ -sdk-version: 
3.4.0-snapshot.20251001.14245.0.v20f88882 +sdk-version: 3.4.0-snapshot.20251007.14274.0.ve2024cd6 name: foo -data-dependencies: +data-dependencies: - ../../../../scala-2.13/resource_managed/test/ifoo-0.0.1.dar source: Foo.daml version: 0.0.3 diff --git a/sdk/canton/community/app/src/test/daml/JsonApiTest/Upgrades/V1/daml.yaml b/sdk/canton/community/app/src/test/daml/JsonApiTest/Upgrades/V1/daml.yaml index 91dc55220778..9012f0c203a1 100644 --- a/sdk/canton/community/app/src/test/daml/JsonApiTest/Upgrades/V1/daml.yaml +++ b/sdk/canton/community/app/src/test/daml/JsonApiTest/Upgrades/V1/daml.yaml @@ -1,4 +1,4 @@ -sdk-version: 3.4.0-snapshot.20251002.14252.0.v9bd7e7ad +sdk-version: 3.4.0-snapshot.20251007.14274.0.ve2024cd6 name: foo data-dependencies: - ../../../../scala-2.13/resource_managed/test/ifoo-0.0.1.dar diff --git a/sdk/canton/community/app/src/test/daml/JsonApiTest/Upgrades/V2/daml.yaml b/sdk/canton/community/app/src/test/daml/JsonApiTest/Upgrades/V2/daml.yaml index 802f6654e2d8..717138897f80 100644 --- a/sdk/canton/community/app/src/test/daml/JsonApiTest/Upgrades/V2/daml.yaml +++ b/sdk/canton/community/app/src/test/daml/JsonApiTest/Upgrades/V2/daml.yaml @@ -1,4 +1,4 @@ -sdk-version: 3.4.0-snapshot.20251002.14252.0.v9bd7e7ad +sdk-version: 3.4.0-snapshot.20251007.14274.0.ve2024cd6 name: foo data-dependencies: - ../../../../scala-2.13/resource_managed/test/ifoo-0.0.1.dar diff --git a/sdk/canton/community/app/src/test/daml/JsonApiTest/User/daml.yaml b/sdk/canton/community/app/src/test/daml/JsonApiTest/User/daml.yaml index f09d3caaa893..1a06a08b776e 100644 --- a/sdk/canton/community/app/src/test/daml/JsonApiTest/User/daml.yaml +++ b/sdk/canton/community/app/src/test/daml/JsonApiTest/User/daml.yaml @@ -1,4 +1,4 @@ -sdk-version: 3.4.0-snapshot.20251002.14252.0.v9bd7e7ad +sdk-version: 3.4.0-snapshot.20251007.14274.0.ve2024cd6 build-options: - --target=2.1 name: User diff --git a/sdk/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/SubmitCommandTrialErrorTest.scala b/sdk/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/SubmitCommandTrialErrorTest.scala index 708c214aee24..ee2fa172761f 100644 --- a/sdk/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/SubmitCommandTrialErrorTest.scala +++ b/sdk/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/SubmitCommandTrialErrorTest.scala @@ -121,6 +121,8 @@ trait SubmitCommandTrialErrorTest extends CommunityIntegrationTest with SharedEn participant2.dars.upload(CantonExamplesPath) + participant2.packages.synchronize_vetting() + participant1.ledger_api.javaapi.commands.submit(Seq(Bank), cmds) } diff --git a/sdk/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/connection/SequencerConnectionServiceIntegrationTest.scala b/sdk/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/connection/SequencerConnectionServiceIntegrationTest.scala new file mode 100644 index 000000000000..ed150bfc9fd3 --- /dev/null +++ b/sdk/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/connection/SequencerConnectionServiceIntegrationTest.scala @@ -0,0 +1,181 @@ +// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
+// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.integration.tests.connection + +import com.digitalasset.canton.config.DbConfig +import com.digitalasset.canton.config.RequireTypes.{NonNegativeInt, PositiveInt} +import com.digitalasset.canton.console.InstanceReference +import com.digitalasset.canton.integration.bootstrap.{ + NetworkBootstrapper, + NetworkTopologyDescription, +} +import com.digitalasset.canton.integration.plugins.{UsePostgres, UseReferenceBlockSequencer} +import com.digitalasset.canton.integration.{ + CommunityIntegrationTest, + ConfigTransforms, + EnvironmentDefinition, + SharedEnvironment, +} +import com.digitalasset.canton.logging.{LogEntry, SuppressionRule} +import com.digitalasset.canton.sequencing.{ + SequencerConnectionValidation, + SequencerConnectionXPool, + SequencerConnections, + SequencerSubscriptionPool, + SubmissionRequestAmplification, +} +import com.digitalasset.canton.{SequencerAlias, config} +import monocle.macros.syntax.lens.* +import org.slf4j.event.Level.INFO + +import scala.concurrent.duration.DurationInt + +sealed trait SequencerConnectionServiceIntegrationTest + extends CommunityIntegrationTest + with SharedEnvironment { + + override def environmentDefinition: EnvironmentDefinition = + EnvironmentDefinition.P2S2M1_Config + .addConfigTransforms( + ConfigTransforms.setConnectionPool(true), + _.focus(_.parameters.timeouts.processing.sequencerInfo) + .replace(config.NonNegativeDuration.tryFromDuration(2.seconds)), + ) + .withNetworkBootstrap { implicit env => + import env.* + new NetworkBootstrapper( + NetworkTopologyDescription( + daName, + synchronizerOwners = Seq[InstanceReference](sequencer1, mediator1), + synchronizerThreshold = PositiveInt.one, + sequencers = Seq(sequencer1, sequencer2), + mediators = Seq(mediator1), + overrideMediatorToSequencers = Some( + Map( + mediator1 -> (Seq(sequencer1, sequencer2), + /* trust threshold */ PositiveInt.one, /* liveness margin */ NonNegativeInt.zero) + ) + ), + ) + ) + } + + "SequencerConnectionService" must { + "Allow modifying the pool configuration" in { implicit env => + import env.* + + val connectionsConfig = Seq(sequencer1, sequencer2).map(s => + s.config.publicApi.clientConfig.asSequencerConnection(SequencerAlias.tryCreate(s.name)) + ) + + clue("connect participant1 to all sequencers") { + participant1.synchronizers.connect_bft( + connections = connectionsConfig, + sequencerTrustThreshold = PositiveInt.one, + sequencerLivenessMargin = NonNegativeInt.zero, + submissionRequestAmplification = SubmissionRequestAmplification.NoAmplification, + synchronizerAlias = daName, + physicalSynchronizerId = Some(daId), + validation = SequencerConnectionValidation.Disabled, + ) + } + + participant1.health.ping(participant1.id) + + mediator1.sequencer_connection.get().value.sequencerTrustThreshold shouldBe PositiveInt.one + + clue("reconfigure mediator's trust threshold") { + loggerFactory.assertLogsSeq( + SuppressionRule.LevelAndAbove(INFO) && (SuppressionRule + .forLogger[SequencerConnectionXPool] || SuppressionRule + .forLogger[SequencerSubscriptionPool]) + )( + mediator1.sequencer_connection.modify_connections { + _.withSequencerTrustThreshold(PositiveInt.two).valueOrFail("set trust threshold to 2") + }, + forExactly(2, _)(_.infoMessage should include("Configuration updated")), + ) + + mediator1.sequencer_connection.get().value.sequencerTrustThreshold shouldBe PositiveInt.two + + // The mediator is still functional + participant1.health.ping(participant1.id) + } + + clue("reconfigure 
mediator's connections to use a single connection") { + mediator1.sequencer_connection.modify_connections { old => + SequencerConnections.tryMany( + connectionsConfig.drop(1), + sequencerTrustThreshold = PositiveInt.one, + old.sequencerLivenessMargin, + old.submissionRequestAmplification, + old.sequencerConnectionPoolDelays, + ) + } + + // The configuration has changed + mediator1.sequencer_connection + .get() + .value + .connections + .forgetNE + .loneElement shouldBe connectionsConfig(1) + + // The mediator is still functional + participant1.health.ping(participant1.id) + } + + clue("fail to reconfigure mediator's connections if validation fails") { + sequencer1.stop() + + assertThrowsAndLogsCommandFailures( + mediator1.sequencer_connection.modify_connections { old => + SequencerConnections.tryMany( + connectionsConfig, + sequencerTrustThreshold = PositiveInt.two, + old.sequencerLivenessMargin, + old.submissionRequestAmplification, + old.sequencerConnectionPoolDelays, + ) + }, + _.commandFailureMessage should include( + "FAILED_PRECONDITION/TimeoutError(Connection pool failed to initialize" + ), + ) + + // The configuration has not changed + mediator1.sequencer_connection + .get() + .value + .connections + .forgetNE + .loneElement shouldBe connectionsConfig(1) + + // The mediator is still functional + // We possibly need to retry, because if participant1 has a single subscription on sequencer2, it will not detect + // that sequencer1 is down until it first sends to it, and could therefore still pick it for the first send. + // An alternative would be to use amplification. + eventually() { + loggerFactory.assertLoggedWarningsAndErrorsSeq( + participant1.health.maybe_ping(participant1.id, timeout = 2.seconds) shouldBe defined, + LogEntry.assertLogSeq( + mustContainWithClue = Seq.empty, + mayContain = Seq( + _.warningMessage should include regex + raw"Request failed for server-.*\. Is the server running\? Did you configure the server address as 0\.0\.0\.0\?" + + raw" Are you using the right TLS settings\?" 
+ ), + ), + ) + } + } + } + } +} + +class SequencerConnectionServiceIntegrationTestDefault + extends SequencerConnectionServiceIntegrationTest { + registerPlugin(new UsePostgres(loggerFactory)) + registerPlugin(new UseReferenceBlockSequencer[DbConfig.Postgres](loggerFactory)) +} diff --git a/sdk/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/ledgerapi/auth/AllocateExternalPartyAuthIT.scala b/sdk/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/ledgerapi/auth/AllocateExternalPartyAuthIT.scala index ac3b99016bce..8740b9eada2c 100644 --- a/sdk/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/ledgerapi/auth/AllocateExternalPartyAuthIT.scala +++ b/sdk/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/ledgerapi/auth/AllocateExternalPartyAuthIT.scala @@ -4,19 +4,24 @@ package com.digitalasset.canton.integration.tests.ledgerapi.auth import com.daml.ledger.api.v2.admin.party_management_service.* -import com.daml.ledger.api.v2.interactive.interactive_submission_service as iss import com.digitalasset.canton.HasExecutionContext import com.digitalasset.canton.config.DbConfig +import com.digitalasset.canton.config.RequireTypes.PositiveInt import com.digitalasset.canton.integration.TestConsoleEnvironment import com.digitalasset.canton.integration.plugins.UseReferenceBlockSequencer -import com.digitalasset.canton.interactive.ExternalPartyUtils -import io.scalaland.chimney.dsl.* +import com.digitalasset.canton.topology.DefaultTestIdentities +import com.digitalasset.canton.topology.transaction.ParticipantPermission.Confirmation +import com.digitalasset.canton.topology.transaction.{ + HostingParticipant, + PartyToParticipant, + TopologyChangeOp, + TopologyTransaction, +} -import scala.concurrent.{ExecutionContext, Future} +import scala.concurrent.Future final class AllocateExternalPartyAuthIT extends AdminOrIDPAdminServiceCallAuthTests - with ExternalPartyUtils with HasExecutionContext { registerPlugin(new UseReferenceBlockSequencer[DbConfig.H2](loggerFactory)) @@ -25,28 +30,28 @@ final class AllocateExternalPartyAuthIT override def serviceCall( context: ServiceCallContext - )(implicit env: TestConsoleEnvironment): Future[Any] = { - val (onboardingTransactions, _) = - generateExternalPartyOnboardingTransactions("alice", Seq(env.participant1.id)) - + )(implicit env: TestConsoleEnvironment): Future[Any] = stub(PartyManagementServiceGrpc.stub(channel), context.token) .allocateExternalParty( AllocateExternalPartyRequest( synchronizer = env.synchronizer1Id.toProtoPrimitive, - onboardingTransactions = onboardingTransactions.transactionsWithSingleSignature.map { - case (transaction, signatures) => - AllocateExternalPartyRequest.SignedTransaction( - transaction.getCryptographicEvidence, - signatures.map(_.toProtoV30.transformInto[iss.Signature]), - ) - }, - multiHashSignatures = onboardingTransactions.multiTransactionSignatures.map( - _.toProtoV30.transformInto[iss.Signature] + onboardingTransactions = Seq( + AllocateExternalPartyRequest.SignedTransaction( + TopologyTransaction( + op = TopologyChangeOp.Replace, + serial = PositiveInt.one, + PartyToParticipant.tryCreate( + DefaultTestIdentities.party1, + PositiveInt.one, + Seq(HostingParticipant(env.participant1.id, Confirmation)), + ), + testedProtocolVersion, + ).toByteString, + Seq.empty, + ) ), + multiHashSignatures = Seq.empty, identityProviderId = context.identityProviderId, ) ) - } - - override implicit def externalPartyExecutionContext: 
ExecutionContext = parallelExecutionContext } diff --git a/sdk/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/ledgerapi/services/SubmitDummyPreparedSubmission.scala b/sdk/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/ledgerapi/services/SubmitDummyPreparedSubmission.scala index e412fc97eddb..58e0e47a165a 100644 --- a/sdk/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/ledgerapi/services/SubmitDummyPreparedSubmission.scala +++ b/sdk/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/ledgerapi/services/SubmitDummyPreparedSubmission.scala @@ -45,6 +45,7 @@ trait SubmitDummyPreparedSubmission extends SubmitDummyCommand { packageIdSelectionPreference = Seq.empty, verboseHashing = true, prefetchContractKeys = Seq.empty, + maxRecordTime = Option.empty, ) protected def dummyExecuteSubmissionRequest( diff --git a/sdk/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/ledgerapi/submission/BaseInteractiveSubmissionTest.scala b/sdk/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/ledgerapi/submission/BaseInteractiveSubmissionTest.scala index 8de55250a25e..5cfd2727ae34 100644 --- a/sdk/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/ledgerapi/submission/BaseInteractiveSubmissionTest.scala +++ b/sdk/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/ledgerapi/submission/BaseInteractiveSubmissionTest.scala @@ -19,6 +19,7 @@ import com.daml.ledger.api.v2.transaction_filter.{ UpdateFormat, WildcardFilter, } +import com.digitalasset.canton.BaseTest import com.digitalasset.canton.admin.api.client.commands.LedgerApiCommands.UpdateService import com.digitalasset.canton.admin.api.client.commands.LedgerApiCommands.UpdateService.TransactionWrapper import com.digitalasset.canton.config.RequireTypes.PositiveInt @@ -28,7 +29,6 @@ import com.digitalasset.canton.console.{ ParticipantReference, } import com.digitalasset.canton.crypto.* -import com.digitalasset.canton.data.OnboardingTransactions import com.digitalasset.canton.integration.tests.ledgerapi.submission.BaseInteractiveSubmissionTest.{ ParticipantSelector, defaultConfirmingParticipant, @@ -40,17 +40,15 @@ import com.digitalasset.canton.integration.{ ConfigTransforms, TestConsoleEnvironment, } -import com.digitalasset.canton.interactive.ExternalPartyUtils import com.digitalasset.canton.logging.{LogEntry, NamedLogging} +import com.digitalasset.canton.topology.ForceFlag.DisablePartyWithActiveContracts import com.digitalasset.canton.topology.transaction.* -import com.digitalasset.canton.topology.{ExternalParty, PartyId, PhysicalSynchronizerId} -import com.digitalasset.canton.{BaseTest, HasExecutionContext} +import com.digitalasset.canton.topology.{ExternalParty, ForceFlags, PartyId, SynchronizerId} import com.google.protobuf.ByteString import monocle.Monocle.toAppliedFocusOps import org.scalatest.Suite import java.util.UUID -import scala.concurrent.ExecutionContext object BaseInteractiveSubmissionTest { type ParticipantSelector = TestConsoleEnvironment => LocalParticipantReference @@ -60,15 +58,10 @@ object BaseInteractiveSubmissionTest { val defaultConfirmingParticipant: ParticipantSelector = _.participant3 } -trait BaseInteractiveSubmissionTest - extends ExternalPartyUtils - with BaseTest - with HasExecutionContext { +trait BaseInteractiveSubmissionTest extends BaseTest { this: Suite & NamedLogging => - override val 
externalPartyExecutionContext: ExecutionContext = parallelExecutionContext - protected def ppn(implicit env: TestConsoleEnvironment): LocalParticipantReference = defaultPreparingParticipant(env) protected def cpn(implicit env: TestConsoleEnvironment): LocalParticipantReference = @@ -86,56 +79,31 @@ trait BaseInteractiveSubmissionTest ), ) - protected def loadOnboardingTransactions( - externalParty: ExternalParty, - confirming: ParticipantReference, - synchronizerId: PhysicalSynchronizerId, - onboardingTransactions: OnboardingTransactions, - extraConfirming: Seq[ParticipantReference] = Seq.empty, - observing: Seq[ParticipantReference] = Seq.empty, + protected def offboardParty( + party: ExternalParty, + participant: LocalParticipantReference, + synchronizerId: SynchronizerId, )(implicit env: TestConsoleEnvironment): Unit = { - // Start by loading the transactions signed by the party - confirming.topology.transactions.load( - onboardingTransactions.toSeq, - store = synchronizerId, - ) - - val partyId = externalParty.partyId - val allParticipants = Seq(confirming) ++ extraConfirming ++ observing + import env.* - // Then each hosting participant must sign and load the PartyToParticipant transaction - allParticipants.map { hp => - // Eventually because it could take some time before the transaction makes it to all participants - val partyToParticipantProposal = eventually() { - hp.topology.party_to_participant_mappings - .list( - synchronizerId, - proposals = true, - filterParty = partyId.toProtoPrimitive, - ) - .loneElement - } - - // In practice, participant operators are expected to inspect the transaction here before authorizing it - val transactionHash = partyToParticipantProposal.context.transactionHash - hp.topology.transactions.authorize[PartyToParticipant]( - transactionHash, - mustBeFullyAuthorized = false, - store = synchronizerId, - ) - } + val partyToParticipantTx = participant.topology.party_to_participant_mappings + .list(synchronizerId, filterParty = party.toProtoPrimitive) + .loneElement + val partyToParticipantMapping = partyToParticipantTx.item + val removeTopologyTx = TopologyTransaction( + TopologyChangeOp.Remove, + partyToParticipantTx.context.serial.increment, + partyToParticipantMapping, + testedProtocolVersion, + ) - allParticipants.foreach { hp => - // Wait until all participants agree the hosting is effective - env.utils.retry_until_true( - hp.topology.party_to_participant_mappings - .list( - synchronizerId, - filterParty = partyId.toProtoPrimitive, - ) - .nonEmpty - ) - } + val removeCharlieSignedTopologyTx = + global_secret.sign(removeTopologyTx, party, testedProtocolVersion) + participant.topology.transactions.load( + Seq(removeCharlieSignedTopologyTx), + synchronizerId, + forceFlags = ForceFlags(DisablePartyWithActiveContracts), + ) } protected def exec( diff --git a/sdk/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/ledgerapi/submission/ExternalPartyOnboardingIntegrationTest.scala b/sdk/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/ledgerapi/submission/ExternalPartyOnboardingIntegrationTest.scala new file mode 100644 index 000000000000..cc67ae949cec --- /dev/null +++ b/sdk/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/ledgerapi/submission/ExternalPartyOnboardingIntegrationTest.scala @@ -0,0 +1,304 @@ +// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
+// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.integration.tests.ledgerapi.submission + +import cats.syntax.traverse.* +import com.daml.nonempty.NonEmpty +import com.digitalasset.canton.config.RequireTypes.PositiveInt +import com.digitalasset.canton.console.CommandFailure +import com.digitalasset.canton.console.commands.PartiesAdministration +import com.digitalasset.canton.integration.{ + CommunityIntegrationTest, + EnvironmentDefinition, + HasCycleUtils, + SharedEnvironment, +} +import com.digitalasset.canton.logging.LogEntry +import com.digitalasset.canton.participant.topology.ParticipantTopologyManagerError.ExternalPartyAlreadyExists +import com.digitalasset.canton.topology.admin.grpc.TopologyStoreId +import com.digitalasset.canton.topology.transaction.ParticipantPermission.{ + Confirmation, + Observation, +} +import com.digitalasset.canton.topology.transaction.{HostingParticipant, PartyToParticipant} + +import scala.concurrent.Future + +trait ExternalPartyOnboardingIntegrationTestSetup + extends CommunityIntegrationTest + with SharedEnvironment + with BaseInteractiveSubmissionTest + with HasCycleUtils { + + override def environmentDefinition: EnvironmentDefinition = + EnvironmentDefinition.P3_S1M1 + .withSetup { implicit env => + import env.* + participants.all.synchronizers.connect_local(sequencer1, alias = daName) + } + .addConfigTransforms(enableInteractiveSubmissionTransforms*) +} + +class ExternalPartyOnboardingIntegrationTest extends ExternalPartyOnboardingIntegrationTestSetup { + "External party onboarding" should { + "host parties on multiple participants with a threshold" in { implicit env => + import env.* + val (onboardingTransactions, externalParty) = + participant1.parties.external + .onboarding_transactions( + "Alice", + additionalConfirming = Seq(participant2), + observing = Seq(participant3), + confirmationThreshold = PositiveInt.two, + ) + .futureValueUS + .value + + Seq(participant1, participant2, participant3).map { hostingNode => + hostingNode.ledger_api.parties.allocate_external( + synchronizer1Id, + onboardingTransactions.transactionsWithSingleSignature, + multiSignatures = onboardingTransactions.multiTransactionSignatures, + ) + } + + PartiesAdministration.Allocation.waitForPartyKnown( + partyId = externalParty.partyId, + hostingParticipant = participant1, + synchronizeParticipants = Seq(participant1, participant2, participant3), + synchronizerId = synchronizer1Id.logical, + ) + } + + "allocate a party from one of their observing nodes" in { implicit env => + import env.* + + val (onboardingTransactions, externalParty) = participant1.parties.external + .onboarding_transactions( + "Bob", + observing = Seq(participant2), + ) + .futureValueUS + .value + val partyId = externalParty.partyId + + participant2.ledger_api.parties.allocate_external( + synchronizer1Id, + onboardingTransactions.transactionsWithSingleSignature, + onboardingTransactions.multiTransactionSignatures, + ) + + // Use the admin API to authorize the hosting in this test; it can also be done via the + // allocateExternalParty endpoint on the Ledger API + // See the multi-hosted decentralized party test below for an example + val partyToParticipantProposal = eventually() { + participant1.topology.party_to_participant_mappings + .list( + synchronizer1Id, + proposals = true, + filterParty = partyId.toProtoPrimitive, + ) + .loneElement + } + val transactionHash = partyToParticipantProposal.context.transactionHash + participant1.topology.transactions.authorize[PartyToParticipant](
transactionHash, + mustBeFullyAuthorized = false, + store = TopologyStoreId.Synchronizer(synchronizer1Id), + ) + + PartiesAdministration.Allocation.waitForPartyKnown( + partyId = externalParty.partyId, + hostingParticipant = participant1, + synchronizeParticipants = Seq(participant1, participant2), + synchronizerId = synchronizer1Id.logical, + ) + } + + "allocate a decentralized multi-hosted multi-sig external party" in { implicit env => + import env.* + + // Create the namespace owners first + val namespace1 = participant1.parties.external.create_external_namespace() + val namespace2 = participant1.parties.external.create_external_namespace() + val namespace3 = participant1.parties.external.create_external_namespace() + val namespaceOwners = NonEmpty.mk(Set, namespace1, namespace2, namespace3) + + val confirmationThreshold = PositiveInt.two + val keysCount = PositiveInt.three + val keysThreshold = PositiveInt.two + val namespaceThreshold = PositiveInt.three + + // Generate the corresponding onboarding transactions + val onboardingData = participant1.parties.external.onboarding_transactions( + name = "Emily", + additionalConfirming = Seq(participant2), + observing = Seq(participant3), + confirmationThreshold = confirmationThreshold, + keysCount = keysCount, + keysThreshold = keysThreshold, + decentralizedNamespaceOwners = namespaceOwners.forgetNE, + namespaceThreshold = namespaceThreshold, + ) + + val (onboardingTransactions, emilyE) = onboardingData.futureValueUS.value + + // Start by having the extra hosting nodes authorize the hosting + // We can do that even before the party namespace is authorized + Seq(participant2, participant3).map { hostingNode => + hostingNode.ledger_api.parties.allocate_external( + synchronizer1Id, + Seq(onboardingTransactions.partyToParticipant.transaction -> Seq.empty), + multiSignatures = Seq.empty, + ) + } + + // Then load all transactions via the allocate endpoint + participant1.ledger_api.parties.allocate_external( + synchronizer1Id, + onboardingTransactions.transactionsWithSingleSignature, + multiSignatures = onboardingTransactions.multiTransactionSignatures, + ) + + // Eventually everything should be authorized correctly + eventually() { + val p2p = participant1.topology.party_to_participant_mappings + .list(filterParty = emilyE.partyId.filterString, synchronizerId = synchronizer1Id) + + p2p.loneElement.item.partyId shouldBe emilyE.partyId + p2p.loneElement.item.threshold shouldBe confirmationThreshold + p2p.loneElement.item.participants should contain(HostingParticipant(participant1, Confirmation)) + p2p.loneElement.item.participants should contain(HostingParticipant(participant2, Confirmation)) + p2p.loneElement.item.participants should contain(HostingParticipant(participant3, Observation)) + } + + eventually() { + val p2k = participant1.topology.party_to_key_mappings.list( + filterParty = emilyE.partyId.filterString, + store = synchronizer1Id, + ) + + p2k.loneElement.item.party shouldBe emilyE.partyId + p2k.loneElement.item.threshold shouldBe keysThreshold + p2k.loneElement.item.signingKeys.forgetNE + .map(_.fingerprint) should contain theSameElementsAs emilyE.signingFingerprints.forgetNE + } + + eventually() { + val dnd = participant1.topology.decentralized_namespaces.list( + filterNamespace = emilyE.partyId.namespace.filterString, + store = synchronizer1Id, + ) + + dnd.loneElement.item.namespace shouldBe emilyE.partyId.uid.namespace + dnd.loneElement.item.threshold shouldBe namespaceThreshold + dnd.loneElement.item.owners.forgetNE shouldBe namespaceOwners.forgetNE + } + 
} + + "provide useful error message when the participant is not connected to the synchronizer" in { + implicit env => + import env.* + val (onboardingTransactions, _) = + participant1.parties.external.onboarding_transactions("Alice").futureValueUS.value + + participant1.synchronizers.disconnect_all() + + loggerFactory.assertThrowsAndLogs[CommandFailure]( + participant1.ledger_api.parties.allocate_external( + synchronizer1Id, + onboardingTransactions.transactionsWithSingleSignature, + onboardingTransactions.multiTransactionSignatures, + ), + _.errorMessage should include( + s"This node is not connected to the requested synchronizer ${synchronizer1Id.logical}." + ), + ) + + participant1.synchronizers.reconnect_all() + } + + "provide useful error message when onboarding the same party twice" in { implicit env => + import env.* + + val (onboardingTransactions, partyE) = + participant1.parties.external.onboarding_transactions("Alice").futureValueUS.value + + def allocate() = + participant1.ledger_api.parties.allocate_external( + synchronizer1Id, + onboardingTransactions.transactionsWithSingleSignature, + onboardingTransactions.multiTransactionSignatures, + ) + + // Allocate once + allocate() + participant1.ledger_api.parties.list().find(_.party == partyE.partyId) shouldBe defined + + // Allocate a second time + loggerFactory.assertThrowsAndLogsSeq[CommandFailure]( + allocate(), + LogEntry.assertLogSeq( + Seq( + ( + _.errorMessage should include( + ExternalPartyAlreadyExists.Failure(partyE.partyId, synchronizer1Id).cause + ), + "Expected party already exists error", + ) + ) + ), + ) + } + + "provide useful error message when concurrently retrying onboarding requests for the same party" in { + implicit env => + import env.* + + val (onboardingTransactions, partyE) = + participant1.parties.external.onboarding_transactions("Alice").futureValueUS.value + + def allocate() = + participant1.ledger_api.parties.allocate_external( + synchronizer1Id, + onboardingTransactions.transactionsWithSingleSignature, + onboardingTransactions.multiTransactionSignatures, + ) + + loggerFactory.assertLoggedWarningsAndErrorsSeq( + { + val results = timeouts.default.await("Waiting for concurrent allocation attempts")( + Seq + .fill(10)( + Future(allocate()).map(_ => Right(())).recover { case ex => Left(ex) } + ) + .sequence + ) + // Only one of them should be a success + results.count(_.isRight) shouldBe 1 + }, + LogEntry.assertLogSeq( + Seq( + ( + _.errorMessage should include( + s"Party ${partyE.partyId.uid.identifier.str} is in the process of being allocated on this node." 
+ ), + "Expected party allocation in progress error", + ) + ), + // One of the calls may come in late, when the party is already fully allocated and + // no other call is in flight, so catch that case here + Seq( + _.errorMessage should include( + ExternalPartyAlreadyExists.Failure(partyE.partyId, synchronizer1Id).cause + ) + ), + ), + ) + + // Check the party was still allocated + participant1.ledger_api.parties.list().find(_.party == partyE.partyId) shouldBe defined + } + } +} diff --git a/sdk/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/ledgerapi/submission/InteractiveSubmissionConfirmationIntegrationTest.scala b/sdk/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/ledgerapi/submission/InteractiveSubmissionConfirmationIntegrationTest.scala index b773857709e8..44ef19d9995c 100644 --- a/sdk/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/ledgerapi/submission/InteractiveSubmissionConfirmationIntegrationTest.scala +++ b/sdk/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/ledgerapi/submission/InteractiveSubmissionConfirmationIntegrationTest.scala @@ -6,7 +6,6 @@ package com.digitalasset.canton.integration.tests.ledgerapi.submission import com.daml.ledger.api.v2.interactive.interactive_submission_service.PrepareSubmissionResponse import com.daml.nonempty.NonEmptyUtil import com.daml.scalautil.future.FutureConversion.* -import com.digitalasset.canton.LfTimestamp import com.digitalasset.canton.config.RequireTypes.PositiveInt import com.digitalasset.canton.console.CommandFailure import com.digitalasset.canton.crypto.InteractiveSubmission.TransactionMetadataForHashing @@ -37,6 +36,7 @@ import com.digitalasset.canton.sequencing.protocol.MemberRecipient import com.digitalasset.canton.synchronizer.sequencer.{HasProgrammableSequencer, SendDecision} import com.digitalasset.canton.topology.{ExternalParty, PartyId} import com.digitalasset.canton.version.HashingSchemeVersion +import com.digitalasset.canton.{HasExecutionContext, LfTimestamp} import com.digitalasset.daml.lf.data.ImmArray import com.digitalasset.daml.lf.data.Ref.{SubmissionId, UserId} import io.grpc.Status @@ -54,6 +54,7 @@ final class InteractiveSubmissionConfirmationIntegrationTest with SharedEnvironment with BaseInteractiveSubmissionTest with HasProgrammableSequencer + with HasExecutionContext with HasCycleUtils { private var aliceE: ExternalParty = _ diff --git a/sdk/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/ledgerapi/submission/MultiHostingInteractiveSubmissionIntegrationTest.scala b/sdk/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/ledgerapi/submission/MultiHostingInteractiveSubmissionIntegrationTest.scala index 0d3f5799e916..a9e75b454d8f 100644 --- a/sdk/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/ledgerapi/submission/MultiHostingInteractiveSubmissionIntegrationTest.scala +++ b/sdk/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/ledgerapi/submission/MultiHostingInteractiveSubmissionIntegrationTest.scala @@ -21,14 +21,6 @@ import com.digitalasset.canton.integration.{ } import com.digitalasset.canton.logging.LogEntry import com.digitalasset.canton.topology.ExternalParty -import com.digitalasset.canton.topology.admin.grpc.TopologyStoreId -import com.digitalasset.canton.topology.transaction.{ - HostingParticipant, - ParticipantPermission, - PartyToParticipant, - 
TopologyChangeOp, - TopologyTransaction, -} import io.grpc.Status /** Test and demonstrates onboarding of a multi hosted external party @@ -39,10 +31,12 @@ sealed trait MultiHostingInteractiveSubmissionIntegrationTest with BaseInteractiveSubmissionTest with HasCycleUtils { + // Alice is onboarded as a multi hosted party at the beginning of the test suite and re-used in subsequent tests private var aliceE: ExternalParty = _ override protected def epn(implicit env: TestConsoleEnvironment): LocalParticipantReference = env.participant1 + private val cpns: ParticipantsSelector = env => Seq(env.participant1, env.participant2) private val opns: ParticipantsSelector = env => Seq(env.participant3) @@ -61,64 +55,39 @@ sealed trait MultiHostingInteractiveSubmissionIntegrationTest participants.all.synchronizers.connect_local(sequencer1, alias = daName) participants.all.dars.upload(CantonExamplesPath) - } - .addConfigTransforms(enableInteractiveSubmissionTransforms*) - "Interactive submission" should { - "host parties on multiple participants with a threshold" in { implicit env => - import env.* - val (onboardingTransactions, externalParty) = - participant1.parties.external - .onboarding_transactions( - "Alice", - additionalConfirming = Seq(participant2), - observing = Seq(participant3), - confirmationThreshold = PositiveInt.two, - ) - .futureValueUS - .value - - loadOnboardingTransactions( - externalParty, - confirming = participant1, - synchronizerId = daId, - onboardingTransactions, - extraConfirming = Seq(participant2), - observing = Seq(participant3), - ) + // Create a multi hosted party for this test suite + val (onboardingTransactions, externalParty) = + participant1.parties.external + .onboarding_transactions( + "Alice", + additionalConfirming = Seq(participant2), + observing = Seq(participant3), + confirmationThreshold = PositiveInt.two, + ) + .futureValueUS + .value - aliceE = externalParty + aliceE = externalParty - val newPTP = TopologyTransaction( - TopologyChangeOp.Replace, - serial = PositiveInt.two, - mapping = PartyToParticipant - .create( - aliceE.partyId, - threshold = PositiveInt.two, - Seq( - HostingParticipant(participant1, ParticipantPermission.Confirmation, false), - HostingParticipant(participant2, ParticipantPermission.Confirmation, false), - HostingParticipant(participant3, ParticipantPermission.Observation, false), - ), + Seq(participant1, participant2, participant3).map { hostingNode => + hostingNode.ledger_api.parties.allocate_external( + synchronizer1Id, + onboardingTransactions.transactionsWithSingleSignature, + multiSignatures = onboardingTransactions.multiTransactionSignatures, ) - .value, - protocolVersion = testedProtocolVersion, - ) + } - eventually() { - participants.all.forall( - _.topology.party_to_participant_mappings - .is_known( - daId, - aliceE, - hostingParticipants = participants.all, - threshold = Some(newPTP.mapping.threshold), - ) - ) shouldBe true + PartiesAdministration.Allocation.waitForPartyKnown( + partyId = externalParty.partyId, + hostingParticipant = participant1, + synchronizeParticipants = Seq(participant1, participant2, participant3), + synchronizerId = synchronizer1Id.logical, + ) } - } + .addConfigTransforms(enableInteractiveSubmissionTransforms*) + "Interactive submission" should { "create a contract and read it from all confirming and observing participants" in { implicit env => val contractId = @@ -137,46 +106,6 @@ sealed trait MultiHostingInteractiveSubmissionIntegrationTest events.distinct should have size 1 } - "allocate a party from 
one of their observing nodes" in { implicit env => - import env.* - - val (onboardingTransactions, externalParty) = generateExternalPartyOnboardingTransactions( - "Bob", - Seq(participant1.id), - observing = Seq(participant2), - ) - val partyId = externalParty.partyId - - participant2.ledger_api.parties.allocate_external( - synchronizer1Id, - onboardingTransactions.transactionsWithSingleSignature, - onboardingTransactions.multiTransactionSignatures, - ) - - val partyToParticipantProposal = eventually() { - participant1.topology.party_to_participant_mappings - .list( - synchronizer1Id, - proposals = true, - filterParty = partyId.toProtoPrimitive, - ) - .loneElement - } - val transactionHash = partyToParticipantProposal.context.transactionHash - participant1.topology.transactions.authorize[PartyToParticipant]( - transactionHash, - mustBeFullyAuthorized = false, - store = TopologyStoreId.Synchronizer(synchronizer1Id), - ) - - PartiesAdministration.Allocation.waitForPartyKnown( - partyId = externalParty.partyId, - hostingParticipant = participant1, - synchronizeParticipants = Seq(participant1, participant2), - synchronizerId = synchronizer1Id.logical, - ) - } - "fail if not enough confirming participants confirm" in { implicit env => import env.* // Stop one of the 2 CPNs - threshold is 2 so the transaction cannot be committed diff --git a/sdk/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/ledgerapi/submission/TimeBasedInteractiveIntegrationTest.scala b/sdk/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/ledgerapi/submission/TimeBasedInteractiveIntegrationTest.scala index afa48c9fdeff..52352456b353 100644 --- a/sdk/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/ledgerapi/submission/TimeBasedInteractiveIntegrationTest.scala +++ b/sdk/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/ledgerapi/submission/TimeBasedInteractiveIntegrationTest.scala @@ -10,7 +10,9 @@ import com.digitalasset.canton.admin.api.client.commands.LedgerApiCommands.Updat import com.digitalasset.canton.admin.api.client.data.TemplateId.fromIdentifier import com.digitalasset.canton.damltests.java.cycle.Cycle import com.digitalasset.canton.damltests.java.statictimetest.Pass +import com.digitalasset.canton.data.CantonTimestamp import com.digitalasset.canton.discard.Implicits.DiscardOps +import com.digitalasset.canton.integration.plugins.UseProgrammableSequencer import com.digitalasset.canton.integration.tests.ledgerapi.submission.BaseInteractiveSubmissionTest.defaultConfirmingParticipant import com.digitalasset.canton.integration.util.UpdateFormatHelpers.getUpdateFormat import com.digitalasset.canton.integration.{ @@ -20,10 +22,18 @@ import com.digitalasset.canton.integration.{ HasCycleUtils, SharedEnvironment, } +import com.digitalasset.canton.logging.{LogEntry, SuppressionRule} +import com.digitalasset.canton.participant.protocol.TransactionProcessor.SubmissionErrors.TimeoutError +import com.digitalasset.canton.synchronizer.sequencer.{ + HasProgrammableSequencer, + SendDecision, + SendPolicy, +} import com.digitalasset.canton.topology.{ExternalParty, ForceFlags} import com.digitalasset.canton.{HasExecutionContext, config} import com.digitalasset.daml.lf.data.Time import io.grpc.Status +import org.slf4j.event.Level import scalapb.TimestampConverters import java.time.{Duration, Instant} @@ -37,7 +47,8 @@ final class TimeBasedInteractiveIntegrationTest with SharedEnvironment with BaseInteractiveSubmissionTest 
with HasCycleUtils - with HasExecutionContext { + with HasExecutionContext + with HasProgrammableSequencer { private val oneDay = Duration.ofHours(24) @@ -52,6 +63,8 @@ } .addConfigTransforms(enableInteractiveSubmissionTransforms*) + registerPlugin(new UseProgrammableSequencer(this.getClass.toString, loggerFactory)) + private var aliceE: ExternalParty = _ private def createPassCmd( @@ -171,6 +184,78 @@ execAndWait(prepared, signatures).discard } + "respect max record time" in { implicit env => + import env.* + val simClock = env.environment.simClock.value + + def test(sequenceAt: CantonTimestamp => CantonTimestamp, expectSuccess: Boolean): Unit = { + // Set max record time below ledgerTimeRecordTimeTolerance + val maxRecordTime = simClock.now.add(ledgerTimeRecordTimeTolerance.dividedBy(2)) + val prepared = + cpn.ledger_api.interactive_submission.prepare( + Seq(aliceE), + Seq(createCycleCommand(aliceE, "test")), + maxRecordTime = Some(maxRecordTime), + ) + + val signatures = Map( + aliceE.partyId -> global_secret.sign(prepared.preparedTransactionHash, aliceE) + ) + + getProgrammableSequencer(sequencer1.name).withSendPolicy( + "Delay sequencing of submission request", + SendPolicy.processTimeProofs { implicit traceContext => submissionRequest => + if (submissionRequest.isConfirmationRequest && submissionRequest.sender == epn.id) { + // When we receive the confirmation request, advance time to the desired sequencing time + simClock.advanceTo(sequenceAt(maxRecordTime)) + } + SendDecision.Process + }, + ) { + + // exec will pick LET = clock.now + // and max sequencing time + // = Min(LET + ledgerTimeRecordTimeTolerance, maxRecordTime) + // = Min(clock.now + ledgerTimeRecordTimeTolerance, clock.now + ledgerTimeRecordTimeTolerance / 2) + // = maxRecordTime + if (expectSuccess) { + execAndWait(prepared, signatures) + } else { + loggerFactory.assertEventuallyLogsSeq(SuppressionRule.LevelAndAbove(Level.WARN))( + { + val (submissionId, ledgerEnd) = exec(prepared, signatures, epn) + // Request a time proof to advance synchronizer time on the participant so it realizes + // that the request has timed out and emits a completion event + epn.underlying.value.sync + .lookupSynchronizerTimeTracker(synchronizer1Id) + .value + .requestTick(maxRecordTime.immediateSuccessor, immediately = true) + val completion = findCompletion(submissionId, ledgerEnd, aliceE, epn) + completion.status.value.code shouldBe io.grpc.Status.Code.ABORTED.value() + completion.status.value.message should include(TimeoutError.code.id) + () + }, + LogEntry.assertLogSeq( + Seq( + ( + _.warningMessage should include("Submission timed out"), + "expected submission timed out warning", + ) + ) + ), + ) + } + } + } + + // Expect success when the event is sequenced just before the max record time + // Technically, exactly at max record time is fine, but because there are concurrent ticks going on, testing at exactly + // max sequencing time ends up not going through if a tick gets sequenced first + test(_.minusMillis(1), expectSuccess = true) + // Expect failure when the event goes through right after max record time + test(_.immediateSuccessor, expectSuccess = false) + } + "rejects execution requests outside the submission tolerance" in { implicit env => import env.* val simClock = env.environment.simClock.value diff --git a/sdk/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/pkgdars/PackageUploadIntegrationTest.scala
b/sdk/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/pkgdars/PackageUploadIntegrationTest.scala index 8f42117bc764..5e76c38bf17e 100644 --- a/sdk/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/pkgdars/PackageUploadIntegrationTest.scala +++ b/sdk/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/pkgdars/PackageUploadIntegrationTest.scala @@ -385,10 +385,7 @@ trait PackageUploadIntegrationTest LogEntryOptionality.Required, logEntry => logEntry.shouldBeOneOfCommandFailure( - Seq( - TopologyManagerError.SerialMismatch, - TopologyManagerError.ParticipantTopologyManagerError.DangerousVettingCommandsRequireForce, - ), + Seq(TopologyManagerError.SerialMismatch), path, ), ) diff --git a/sdk/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/topology/LogicalSynchronizerUpgradeTopologyIntegrationTest.scala b/sdk/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/topology/LogicalSynchronizerUpgradeTopologyIntegrationTest.scala index 3fd8b2c67f2b..e19ae9f93d4f 100644 --- a/sdk/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/topology/LogicalSynchronizerUpgradeTopologyIntegrationTest.scala +++ b/sdk/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/topology/LogicalSynchronizerUpgradeTopologyIntegrationTest.scala @@ -5,7 +5,7 @@ package com.digitalasset.canton.integration.tests.topology import com.daml.nonempty.NonEmpty import com.digitalasset.canton.config.DbConfig -import com.digitalasset.canton.config.RequireTypes.{NonNegativeInt, PositiveInt} +import com.digitalasset.canton.config.RequireTypes.PositiveInt import com.digitalasset.canton.console.{CommandFailure, LocalParticipantReference} import com.digitalasset.canton.crypto.SigningKeyUsage import com.digitalasset.canton.data.CantonTimestamp @@ -21,40 +21,57 @@ import com.digitalasset.canton.integration.{ import com.digitalasset.canton.participant.store.SynchronizerConnectionConfigStore import com.digitalasset.canton.participant.synchronizer.SynchronizerConnectionConfig import com.digitalasset.canton.sequencing.{GrpcSequencerConnection, SequencerConnections} +import com.digitalasset.canton.topology.TopologyManagerError.InvalidSynchronizerSuccessor import com.digitalasset.canton.topology.transaction.DelegationRestriction.CanSignAllMappings import com.digitalasset.canton.topology.{ KnownPhysicalSynchronizerId, + PhysicalSynchronizerId, SequencerId, TopologyManagerError, UnknownPhysicalSynchronizerId, } import com.google.protobuf.ByteString import monocle.syntax.all.* +import org.scalatest.Assertion import java.net.URI +import java.util.concurrent.atomic.AtomicReference sealed trait LogicalSynchronizerUpgradeTopologyIntegrationTest extends CommunityIntegrationTest with SharedEnvironment { override def environmentDefinition: EnvironmentDefinition = - EnvironmentDefinition.P3_S2M2.addConfigTransform( - ConfigTransforms.updateAllParticipantConfigs_( - _.focus(_.parameters.automaticallyPerformLogicalSynchronizerUpgrade).replace(false) + EnvironmentDefinition.P3_S2M2 + .addConfigTransform( + ConfigTransforms.updateAllParticipantConfigs_( + _.focus(_.parameters.automaticallyPerformLogicalSynchronizerUpgrade).replace(false) + ) ) - ) + .withSetup { env => + latestSuccessorPSId.set(Some(env.daId)) + } - private def successorSynchronizerId(implicit env: TestConsoleEnvironment) = - env.daId.copy(serial = NonNegativeInt.one) + /* + PSId of the successor needs to be 
strictly increasing across announcements. + This allows tracking the latest one used. + */ + private val latestSuccessorPSId = new AtomicReference[Option[PhysicalSynchronizerId]](None) + + private def allocateSuccessorPSId(): PhysicalSynchronizerId = + latestSuccessorPSId.updateAndGet { existing => + Some(existing.value.copy(serial = existing.value.serial.increment.toNonNegative)) + }.value private lazy val upgradeTime = CantonTimestamp.now().plusSeconds(3600) "migration announcement does not permit further topology transactions" in { implicit env => import env.* + val successorPSId = allocateSuccessorPSId() synchronizerOwners1.foreach { owner => owner.topology.synchronizer_upgrade.announcement.propose( - successorPhysicalSynchronizerId = successorSynchronizerId, + successorPhysicalSynchronizerId = successorPSId, upgradeTime = upgradeTime, ) } @@ -65,7 +82,7 @@ loggerFactory.assertThrowsAndLogs[CommandFailure]( owner1.topology.namespace_delegations .propose_delegation(owner1.namespace, targetKey, CanSignAllMappings, daId), - _ shouldBeCantonErrorCode (TopologyManagerError.OngoingSynchronizerUpgrade), + _.shouldBeCantonErrorCode(TopologyManagerError.OngoingSynchronizerUpgrade), ) } @@ -73,7 +90,7 @@ import env.* synchronizerOwners1.foreach( _.topology.synchronizer_upgrade.announcement.revoke( - successorPhysicalSynchronizerId = successorSynchronizerId, + successorPhysicalSynchronizerId = latestSuccessorPSId.get().value, upgradeTime = upgradeTime, ) ) @@ -103,9 +120,10 @@ ) // announce the migration to prepare for the sequencer connection announcements + val successorPSId = allocateSuccessorPSId() synchronizerOwners1.foreach( _.topology.synchronizer_upgrade.announcement.propose( - successorPhysicalSynchronizerId = successorSynchronizerId, + successorPhysicalSynchronizerId = successorPSId, upgradeTime = upgradeTime, ) ) @@ -126,7 +144,7 @@ .get(daName, UnknownPhysicalSynchronizerId) .toOption shouldBe None connectionConfigStore(participant2) - .get(daName, KnownPhysicalSynchronizerId(successorSynchronizerId)) + .get(daName, KnownPhysicalSynchronizerId(successorPSId)) .toOption shouldBe None // sequencer2 announces its connection details for the successor synchronizer @@ -178,7 +196,7 @@ // unfrozen successor synchronizer synchronizerOwners1.foreach( _.topology.synchronizer_upgrade.announcement.revoke( - successorSynchronizerId, + latestSuccessorPSId.get().value, upgradeTime = upgradeTime, ) ) @@ -213,13 +231,61 @@ } } + "successor PSId should increase between announcements" in { implicit env => + import env.* + + val successor1 = allocateSuccessorPSId() + val successor2 = allocateSuccessorPSId() + val successor3 = allocateSuccessorPSId() + + Seq(successor1, successor2).foreach { successor => + synchronizerOwners1.foreach { owner => + owner.topology.synchronizer_upgrade.announcement.propose( + successorPhysicalSynchronizerId = successor, + upgradeTime = upgradeTime, + ) + } + + synchronizerOwners1.foreach { owner => + owner.topology.synchronizer_upgrade.announcement.revoke( + successorPhysicalSynchronizerId = successor, + upgradeTime = upgradeTime, + ) + } + } + + // Re-using successor1 or successor2 should fail + 
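// A descriptive note grounded in the assertion below: the topology manager keeps track of the latest + // announced successor, so re-proposing an already-used PSId is rejected with InvalidSynchronizerSuccessor. +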
Seq(successor1, successor2).foreach { successor => + loggerFactory.assertThrowsAndLogs[CommandFailure]( + sequencer1.topology.synchronizer_upgrade.announcement.propose( + successorPhysicalSynchronizerId = successor, + upgradeTime = upgradeTime, + ), + entry => { + entry.shouldBeCantonErrorCode(InvalidSynchronizerSuccessor) + entry.errorMessage should include( + InvalidSynchronizerSuccessor.Reject + .conflictWithPreviousAnnouncement(successor, successor2) + .cause + ) + }, + ) + } + + // But successor3 should be fine + sequencer1.topology.synchronizer_upgrade.announcement.propose( + successorPhysicalSynchronizerId = successor3, + upgradeTime = upgradeTime, + ) + } + private def connectionConfigStore(participant: LocalParticipantReference) = participant.underlying.value.sync.synchronizerConnectionConfigStore private def checkUpgradedSequencerConfig( participant: LocalParticipantReference, expectedSequencerPorts: (SequencerId, Int)* - )(implicit env: TestConsoleEnvironment) = { + )(implicit env: TestConsoleEnvironment): Assertion = { import env.* val portMap = expectedSequencerPorts.groupBy(_._1).view.mapValues(_.map(_._2)).toMap eventually() { @@ -228,7 +294,7 @@ configStore.get(daName, KnownPhysicalSynchronizerId(daId)).value currentConfig.status shouldBe SynchronizerConnectionConfigStore.Active val successorConfig = - configStore.get(daName, KnownPhysicalSynchronizerId(successorSynchronizerId)).value + configStore.get(daName, KnownPhysicalSynchronizerId(latestSuccessorPSId.get().value)).value successorConfig.status shouldBe SynchronizerConnectionConfigStore.UpgradingTarget val currentSequencers = currentConfig.config.sequencerConnections.aliasToConnection.map { diff --git a/sdk/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/topology/TopologyAdministrationIntegrationTest.scala b/sdk/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/topology/TopologyAdministrationIntegrationTest.scala index ed4f8cb6606c..239d13d0bb6c 100644 --- a/sdk/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/topology/TopologyAdministrationIntegrationTest.scala +++ b/sdk/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/topology/TopologyAdministrationIntegrationTest.scala @@ -85,9 +85,12 @@ trait TopologyAdministrationTest extends CommunityIntegrationTest with SharedEnv certs.head } + val expectedFeatureFlags = Seq.empty + val expectedTrustCert1 = SynchronizerTrustCertificate( participant1.id, daId, + featureFlags = expectedFeatureFlags, ) trustCert1.context.serial shouldBe PositiveInt.one @@ -357,7 +360,6 @@ participant1.id, store = daId, packages = packageIds, - force = ForceFlags(ForceFlag.AllowUnvetPackage), operation = TopologyChangeOp.Remove, ) val result = participant1.topology.vetted_packages @@ -381,7 +383,6 @@ participant1.id, store = daId, packages = Seq.empty, - force = ForceFlag.AllowUnvetPackage, ) val packageIds4 = participant1.topology.vetted_packages .list(store = daId, filterParticipant = participant1.filterString) @@ -442,7 +443,7 @@ participant1.id, store = daId, removes = startingPackages, - force = ForceFlags(ForceFlag.AllowUnvetPackage, ForceFlag.AllowUnvettedDependencies), + force
= ForceFlags(ForceFlag.AllowUnvettedDependencies), ) val removedPackagesResult = getVettedPackages() @@ -459,7 +460,6 @@ trait TopologyAdministrationTest extends CommunityIntegrationTest with SharedEnv store = daId, adds = VettedPackage.unbounded(startingPackages), removes = startingPackages, - force = ForceFlags(ForceFlag.AllowUnvetPackage), ) ) .getMessage should include("Cannot both add and remove a packageId: ") diff --git a/sdk/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/upgrade/LogicalUpgradeUtils.scala b/sdk/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/upgrade/LogicalUpgradeUtils.scala index 8868d1576a3e..f684bf2011cc 100644 --- a/sdk/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/upgrade/LogicalUpgradeUtils.scala +++ b/sdk/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/upgrade/LogicalUpgradeUtils.scala @@ -211,9 +211,7 @@ private[upgrade] object LogicalUpgradeUtils { genesisStateFile: File, ) { def uid: UniqueIdentifier = - UniqueIdentifier.tryFromProtoPrimitive( - uidFile.contentAsString - ) + UniqueIdentifier.tryFromProtoPrimitive(uidFile.contentAsString) def keys: Seq[(ByteString, Option[String])] = keyFiles.map { file => diff --git a/sdk/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/upgrade/lsu/LSUBase.scala b/sdk/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/upgrade/lsu/LSUBase.scala index eda63ede11f2..34782dbdb632 100644 --- a/sdk/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/upgrade/lsu/LSUBase.scala +++ b/sdk/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/upgrade/lsu/LSUBase.scala @@ -4,7 +4,9 @@ package com.digitalasset.canton.integration.tests.upgrade.lsu import com.digitalasset.canton.admin.api.client.data.StaticSynchronizerParameters -import com.digitalasset.canton.config.{NonNegativeFiniteDuration, RequireTypes} +import com.digitalasset.canton.config +import com.digitalasset.canton.config.RequireTypes.NonNegativeInt +import com.digitalasset.canton.console.InstanceReference import com.digitalasset.canton.data.{CantonTimestamp, SynchronizerSuccessor} import com.digitalasset.canton.integration.* import com.digitalasset.canton.integration.plugins.UsePostgres @@ -13,8 +15,7 @@ import com.digitalasset.canton.integration.tests.upgrade.LogicalUpgradeUtils.Syn import com.digitalasset.canton.integration.tests.upgrade.lsu.LSUBase.Fixture import com.digitalasset.canton.integration.util.EntitySyntax import com.digitalasset.canton.topology.PhysicalSynchronizerId -import com.digitalasset.canton.version.ProtocolVersion.ProtocolVersionWithStatus -import com.digitalasset.canton.version.{ProtocolVersion, ProtocolVersionAnnotation} +import com.digitalasset.canton.version.ProtocolVersion import monocle.macros.syntax.lens.* /** This trait provides helpers for the logical synchronizer upgrade tests. 
The main goal is to @@ -33,7 +34,8 @@ trait LSUBase protected var newSynchronizerNodes: SynchronizerNodes = _ protected def newOldSequencers: Map[String, String] protected def newOldMediators: Map[String, String] - protected val newOldNodesResolution: Map[String, String] = newOldSequencers ++ newOldMediators + protected def newOldNodesResolution: Map[String, String] = + newOldSequencers ++ newOldMediators protected def upgradeTime: CantonTimestamp @@ -50,6 +52,24 @@ trait LSUBase ConfigTransforms.useStaticTime, ) + protected def fixtureWithDefaults(upgradeTime: CantonTimestamp = upgradeTime)(implicit + env: TestConsoleEnvironment + ): Fixture = { + val currentPSId = env.daId + + Fixture( + currentPSId = currentPSId, + upgradeTime = upgradeTime, + oldSynchronizerNodes = oldSynchronizerNodes, + newSynchronizerNodes = newSynchronizerNodes, + newOldNodesResolution = newOldNodesResolution, + oldSynchronizerOwners = env.synchronizerOwners1, + newPV = ProtocolVersion.dev, + // increasing the serial as well, so that the test also works when running with PV=dev + newSerial = currentPSId.serial.increment.toNonNegative, + ) + } + /** Perform synchronizer side of the LSU: * * - Upgrade announcement @@ -58,16 +78,14 @@ trait LSUBase */ protected def performSynchronizerNodesLSU( fixture: Fixture - )(implicit env: TestConsoleEnvironment): Unit = { - import env.* - - synchronizerOwners1.foreach( - _.topology.synchronizer_upgrade.announcement.propose(fixture.newPSId, upgradeTime) + ): Unit = { + fixture.oldSynchronizerOwners.foreach( + _.topology.synchronizer_upgrade.announcement.propose(fixture.newPSId, fixture.upgradeTime) ) migrateSynchronizerNodes(fixture) - oldSynchronizerNodes.sequencers.zip(newSynchronizerNodes.sequencers).foreach { + fixture.oldSynchronizerNodes.sequencers.zip(fixture.newSynchronizerNodes.sequencers).foreach { case (oldSequencer, newSequencer) => oldSequencer.topology.synchronizer_upgrade.sequencer_successors.propose_successor( sequencerId = oldSequencer.id, @@ -85,37 +103,41 @@ trait LSUBase ): Unit = { exportNodesData( SynchronizerNodes( - sequencers = oldSynchronizerNodes.sequencers, - mediators = oldSynchronizerNodes.mediators, + sequencers = fixture.oldSynchronizerNodes.sequencers, + mediators = fixture.oldSynchronizerNodes.mediators, ) ) // Migrate nodes preserving their data (and IDs) - newSynchronizerNodes.all.foreach { newNode => + fixture.newSynchronizerNodes.all.foreach { newNode => migrateNode( migratedNode = newNode, newStaticSynchronizerParameters = fixture.newStaticSynchronizerParameters, synchronizerId = fixture.currentPSId, - newSequencers = newSynchronizerNodes.sequencers, + newSequencers = fixture.newSynchronizerNodes.sequencers, exportDirectory = baseExportDirectory, - sourceNodeNames = newOldNodesResolution, + sourceNodeNames = fixture.newOldNodesResolution, ) } } } private[lsu] object LSUBase { - final case class Fixture(currentPSId: PhysicalSynchronizerId, upgradeTime: CantonTimestamp) { - val newPV: ProtocolVersionWithStatus[ProtocolVersionAnnotation.Alpha] = ProtocolVersion.dev - - // increasing the serial as well, so that the test also works when running with PV=dev - val newSerial: RequireTypes.NonNegativeNumeric[Int] = currentPSId.serial.increment.toNonNegative - + final case class Fixture( + currentPSId: PhysicalSynchronizerId, + upgradeTime: CantonTimestamp, + oldSynchronizerNodes: SynchronizerNodes, + newSynchronizerNodes: SynchronizerNodes, + newOldNodesResolution: Map[String, String], + oldSynchronizerOwners: Set[InstanceReference], + newPV: 
ProtocolVersion, + newSerial: NonNegativeInt, + ) { val newStaticSynchronizerParameters: StaticSynchronizerParameters = StaticSynchronizerParameters.defaultsWithoutKMS( newPV, newSerial, - topologyChangeDelay = NonNegativeFiniteDuration.Zero, + topologyChangeDelay = config.NonNegativeFiniteDuration.Zero, ) val newPSId: PhysicalSynchronizerId = diff --git a/sdk/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/upgrade/lsu/LSUCancellationIntegrationTest.scala b/sdk/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/upgrade/lsu/LSUCancellationIntegrationTest.scala new file mode 100644 index 000000000000..a5078f66c08b --- /dev/null +++ b/sdk/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/upgrade/lsu/LSUCancellationIntegrationTest.scala @@ -0,0 +1,296 @@ +// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. +// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.integration.tests.upgrade.lsu + +import com.digitalasset.canton.admin.api.client.data.DynamicSynchronizerParameters as ConsoleDynamicSynchronizerParameters +import com.digitalasset.canton.config +import com.digitalasset.canton.config.{DbConfig, SynchronizerTimeTrackerConfig} +import com.digitalasset.canton.console.CommandFailure +import com.digitalasset.canton.data.{CantonTimestamp, SynchronizerSuccessor} +import com.digitalasset.canton.discard.Implicits.* +import com.digitalasset.canton.integration.* +import com.digitalasset.canton.integration.EnvironmentDefinition.S1M1 +import com.digitalasset.canton.integration.bootstrap.NetworkBootstrapper +import com.digitalasset.canton.integration.plugins.UseReferenceBlockSequencer.MultiSynchronizer +import com.digitalasset.canton.integration.plugins.{ + UseBftSequencer, + UsePostgres, + UseReferenceBlockSequencer, +} +import com.digitalasset.canton.integration.tests.examples.IouSyntax +import com.digitalasset.canton.integration.tests.upgrade.LogicalUpgradeUtils.SynchronizerNodes +import com.digitalasset.canton.integration.tests.upgrade.lsu.LSUBase.Fixture +import com.digitalasset.canton.participant.synchronizer.SynchronizerConnectionConfig +import com.digitalasset.canton.sequencing.SequencerConnections +import com.digitalasset.canton.topology.{PartyId, TopologyManagerError} +import com.digitalasset.canton.version.ProtocolVersion +import monocle.macros.syntax.lens.* + +import scala.annotation.nowarn + +/** The goal is to ensure that an LSU can be cancelled and that another LSU can be done + * subsequently. 
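+ * Cancellation must fully reset the upgrade state (announced successor and sequencer time offset) + * before the second announcement, which the assertions below verify along the way.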
+ * + * Test setup: + * + * - LSU is announced + * - Before the upgrade time, it is cancelled + * - Another LSU is announced + * - Second LSU is performed + */ +@nowarn("msg=dead code") +abstract class LSUCancellationIntegrationTest extends LSUBase { + + override protected def testName: String = "logical-synchronizer-upgrade" + + registerPlugin(new UsePostgres(loggerFactory)) + + override protected lazy val newOldSequencers: Map[String, String] = + throw new IllegalAccessException("Use fixtures instead") + override protected lazy val newOldMediators: Map[String, String] = + throw new IllegalAccessException("Use fixtures instead") + + private lazy val upgradeTime1: CantonTimestamp = CantonTimestamp.Epoch.plusSeconds(30) + private lazy val upgradeTime2: CantonTimestamp = CantonTimestamp.Epoch.plusSeconds(90) + + private var fixture1: Fixture = _ + private var fixture2: Fixture = _ + + private var bob: PartyId = _ + + private var dynamicSynchronizerParameters: ConsoleDynamicSynchronizerParameters = _ + + override protected lazy val upgradeTime: CantonTimestamp = throw new IllegalAccessException( + "Use upgradeTime1 and upgradeTime2 instead" + ) + + override protected def configTransforms: List[ConfigTransform] = { + val lowerBound1 = List("sequencer2") // successor of sequencer1 for the first upgrade + .map(sequencerName => + ConfigTransforms + .updateSequencerConfig(sequencerName)( + _.focus(_.parameters.sequencingTimeLowerBoundExclusive).replace(Some(upgradeTime1)) + ) + ) + + val lowerBound2 = List("sequencer3") // successor of sequencer1 for the second upgrade + .map(sequencerName => + ConfigTransforms + .updateSequencerConfig(sequencerName)( + _.focus(_.parameters.sequencingTimeLowerBoundExclusive).replace(Some(upgradeTime2)) + ) + ) + + val allNewNodes = Set("sequencer2", "sequencer3", "mediator2", "mediator3") + + lowerBound1 ++ lowerBound2 ++ List( + ConfigTransforms.disableAutoInit(allNewNodes), + ConfigTransforms.useStaticTime, + ) + } + + override lazy val environmentDefinition: EnvironmentDefinition = + EnvironmentDefinition + .buildBaseEnvironmentDefinition( + numParticipants = 1, + numSequencers = 3, + numMediators = 3, + ) + /* + The test is made slightly more robust by explicitly controlling which nodes are running. + This makes it possible to ensure that the correct synchronizer nodes are used for each LSU.
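+ Sequencer2 and mediator2 are only started for the first (cancelled) upgrade, while sequencer3 and + mediator3 are only started for the second one.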
+ */ + .withManualStart + .withNetworkBootstrap { implicit env => + new NetworkBootstrapper(S1M1) + } + .addConfigTransforms(configTransforms*) + .withSetup { implicit env => + import env.* + + val daSequencerConnection = + SequencerConnections.single(sequencer1.sequencerConnection.withAlias(daName.toString)) + + participants.local.start() + + participants.all.synchronizers.connect( + SynchronizerConnectionConfig( + synchronizerAlias = daName, + sequencerConnections = daSequencerConnection, + timeTracker = SynchronizerTimeTrackerConfig(observationLatency = + config.NonNegativeFiniteDuration.Zero + ), + ) + ) + + participants.all.dars.upload(CantonExamplesPath) + participant1.health.ping(participant1) + + synchronizerOwners1.foreach( + _.topology.synchronizer_parameters.propose_update( + daId, + _.copy(reconciliationInterval = config.PositiveDurationSeconds.ofSeconds(1)), + ) + ) + } + + /** Check whether an LSU is ongoing + * @param successor + * Defined iff an upgrade is ongoing + */ + private def checkLSUOngoing( + successor: Option[SynchronizerSuccessor] + )(implicit env: TestConsoleEnvironment) = { + import env.* + + val connectedSynchronizer = participant1.underlying.value.sync + .connectedSynchronizerForAlias(daName) + .value + + connectedSynchronizer.ephemeral.recordOrderPublisher.getSynchronizerSuccessor shouldBe successor + + connectedSynchronizer.synchronizerCrypto.currentSnapshotApproximation.ipsSnapshot + .synchronizerUpgradeOngoing() + .futureValueUS + .map { case (successor, _) => successor } shouldBe successor + } + + "Logical synchronizer upgrade should be cancellable and re-announced" should { + "initial setup" in { implicit env => + import env.* + + fixture1 = Fixture( + currentPSId = daId, + upgradeTime = upgradeTime1, + oldSynchronizerNodes = SynchronizerNodes(Seq(sequencer1), Seq(mediator1)), + newSynchronizerNodes = SynchronizerNodes(Seq(sequencer2), Seq(mediator2)), + newOldNodesResolution = Map("sequencer2" -> "sequencer1", "mediator2" -> "mediator1"), + oldSynchronizerOwners = synchronizerOwners1, + newPV = ProtocolVersion.dev, + newSerial = daId.serial.increment.toNonNegative, + ) + + fixture2 = Fixture( + currentPSId = daId, + upgradeTime = upgradeTime2, + oldSynchronizerNodes = SynchronizerNodes(Seq(sequencer1), Seq(mediator1)), + newSynchronizerNodes = SynchronizerNodes(Seq(sequencer3), Seq(mediator3)), + newOldNodesResolution = Map("sequencer3" -> "sequencer1", "mediator3" -> "mediator1"), + oldSynchronizerOwners = synchronizerOwners1, + newPV = ProtocolVersion.dev, + newSerial = fixture1.newSerial.increment.toNonNegative, + ) + + dynamicSynchronizerParameters = participant1.topology.synchronizer_parameters.latest(daId) + + // Some assertions below don't make sense if the value is too low + dynamicSynchronizerParameters.decisionTimeout.asJava.getSeconds should be > 10L + + daId should not be fixture1.newPSId + fixture1.newPSId should not be fixture2.newPSId + + val alice = participant1.parties.enable("Alice") + val bank = participant1.parties.enable("Bank") + IouSyntax.createIou(participant1)(bank, alice).discard + } + + "first LSU and cancellation" in { implicit env => + import env.* + + val clock = environment.simClock.value + + sequencer2.start() + mediator2.start() + + performSynchronizerNodesLSU(fixture1) + + eventually()(checkLSUOngoing(Some(fixture1.synchronizerSuccessor))) + + // Fails because the upgrade is ongoing + loggerFactory.assertThrowsAndLogs[CommandFailure]( + participant1.parties.enable("Bob"), + 
_.shouldBeCantonErrorCode(TopologyManagerError.OngoingSynchronizerUpgrade), + ) + + clock.advanceTo(upgradeTime1.minusSeconds(5)) + + fixture1.oldSynchronizerOwners.foreach( + _.topology.synchronizer_upgrade.announcement.revoke(fixture1.newPSId, fixture1.upgradeTime) + ) + + eventually()(checkLSUOngoing(None)) + + sequencer2.stop() + mediator2.stop() + + clock.advanceTo(upgradeTime1.immediateSuccessor) + + // Time offset on the old sequencer is not applied + sequencer1.underlying.value.sequencer.timeTracker + .fetchTime() + .futureValueUS should be < upgradeTime1.plus( + dynamicSynchronizerParameters.decisionTimeout.asJava + ) + + // Call should fail if no upgrade is ongoing + eventually() { + participant1.underlying.value.sync + .upgradeSynchronizerTo(daId, fixture1.synchronizerSuccessor) + .value + .futureValueUS + .left + .value shouldBe "No synchronizer upgrade ongoing" + } + + bob = participant1.parties.enable("Bob") + } + + "second LSU" in { implicit env => + import env.* + + val clock = environment.simClock.value + + sequencer3.start() + mediator3.start() + + performSynchronizerNodesLSU(fixture2) + + clock.advanceTo(upgradeTime2.immediateSuccessor) + + eventually() { + participants.all.forall(_.synchronizers.is_connected(fixture2.newPSId)) shouldBe true + } + + // Time offset is applied on the old sequencer + sequencer1.underlying.value.sequencer.timeTracker + .fetchTime() + .futureValueUS should be >= upgradeTime2.plus( + dynamicSynchronizerParameters.decisionTimeout.asJava + ) + + // Bob is known + participant1.topology.party_to_participant_mappings + .list(fixture2.newPSId, filterParty = bob.filterString) + .loneElement + } + } +} + +final class LSUCancellationReferenceIntegrationTest extends LSUCancellationIntegrationTest { + registerPlugin( + new UseReferenceBlockSequencer[DbConfig.Postgres]( + loggerFactory, + MultiSynchronizer.tryCreate(Set("sequencer1"), Set("sequencer2"), Set("sequencer3")), + ) + ) +} + +final class LSUCancellationBftOrderingIntegrationTest extends LSUCancellationIntegrationTest { + registerPlugin( + new UseBftSequencer( + loggerFactory, + MultiSynchronizer.tryCreate(Set("sequencer1"), Set("sequencer2"), Set("sequencer3")), + ) + ) +} diff --git a/sdk/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/upgrade/lsu/LSUEndToEndIntegrationTest.scala b/sdk/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/upgrade/lsu/LSUEndToEndIntegrationTest.scala index e6c2335c9b24..4d6781330dbe 100644 --- a/sdk/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/upgrade/lsu/LSUEndToEndIntegrationTest.scala +++ b/sdk/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/upgrade/lsu/LSUEndToEndIntegrationTest.scala @@ -18,7 +18,6 @@ import com.digitalasset.canton.integration.plugins.{ } import com.digitalasset.canton.integration.tests.examples.IouSyntax import com.digitalasset.canton.integration.tests.upgrade.LogicalUpgradeUtils.SynchronizerNodes -import com.digitalasset.canton.integration.tests.upgrade.lsu.LSUBase.Fixture import com.digitalasset.canton.participant.synchronizer.SynchronizerConnectionConfig import com.digitalasset.canton.sequencing.SequencerConnections @@ -78,7 +77,7 @@ abstract class LSUEndToEndIntegrationTest extends LSUBase { "work end-to-end" in { implicit env => import env.* - val fixture = Fixture(daId, upgradeTime) + val fixture = fixtureWithDefaults() participant1.health.ping(participant2) diff --git 
a/sdk/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/upgrade/lsu/LSUExternalPartiesTest.scala b/sdk/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/upgrade/lsu/LSUExternalPartiesTest.scala index 5ba29b0e3a59..bb168aeda13a 100644 --- a/sdk/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/upgrade/lsu/LSUExternalPartiesTest.scala +++ b/sdk/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/upgrade/lsu/LSUExternalPartiesTest.scala @@ -15,7 +15,6 @@ import com.digitalasset.canton.integration.plugins.UseReferenceBlockSequencer.Mu import com.digitalasset.canton.integration.plugins.{UsePostgres, UseReferenceBlockSequencer} import com.digitalasset.canton.integration.tests.examples.IouSyntax import com.digitalasset.canton.integration.tests.upgrade.LogicalUpgradeUtils.SynchronizerNodes -import com.digitalasset.canton.integration.tests.upgrade.lsu.LSUBase.Fixture import com.digitalasset.canton.participant.ledger.api.client.JavaDecodeUtil import com.digitalasset.canton.participant.synchronizer.SynchronizerConnectionConfig import com.digitalasset.canton.sequencing.SequencerConnections @@ -72,7 +71,7 @@ abstract class LSUExternalPartiesIntegrationTest extends LSUBase { "work with external parties" in { implicit env => import env.* - val fixture = Fixture(daId, upgradeTime) + val fixture = fixtureWithDefaults() val alice = participant1.parties.external.enable("AliceE") val bob = participant2.parties.enable("Bob") diff --git a/sdk/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/upgrade/lsu/LSUPruningIntegrationTest.scala b/sdk/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/upgrade/lsu/LSUPruningIntegrationTest.scala index 137162f1a8fe..c6131528de9a 100644 --- a/sdk/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/upgrade/lsu/LSUPruningIntegrationTest.scala +++ b/sdk/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/upgrade/lsu/LSUPruningIntegrationTest.scala @@ -23,7 +23,6 @@ import com.digitalasset.canton.integration.plugins.{ } import com.digitalasset.canton.integration.tests.examples.IouSyntax import com.digitalasset.canton.integration.tests.upgrade.LogicalUpgradeUtils.SynchronizerNodes -import com.digitalasset.canton.integration.tests.upgrade.lsu.LSUBase.Fixture import com.digitalasset.canton.participant.synchronizer.SynchronizerConnectionConfig import com.digitalasset.canton.sequencing.SequencerConnections import monocle.macros.syntax.lens.* @@ -94,7 +93,7 @@ abstract class LSUPruningIntegrationTest extends LSUBase { "work correctly" in { implicit env => import env.* - val fixture = Fixture(daId, upgradeTime) + val fixture = fixtureWithDefaults() participant1.health.ping(participant2) diff --git a/sdk/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/upgrade/lsu/LSUReassignmentsIntegrationTest.scala b/sdk/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/upgrade/lsu/LSUReassignmentsIntegrationTest.scala index 86fd7e60544a..2c4cdcd727c1 100644 --- a/sdk/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/upgrade/lsu/LSUReassignmentsIntegrationTest.scala +++ b/sdk/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/upgrade/lsu/LSUReassignmentsIntegrationTest.scala @@ -13,7 +13,6 @@ import com.digitalasset.canton.integration.plugins.UseReferenceBlockSequencer.Mu 
import com.digitalasset.canton.integration.plugins.{UsePostgres, UseReferenceBlockSequencer} import com.digitalasset.canton.integration.tests.examples.IouSyntax import com.digitalasset.canton.integration.tests.upgrade.LogicalUpgradeUtils.SynchronizerNodes -import com.digitalasset.canton.integration.tests.upgrade.lsu.LSUBase.Fixture import com.digitalasset.canton.participant.synchronizer.SynchronizerConnectionConfig import com.digitalasset.canton.protocol.LfContractId import com.digitalasset.canton.sequencing.SequencerConnections @@ -80,7 +79,7 @@ abstract class LSUReassignmentsIntegrationTest extends LSUBase { "work with reassignments" in { implicit env => import env.* - val fixture = Fixture(daId, upgradeTime) + val fixture = fixtureWithDefaults() val alice = participant1.parties.enable("Alice", synchronizer = Some(daName)) participant1.parties.enable("Alice", synchronizer = Some(acmeName)) diff --git a/sdk/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/upgrade/lsu/LSURestartIntegrationTest.scala b/sdk/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/upgrade/lsu/LSURestartIntegrationTest.scala index 6f1dc3b59e04..455e52608ece 100644 --- a/sdk/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/upgrade/lsu/LSURestartIntegrationTest.scala +++ b/sdk/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/upgrade/lsu/LSURestartIntegrationTest.scala @@ -16,7 +16,6 @@ import com.digitalasset.canton.integration.plugins.{ UseReferenceBlockSequencer, } import com.digitalasset.canton.integration.tests.upgrade.LogicalUpgradeUtils.SynchronizerNodes -import com.digitalasset.canton.integration.tests.upgrade.lsu.LSUBase.Fixture import monocle.macros.syntax.lens.* /* @@ -75,7 +74,7 @@ abstract class LSURestartIntegrationTest extends LSUBase { "work when participants are restarted" in { implicit env => import env.* - val fixture = Fixture(daId, upgradeTime) + val fixture = fixtureWithDefaults() participant1.health.ping(participant1) diff --git a/sdk/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/upgrade/lsu/LSUTimeoutInflightIntegrationTest.scala b/sdk/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/upgrade/lsu/LSUTimeoutInflightIntegrationTest.scala index 9670e7102cde..66f6f1455927 100644 --- a/sdk/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/upgrade/lsu/LSUTimeoutInflightIntegrationTest.scala +++ b/sdk/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/upgrade/lsu/LSUTimeoutInflightIntegrationTest.scala @@ -18,7 +18,6 @@ import com.digitalasset.canton.integration.plugins.{ } import com.digitalasset.canton.integration.tests.examples.IouSyntax import com.digitalasset.canton.integration.tests.upgrade.LogicalUpgradeUtils.SynchronizerNodes -import com.digitalasset.canton.integration.tests.upgrade.lsu.LSUBase.Fixture import com.digitalasset.canton.logging.LogEntry import com.digitalasset.canton.participant.protocol.TransactionProcessor.SubmissionErrors import com.digitalasset.canton.participant.synchronizer.SynchronizerConnectionConfig @@ -106,7 +105,7 @@ abstract class LSUTimeoutInFlightIntegrationTest extends LSUBase with HasProgram "be timed out around LSU" in { implicit env => import env.* - val fixture = Fixture(daId, upgradeTime) + val fixture = fixtureWithDefaults() val alice = participant1.parties.enable("alice") val bob = participant2.parties.enable("bob") diff --git 
diff --git a/sdk/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/upgrade/lsu/LSUTopologyIntegrationTest.scala b/sdk/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/upgrade/lsu/LSUTopologyIntegrationTest.scala index d3cd5953bd66..77d5b06d512c 100644 --- a/sdk/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/upgrade/lsu/LSUTopologyIntegrationTest.scala +++ b/sdk/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/upgrade/lsu/LSUTopologyIntegrationTest.scala @@ -17,7 +17,6 @@ import com.digitalasset.canton.integration.plugins.{ UseReferenceBlockSequencer, } import com.digitalasset.canton.integration.tests.upgrade.LogicalUpgradeUtils.SynchronizerNodes -import com.digitalasset.canton.integration.tests.upgrade.lsu.LSUBase.Fixture import com.digitalasset.canton.topology.TopologyManagerError import com.digitalasset.canton.topology.store.TimeQuery import com.digitalasset.canton.topology.transaction.TopologyMapping @@ -89,7 +88,7 @@ abstract class LSUTopologyIntegrationTest extends LSUBase { "work end-to-end" in { implicit env => import env.* - val fixture = Fixture(daId, upgradeTime) + val fixture = fixtureWithDefaults() val newPSId = fixture.newPSId val newStaticSynchronizerParameters = fixture.newStaticSynchronizerParameters diff --git a/sdk/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/upgrade/lsu/UpgradeTimeOldSynchronizerIntegrationTest.scala b/sdk/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/upgrade/lsu/UpgradeTimeOldSynchronizerIntegrationTest.scala index 63a38323a13a..006221fdcee7 100644 --- a/sdk/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/upgrade/lsu/UpgradeTimeOldSynchronizerIntegrationTest.scala +++ b/sdk/canton/community/app/src/test/scala/com/digitalasset/canton/integration/tests/upgrade/lsu/UpgradeTimeOldSynchronizerIntegrationTest.scala @@ -64,10 +64,9 @@ class UpgradeTimeOldSynchronizerIntegrationTest participant1.synchronizers.connect_local(sequencer1, daName) - synchronizerOwners1.foreach { owner => - owner.topology.synchronizer_upgrade.announcement - .propose(successorPSId, upgradeTime) - } + synchronizerOwners1.foreach( + _.topology.synchronizer_upgrade.announcement.propose(successorPSId, upgradeTime) + ) eventually() { participant1.topology.synchronizer_upgrade.announcement @@ -163,9 +162,13 @@ class UpgradeTimeOldSynchronizerIntegrationTest logger.debug("Ping failed") participant1.testing.fetch_synchronizer_times() - // TODO(#26580): Test also cancelling and updating the upgrade announcement, e.g.: - // - cancel the upgrade announcement and check that the time is not offset (or that offsetting got removed and this was logged) - // - update the upgrade announcement to a later time and check that the time offsetting is updated accordingly + val dynamicSynchronizerParameters = participant1.topology.synchronizer_parameters.latest(daId) + + sequencer1.underlying.value.sequencer.timeTracker + .fetchTime() + .futureValueUS should be >= upgradeTime.plus( + dynamicSynchronizerParameters.decisionTimeout.asJava + ) val cleanSynchronizerIndex = participant1.underlying.value.sync.stateInspection .getAcsInspection(daId) diff --git a/sdk/canton/community/app/src/test/scala/com/digitalasset/canton/util/SetupPackageVetting.scala b/sdk/canton/community/app/src/test/scala/com/digitalasset/canton/util/SetupPackageVetting.scala index b4894c238572..57f1809d49a0 100644 ---
a/sdk/canton/community/app/src/test/scala/com/digitalasset/canton/util/SetupPackageVetting.scala +++ b/sdk/canton/community/app/src/test/scala/com/digitalasset/canton/util/SetupPackageVetting.scala @@ -194,7 +194,6 @@ class SetupPackageVetting( object SetupPackageVetting { val AllUnvettingFlags: ForceFlags = ForceFlags( - ForceFlag.AllowUnvetPackage, ForceFlag.AllowUnvettedDependencies, ForceFlag.AllowUnvetPackageWithActiveContracts, ) diff --git a/sdk/canton/community/base/src/main/protobuf/com/digitalasset/canton/protocol/v30/topology.proto b/sdk/canton/community/base/src/main/protobuf/com/digitalasset/canton/protocol/v30/topology.proto index 60532f44cc48..d097d1572c0f 100644 --- a/sdk/canton/community/base/src/main/protobuf/com/digitalasset/canton/protocol/v30/topology.proto +++ b/sdk/canton/community/base/src/main/protobuf/com/digitalasset/canton/protocol/v30/topology.proto @@ -60,6 +60,13 @@ message Enums { TOPOLOGY_MAPPING_CODE_SYNCHRONIZER_MIGRATION_ANNOUNCEMENT = 19; TOPOLOGY_MAPPING_CODE_SEQUENCER_CONNECTION_SUCCESSOR = 20; } + + enum ParticipantFeatureFlag { + PARTICIPANT_FEATURE_FLAG_UNSPECIFIED = 0; + // UNUSED in PV >= 34 - Was meant to tactically fix a bug in the external signing hash computation + // in model conformance in PV 33 + PARTICIPANT_FEATURE_FLAG_PV33_EXTERNAL_SIGNING_LOCAL_CONTRACT_IN_SUBVIEW = 1; + } } // [start NamespaceDelegation definition] @@ -168,6 +175,9 @@ message SynchronizerTrustCertificate { reserved 3; // was bool reassignment_only_to_given_target_synchronizer_ids = 3; reserved 4; // was repeated string target_synchronizer_ids = 4; + + // Feature flags that this node declares to support on this synchronizer + repeated Enums.ParticipantFeatureFlag feature_flags = 5; } // the optional trust certificate of the synchronizer towards the participant diff --git a/sdk/canton/community/base/src/main/protobuf/com/digitalasset/canton/topology/admin/v30/topology_manager_write_service.proto b/sdk/canton/community/base/src/main/protobuf/com/digitalasset/canton/topology/admin/v30/topology_manager_write_service.proto index c88076d801ac..70060695a893 100644 --- a/sdk/canton/community/base/src/main/protobuf/com/digitalasset/canton/topology/admin/v30/topology_manager_write_service.proto +++ b/sdk/canton/community/base/src/main/protobuf/com/digitalasset/canton/topology/admin/v30/topology_manager_write_service.proto @@ -232,8 +232,9 @@ enum ForceFlag { FORCE_FLAG_ALIEN_MEMBER = 1; /* Deprecated, increasing ledger time record time tolerance does not require a force flag for PV >= 32 */ FORCE_FLAG_LEDGER_TIME_RECORD_TIME_TOLERANCE_INCREASE = 2; - /** Required when revoking the vetting of a package */ - FORCE_FLAG_ALLOW_UNVET_PACKAGE = 3; + // Previously FORCE_FLAG_ALLOW_UNVET_PACKAGE, now always enabled as it is not dangerous anymore + reserved 3; + reserved "FORCE_FLAG_ALLOW_UNVET_PACKAGE"; /** Required when vetting unknown packages (not uploaded). 
*/ FORCE_FLAG_ALLOW_UNKNOWN_PACKAGE = 4; /** Required when vetting a package with unvetted dependencies */ diff --git a/sdk/canton/community/base/src/main/scala/com/digitalasset/canton/crypto/InteractiveSubmission.scala b/sdk/canton/community/base/src/main/scala/com/digitalasset/canton/crypto/InteractiveSubmission.scala index dec0f75c2faa..d7de14e64d48 100644 --- a/sdk/canton/community/base/src/main/scala/com/digitalasset/canton/crypto/InteractiveSubmission.scala +++ b/sdk/canton/community/base/src/main/scala/com/digitalasset/canton/crypto/InteractiveSubmission.scala @@ -288,7 +288,8 @@ object InteractiveSubmission { _ <- EitherT.cond[FutureUnlessShutdown]( validSignaturesSet.sizeIs >= authInfo.threshold.unwrap, (), - s"Received ${validSignatures.size} valid signatures (${invalidSignatures.size} invalid), but expected at least ${authInfo.threshold} valid for $party", + s"Received ${validSignatures.size} valid signatures (${invalidSignatures.size} invalid), but expected at least ${authInfo.threshold} valid for $party. " + + s"Transaction hash to be signed: ${hash.toHexString}. Ensure the correct transaction hash is signed with the correct key(s).", ) } yield { logger.debug( diff --git a/sdk/canton/community/base/src/main/scala/com/digitalasset/canton/data/OnboardingTransactions.scala b/sdk/canton/community/base/src/main/scala/com/digitalasset/canton/data/OnboardingTransactions.scala index 95ab2d628f5e..a5b1efa7e1fb 100644 --- a/sdk/canton/community/base/src/main/scala/com/digitalasset/canton/data/OnboardingTransactions.scala +++ b/sdk/canton/community/base/src/main/scala/com/digitalasset/canton/data/OnboardingTransactions.scala @@ -9,12 +9,12 @@ import com.digitalasset.canton.topology.transaction.* /** Onboarding transactions for an external party */ final case class OnboardingTransactions( - namespaceDelegation: SignedTopologyTransaction[TopologyChangeOp.Replace, NamespaceDelegation], + namespace: SignedTopologyTransaction[TopologyChangeOp.Replace, TopologyMapping], partyToParticipant: SignedTopologyTransaction[TopologyChangeOp.Replace, PartyToParticipant], partyToKeyMapping: SignedTopologyTransaction[TopologyChangeOp.Replace, PartyToKeyMapping], ) { def toSeq: Seq[SignedTopologyTransaction[TopologyChangeOp.Replace, TopologyMapping]] = - Seq(namespaceDelegation, partyToParticipant, partyToKeyMapping) + Seq(namespace, partyToParticipant, partyToKeyMapping) def transactionsWithSingleSignature : Seq[(TopologyTransaction[TopologyChangeOp.Replace, TopologyMapping], Seq[Signature])] = diff --git a/sdk/canton/community/base/src/main/scala/com/digitalasset/canton/protocol/ContractInstance.scala b/sdk/canton/community/base/src/main/scala/com/digitalasset/canton/protocol/ContractInstance.scala index c63eda73d2cc..0770945d6f17 100644 --- a/sdk/canton/community/base/src/main/scala/com/digitalasset/canton/protocol/ContractInstance.scala +++ b/sdk/canton/community/base/src/main/scala/com/digitalasset/canton/protocol/ContractInstance.scala @@ -49,7 +49,8 @@ sealed trait GenContractInstance extends PrettyPrinting { } object ContractInstance { - private final case class ContractInstanceImpl[Time <: CreationTime]( + // TODO(#28382) revert removal of private access modifier + final case class ContractInstanceImpl[Time <: CreationTime]( override val inst: FatContractInstance { type CreatedAtTime = Time }, override val metadata: ContractMetadata, override val serialization: ByteString, diff --git a/sdk/canton/community/base/src/main/scala/com/digitalasset/canton/sequencing/ConnectionX.scala 
b/sdk/canton/community/base/src/main/scala/com/digitalasset/canton/sequencing/ConnectionX.scala index a06b84504e1d..8ffebe214b53 100644 --- a/sdk/canton/community/base/src/main/scala/com/digitalasset/canton/sequencing/ConnectionX.scala +++ b/sdk/canton/community/base/src/main/scala/com/digitalasset/canton/sequencing/ConnectionX.scala @@ -69,7 +69,15 @@ object ConnectionX { customTrustCertificates: Option[ByteString], expectedSequencerIdO: Option[SequencerId], tracePropagation: TracingConfig.Propagation, - ) + ) extends PrettyPrinting { + override protected def pretty: Pretty[ConnectionXConfig] = prettyOfClass( + param("name", _.name.singleQuoted), + param("endpoint", _.endpoint.toURI(transportSecurity)), + param("transportSecurity", _.transportSecurity), + param("customTrustCertificates", _.customTrustCertificates.nonEmpty), + paramIfDefined("expectedSequencerId", _.expectedSequencerIdO), + ) + } class ConnectionXHealth( override val name: String, diff --git a/sdk/canton/community/base/src/main/scala/com/digitalasset/canton/sequencing/SequencerConnectionXPool.scala b/sdk/canton/community/base/src/main/scala/com/digitalasset/canton/sequencing/SequencerConnectionXPool.scala index 2662f573dcce..b9cf5839bda6 100644 --- a/sdk/canton/community/base/src/main/scala/com/digitalasset/canton/sequencing/SequencerConnectionXPool.scala +++ b/sdk/canton/community/base/src/main/scala/com/digitalasset/canton/sequencing/SequencerConnectionXPool.scala @@ -16,6 +16,7 @@ import com.digitalasset.canton.lifecycle.{ HasRunOnClosing, OnShutdownRunner, } +import com.digitalasset.canton.logging.pretty.{Pretty, PrettyPrinting} import com.digitalasset.canton.logging.{NamedLogging, TracedLogger} import com.digitalasset.canton.networking.Endpoint import com.digitalasset.canton.protocol.StaticSynchronizerParameters @@ -170,10 +171,18 @@ object SequencerConnectionXPool { minRestartConnectionDelay: config.NonNegativeFiniteDuration, maxRestartConnectionDelay: config.NonNegativeFiniteDuration, expectedPSIdO: Option[PhysicalSynchronizerId] = None, - ) { + ) extends PrettyPrinting { // TODO(i24780): when persisting, use com.digitalasset.canton.version.Invariant machinery for validation import SequencerConnectionXPoolConfig.* + override protected def pretty: Pretty[SequencerConnectionXPoolConfig] = prettyOfClass( + param("connections", _.connections), + param("trustThreshold", _.trustThreshold), + param("minRestartConnectionDelay", _.minRestartConnectionDelay), + param("maxRestartConnectionDelay", _.maxRestartConnectionDelay), + paramIfDefined("expectedPSIdO", _.expectedPSIdO), + ) + def validate: Either[SequencerConnectionXPoolError, Unit] = { val (names, endpoints) = connections.map(conn => conn.name -> conn.endpoint).unzip @@ -224,7 +233,12 @@ object SequencerConnectionXPool { private[sequencing] final case class ChangedConnections( added: Set[ConnectionXConfig], removed: Set[ConnectionXConfig], - ) + ) extends PrettyPrinting { + override protected def pretty: Pretty[ChangedConnections] = prettyOfClass( + param("added", _.added), + param("removed", _.removed), + ) + } /** Create a sequencer connection pool configuration from the existing format. 
* @@ -313,7 +327,8 @@ trait SequencerConnectionXPoolFactory { import SequencerConnectionXPool.{SequencerConnectionXPoolConfig, SequencerConnectionXPoolError} def create( - initialConfig: SequencerConnectionXPoolConfig + initialConfig: SequencerConnectionXPoolConfig, + name: String, )(implicit ec: ExecutionContextExecutor, esf: ExecutionSequencerFactory, @@ -325,6 +340,7 @@ trait SequencerConnectionXPoolFactory { sequencerConnections: SequencerConnections, expectedPSIdO: Option[PhysicalSynchronizerId], tracingConfig: TracingConfig, + name: String, )(implicit ec: ExecutionContextExecutor, esf: ExecutionSequencerFactory, diff --git a/sdk/canton/community/base/src/main/scala/com/digitalasset/canton/sequencing/SequencerConnectionXPoolImpl.scala b/sdk/canton/community/base/src/main/scala/com/digitalasset/canton/sequencing/SequencerConnectionXPoolImpl.scala index 1041a1fd9c81..ba8103a1b297 100644 --- a/sdk/canton/community/base/src/main/scala/com/digitalasset/canton/sequencing/SequencerConnectionXPoolImpl.scala +++ b/sdk/canton/community/base/src/main/scala/com/digitalasset/canton/sequencing/SequencerConnectionXPoolImpl.scala @@ -377,10 +377,14 @@ class SequencerConnectionXPoolImpl private[sequencing] ( } } yield { configRef.set(newConfig) + logger.info(s"Configuration updated to: $newConfig") // If the trust threshold is now reached, process it bootstrapIfThresholdReachedO.foreach(initializePool) + logger.debug( + s"Configuration update triggers the following connection changes: $changedConnections" + ) updateTrackedConnections( toBeAdded = changedConnections.added, toBeRemoved = changedConnections.removed, @@ -725,18 +729,21 @@ class GrpcSequencerConnectionXPoolFactory( import SequencerConnectionXPool.{SequencerConnectionXPoolConfig, SequencerConnectionXPoolError} override def create( - initialConfig: SequencerConnectionXPoolConfig + initialConfig: SequencerConnectionXPoolConfig, + name: String, )(implicit ec: ExecutionContextExecutor, esf: ExecutionSequencerFactory, materializer: Materializer, ): Either[SequencerConnectionXPoolError, SequencerConnectionXPool] = { + val loggerWithPoolName = loggerFactory.append("pool", name) + val connectionFactory = new GrpcInternalSequencerConnectionXFactory( clientProtocolVersions, minimumProtocolVersion, futureSupervisor, timeouts, - loggerFactory, + loggerWithPoolName, ) for { @@ -752,7 +759,7 @@ class GrpcSequencerConnectionXPoolFactory( seedForRandomnessO, futureSupervisor, timeouts, - loggerFactory, + loggerWithPoolName, ) } } @@ -761,6 +768,7 @@ class GrpcSequencerConnectionXPoolFactory( sequencerConnections: SequencerConnections, expectedPSIdO: Option[PhysicalSynchronizerId], tracingConfig: TracingConfig, + name: String, )(implicit ec: ExecutionContextExecutor, esf: ExecutionSequencerFactory, @@ -774,6 +782,6 @@ class GrpcSequencerConnectionXPoolFactory( ) logger.debug(s"poolConfig = $poolConfig") - create(poolConfig) + create(poolConfig, name) } } diff --git a/sdk/canton/community/base/src/main/scala/com/digitalasset/canton/sequencing/SequencerSubscriptionPool.scala b/sdk/canton/community/base/src/main/scala/com/digitalasset/canton/sequencing/SequencerSubscriptionPool.scala index 74869d39bc39..d96cec6ea66e 100644 --- a/sdk/canton/community/base/src/main/scala/com/digitalasset/canton/sequencing/SequencerSubscriptionPool.scala +++ b/sdk/canton/community/base/src/main/scala/com/digitalasset/canton/sequencing/SequencerSubscriptionPool.scala @@ -4,12 +4,14 @@ package com.digitalasset.canton.sequencing import com.digitalasset.canton.config -import 
com.digitalasset.canton.config.RequireTypes.{NonNegativeInt, PositiveInt} +import com.digitalasset.canton.config.RequireTypes.NonNegativeInt import com.digitalasset.canton.health.{AtomicHealthComponent, ComponentHealthState} import com.digitalasset.canton.lifecycle.{FlagCloseable, HasRunOnClosing} +import com.digitalasset.canton.logging.pretty.{Pretty, PrettyPrinting} import com.digitalasset.canton.logging.{NamedLogging, TracedLogger} import com.digitalasset.canton.sequencing.SequencerSubscriptionPool.SequencerSubscriptionPoolConfig import com.digitalasset.canton.sequencing.SequencerSubscriptionPoolImpl.SubscriptionStartProvider +import com.digitalasset.canton.sequencing.client.SequencerClient.SequencerTransports import com.digitalasset.canton.sequencing.client.{SequencerClient, SequencerClientSubscriptionError} import com.digitalasset.canton.topology.Member import com.digitalasset.canton.tracing.TraceContext @@ -62,20 +64,36 @@ trait SequencerSubscriptionPool extends FlagCloseable with NamedLogging { object SequencerSubscriptionPool { /** Subscription pool configuration - * @param trustThreshold - * Minimal number of subscriptions needed to satisfy the trust requirements. * @param livenessMargin * Number of extra subscriptions to maintain to ensure liveness. * @param subscriptionRequestDelay * Delay between the attempts to obtain new connections, when the current number of - * subscriptions is not [[trustThreshold]] + [[livenessMargin]]. + * subscriptions is not `trustThreshold` + [[livenessMargin]]. */ final case class SequencerSubscriptionPoolConfig( - trustThreshold: PositiveInt, livenessMargin: NonNegativeInt, subscriptionRequestDelay: config.NonNegativeFiniteDuration, - ) { - lazy val activeThreshold: PositiveInt = trustThreshold + livenessMargin + ) extends PrettyPrinting { + override protected def pretty: Pretty[SequencerSubscriptionPoolConfig] = prettyOfClass( + param("livenessMargin", _.livenessMargin), + param("subscriptionRequestDelay", _.subscriptionRequestDelay), + ) + } + + object SequencerSubscriptionPoolConfig { + + /** Create a sequencer subscription pool configuration from the existing format. + * + * TODO(i27260): remove when no longer needed + */ + def fromSequencerTransports( + sequencerTransports: SequencerTransports[?] 
+ ): SequencerSubscriptionPoolConfig = + SequencerSubscriptionPoolConfig( + livenessMargin = sequencerTransports.sequencerLivenessMargin, + subscriptionRequestDelay = + sequencerTransports.sequencerConnectionPoolDelays.subscriptionRequestDelay, + ) } class SequencerSubscriptionPoolHealth( diff --git a/sdk/canton/community/base/src/main/scala/com/digitalasset/canton/sequencing/SequencerSubscriptionPoolImpl.scala b/sdk/canton/community/base/src/main/scala/com/digitalasset/canton/sequencing/SequencerSubscriptionPoolImpl.scala index e70db0bf5dd4..3b4b7ff91e40 100644 --- a/sdk/canton/community/base/src/main/scala/com/digitalasset/canton/sequencing/SequencerSubscriptionPoolImpl.scala +++ b/sdk/canton/community/base/src/main/scala/com/digitalasset/canton/sequencing/SequencerSubscriptionPoolImpl.scala @@ -4,8 +4,9 @@ package com.digitalasset.canton.sequencing import cats.syntax.either.* +import com.digitalasset.canton.config as cantonConfig import com.digitalasset.canton.config.ProcessingTimeout -import com.digitalasset.canton.config.RequireTypes.PositiveInt +import com.digitalasset.canton.config.RequireTypes.{NonNegativeInt, PositiveInt} import com.digitalasset.canton.discard.Implicits.DiscardOps import com.digitalasset.canton.health.HealthListener import com.digitalasset.canton.lifecycle.LifeCycle @@ -15,7 +16,10 @@ import com.digitalasset.canton.sequencing.SequencerSubscriptionPool.{ SequencerSubscriptionPoolConfig, SequencerSubscriptionPoolHealth, } -import com.digitalasset.canton.sequencing.SequencerSubscriptionPoolImpl.SubscriptionStartProvider +import com.digitalasset.canton.sequencing.SequencerSubscriptionPoolImpl.{ + ConfigWithThreshold, + SubscriptionStartProvider, +} import com.digitalasset.canton.sequencing.client.SequencerClient.CloseReason.UnrecoverableError import com.digitalasset.canton.sequencing.client.SequencerClientSubscriptionError.{ ApplicationHandlerPassive, @@ -71,10 +75,17 @@ final class SequencerSubscriptionPoolImpl private[sequencing] ( override def config: SequencerSubscriptionPoolConfig = configRef.get + /** Use this instead of [[config]] to obtain a snapshot of all the current configuration + * parameters at once. 
+ */ + private def currentConfigWithThreshold: ConfigWithThreshold = + ConfigWithThreshold(config, pool.config.trustThreshold) + override def updateConfig( newConfig: SequencerSubscriptionPoolConfig )(implicit traceContext: TraceContext): Unit = { configRef.set(newConfig) + logger.info(s"Configuration updated to: $newConfig") // We might need new connections adjustConnectionsIfNeeded() @@ -99,8 +110,7 @@ final class SequencerSubscriptionPoolImpl private[sequencing] ( def adjustInternal(): Unit = blocking { lock.synchronized { if (!isClosing && currentRequest.get == myToken) { - val currentConfig = config - val activeThreshold = currentConfig.activeThreshold + val activeThreshold = currentConfigWithThreshold.activeThreshold val current = trackedSubscriptions.toSet logger.debug( @@ -150,8 +160,8 @@ final class SequencerSubscriptionPoolImpl private[sequencing] ( case Left(_) if nbToRequest < 0 => val toRemove = trackedSubscriptions.take(-nbToRequest) - logger.debug( - s"Dropping ${toRemove.size} subscriptions: ${toRemove.map(_.subscription.connection.name).mkString(", ")}" + logger.info( + s"Dropping ${toRemove.size} extra subscription(s): ${toRemove.map(_.subscription.connection.name).mkString(", ")}" ) removeSubscriptionsFromPool(toRemove.toSeq*) @@ -208,7 +218,8 @@ final class SequencerSubscriptionPoolImpl private[sequencing] ( def isThresholdStillReachable(ignoreCurrent: Boolean): Boolean = blocking(lock.synchronized { val ignored: Set[ConnectionX.ConnectionXConfig] = if (ignoreCurrent) Set(connection.config) else Set.empty - val result = pool.isThresholdStillReachable(config.trustThreshold, ignored) + val trustThreshold = currentConfigWithThreshold.trustThreshold + val result = pool.isThresholdStillReachable(trustThreshold, ignored) logger.debug(s"isThresholdStillReachable(ignored = $ignored) = $result") result }) @@ -307,7 +318,7 @@ final class SequencerSubscriptionPoolImpl private[sequencing] ( } private def updateHealth()(implicit traceContext: TraceContext): Unit = { - val currentConfig = config + val currentConfig = currentConfigWithThreshold trackedSubscriptions.size match { case nb if nb >= currentConfig.activeThreshold.unwrap => health.resolveUnhealthy() @@ -316,8 +327,8 @@ final class SequencerSubscriptionPoolImpl private[sequencing] ( s"below liveness margin: $nb subscription(s) available, trust threshold = ${currentConfig.trustThreshold}," + s" liveness margin = ${currentConfig.livenessMargin}" ) - case _ if !pool.isThresholdStillReachable(config.trustThreshold, Set.empty) => - val reason = s"Trust threshold ${config.trustThreshold} is no longer reachable" + case _ if !pool.isThresholdStillReachable(currentConfig.trustThreshold, Set.empty) => + val reason = s"Trust threshold ${currentConfig.trustThreshold} is no longer reachable" health.fatalOccurred(reason) closeReasonPromise.tryComplete(Success(UnrecoverableError(reason))).discard case nb => @@ -367,6 +378,15 @@ final class SequencerSubscriptionPoolImpl private[sequencing] ( } object SequencerSubscriptionPoolImpl { + private final case class ConfigWithThreshold( + private val poolConfig: SequencerSubscriptionPoolConfig, + trustThreshold: PositiveInt, + ) { + val livenessMargin: NonNegativeInt = poolConfig.livenessMargin + val subscriptionRequestDelay: cantonConfig.NonNegativeFiniteDuration = + poolConfig.subscriptionRequestDelay + lazy val activeThreshold: PositiveInt = trustThreshold + livenessMargin + } /** Trait for an object that can provide the starting event for a subscription */
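To make the refactor above concrete: trustThreshold has moved out of SequencerSubscriptionPoolConfig into the connection pool's configuration, and ConfigWithThreshold recombines the two on demand. A hedged sketch of the resulting arithmetic, using the types visible in this diff (the literal values are illustrative only):

  // Sketch: the trust threshold comes from the connection pool's config,
  // the liveness margin from the subscription pool's own config.
  val trustThreshold: PositiveInt = PositiveInt.tryCreate(2) // i.e. pool.config.trustThreshold
  val subscriptionConfig = SequencerSubscriptionPoolConfig(
    livenessMargin = NonNegativeInt.tryCreate(1),
    subscriptionRequestDelay = config.NonNegativeFiniteDuration.ofSeconds(1),
  )
  // Mirrors ConfigWithThreshold.activeThreshold: 2 + 1 = 3 subscriptions targeted
  val activeThreshold: PositiveInt = trustThreshold + subscriptionConfig.livenessMargin

Snapshotting both values at once via currentConfigWithThreshold avoids reading the two configs at different times while they are being updated concurrently.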
diff --git a/sdk/canton/community/base/src/main/scala/com/digitalasset/canton/sequencing/SubscriptionHandlerX.scala b/sdk/canton/community/base/src/main/scala/com/digitalasset/canton/sequencing/SubscriptionHandlerX.scala index a6c32dd170bc..5d4c1545ec2e 100644 --- a/sdk/canton/community/base/src/main/scala/com/digitalasset/canton/sequencing/SubscriptionHandlerX.scala +++ b/sdk/canton/community/base/src/main/scala/com/digitalasset/canton/sequencing/SubscriptionHandlerX.scala @@ -27,7 +27,7 @@ import com.digitalasset.canton.{SequencerAlias, time} import java.util.concurrent.atomic.AtomicReference import scala.concurrent.ExecutionContext -class SubscriptionHandlerX( +class SubscriptionHandlerX private[sequencing] ( clock: Clock, metrics: SequencerClientMetrics, applicationHandlerFailure: SingleUseCell[ApplicationHandlerFailure], diff --git a/sdk/canton/community/base/src/main/scala/com/digitalasset/canton/sequencing/authentication/AuthenticationTokenProvider.scala b/sdk/canton/community/base/src/main/scala/com/digitalasset/canton/sequencing/authentication/AuthenticationTokenProvider.scala index 4297cedbb70a..c4d954950a79 100644 --- a/sdk/canton/community/base/src/main/scala/com/digitalasset/canton/sequencing/authentication/AuthenticationTokenProvider.scala +++ b/sdk/canton/community/base/src/main/scala/com/digitalasset/canton/sequencing/authentication/AuthenticationTokenProvider.scala @@ -291,6 +291,11 @@ object AuthenticationTokenProvider { logger: TracedLogger, )(implicit tc: TraceContext): ErrorKind = exception match { + case ex: StatusRuntimeException + if ex.getStatus.getCode == Status.Code.UNAVAILABLE && + ex.getMessage.contains("Channel shutdown invoked") => + FatalErrorKind + // Ideally we would like to retry only on retryable gRPC status codes (such as `UNAVAILABLE`), // but as this could be hard to get right, we compromise by retrying on all gRPC status codes, // and use a finite number of retries. diff --git a/sdk/canton/community/base/src/main/scala/com/digitalasset/canton/sequencing/client/SequencerClient.scala b/sdk/canton/community/base/src/main/scala/com/digitalasset/canton/sequencing/client/SequencerClient.scala index 688287921d47..65e4c71af67a 100644 --- a/sdk/canton/community/base/src/main/scala/com/digitalasset/canton/sequencing/client/SequencerClient.scala +++ b/sdk/canton/community/base/src/main/scala/com/digitalasset/canton/sequencing/client/SequencerClient.scala @@ -51,6 +51,7 @@ import com.digitalasset.canton.sequencing.SequencerAggregatorPekko.{ HasSequencerSubscriptionFactoryPekko, SubscriptionControl, } +import com.digitalasset.canton.sequencing.SequencerConnectionXPool.SequencerConnectionXPoolConfig import com.digitalasset.canton.sequencing.SequencerSubscriptionPool.SequencerSubscriptionPoolConfig import com.digitalasset.canton.sequencing.client.PeriodicAcknowledgements.FetchCleanTimestamp import com.digitalasset.canton.sequencing.client.SendAsyncClientError.SendAsyncClientResponseError @@ -192,8 +193,9 @@ trait RichSequencerClient extends SequencerClient { def healthComponent: CloseableHealthComponent def changeTransport( - sequencerTransports: SequencerTransports[?] - )(implicit traceContext: TraceContext): FutureUnlessShutdown[Unit] + sequencerTransports: SequencerTransports[?], + newConnectionPoolConfigO: Option[SequencerConnectionXPoolConfig], + )(implicit traceContext: TraceContext): EitherT[FutureUnlessShutdown, String, Unit] /** Future which is completed when the client is not functional any more and is ready to be * closed.
The value with which the future is completed will indicate the reason for completion. @@ -1187,12 +1189,8 @@ class RichSequencerClientImpl( ) if (config.useNewConnectionPool) { - val subscriptionPoolConfig = SequencerSubscriptionPoolConfig( - trustThreshold = sequencerTransports.sequencerTrustThreshold, - livenessMargin = sequencerTransports.sequencerLivenessMargin, - subscriptionRequestDelay = - sequencerTransports.sequencerConnectionPoolDelays.subscriptionRequestDelay, - ) + val subscriptionPoolConfig = + SequencerSubscriptionPoolConfig.fromSequencerTransports(sequencerTransports) val eventBatchProcessor = new EventBatchProcessor { override def process( eventBatch: Seq[SequencedSerializedEvent] @@ -1688,17 +1686,45 @@ class RichSequencerClientImpl( }(EitherT.leftT[FutureUnlessShutdown, Unit](_)) } - def changeTransport( - sequencerTransports: SequencerTransports[?] - )(implicit traceContext: TraceContext): FutureUnlessShutdown[Unit] = { - sequencerAggregator.changeMessageAggregationConfig( - MessageAggregationConfig( - sequencerTransports.expectedSequencersO, - sequencerTransports.sequencerTrustThreshold, + override def changeTransport( + sequencerTransports: SequencerTransports[?], + newConnectionPoolConfigO: Option[SequencerConnectionXPoolConfig], + )(implicit traceContext: TraceContext): EitherT[FutureUnlessShutdown, String, Unit] = + for { + _ <- + EitherT.fromEither[FutureUnlessShutdown](if (config.useNewConnectionPool) { + val newConnectionPoolConfig = newConnectionPoolConfigO.getOrElse( + ErrorUtil.invalidState( + "Connection pool enabled, yet connection pool config not provided" + ) + ) + + for { + _ <- connectionPool + .updateConfig(newConnectionPoolConfig) + .leftMap(error => s"Failed to update connection pool configuration: $error") + } yield { + sequencerSubscriptionPoolRef.get.foreach { subscriptionPool => + val newSubscriptionPoolConfig = + SequencerSubscriptionPoolConfig.fromSequencerTransports(sequencerTransports) + subscriptionPool.updateConfig(newSubscriptionPoolConfig) + } + } + } else Either.unit) + + _ = sequencerAggregator.changeMessageAggregationConfig( + MessageAggregationConfig( + sequencerTransports.expectedSequencersO, + sequencerTransports.sequencerTrustThreshold, + ) ) - ) - FutureUnlessShutdown.outcomeF(sequencersTransportState.changeTransport(sequencerTransports)) - } + + _ <- EitherT.right( + FutureUnlessShutdown.outcomeF( + sequencersTransportState.changeTransport(sequencerTransports) + ) + ) + } yield () private val subscriptionPoolCompletePromise = Promise[SequencerClient.CloseReason]() diff --git a/sdk/canton/community/base/src/main/scala/com/digitalasset/canton/sequencing/protocol/Batch.scala b/sdk/canton/community/base/src/main/scala/com/digitalasset/canton/sequencing/protocol/Batch.scala index 81d991e9aec0..e5017e31dcc9 100644 --- a/sdk/canton/community/base/src/main/scala/com/digitalasset/canton/sequencing/protocol/Batch.scala +++ b/sdk/canton/community/base/src/main/scala/com/digitalasset/canton/sequencing/protocol/Batch.scala @@ -54,6 +54,8 @@ final case class Batch[+Env <: Envelope[?]] private (envelopes: List[Env])( case AllMembersOfSynchronizer => AllMembersOfSynchronizer } + lazy val isBroadcast: Boolean = allRecipients.contains(AllMembersOfSynchronizer) + private[protocol] def toProtoV30: v30.CompressedBatch = { val batch = v30.Batch(envelopes = envelopes.map(_.closeEnvelope.toProtoV30)) val compressed = ByteStringUtil.compressGzip(checkedToByteString(batch))
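The changeTransport change above is a breaking API change on RichSequencerClient: callers now supply the optional new connection pool configuration and receive an EitherT error channel instead of a bare future. A hedged sketch of a call site under the new signature; client, newTransports, and newPoolConfig are placeholder names:

  // Sketch: when the new connection pool is enabled, the pool config must
  // accompany the transports; failures to update it surface as a String error.
  val result: EitherT[FutureUnlessShutdown, String, Unit] =
    client.changeTransport(newTransports, Some(newPoolConfig))
  // Callers that previously sequenced a plain future now handle the error, e.g.
  //   result.valueOr(err => logger.warn(s"Transport change failed: $err"))

Note the invariant encoded in the implementation: with useNewConnectionPool set, passing None for the pool config is treated as an illegal state.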
diff --git a/sdk/canton/community/base/src/main/scala/com/digitalasset/canton/store/IndexedStringStore.scala b/sdk/canton/community/base/src/main/scala/com/digitalasset/canton/store/IndexedStringStore.scala index e8a8c08ac4b4..c633e83dcbf4 100644 --- a/sdk/canton/community/base/src/main/scala/com/digitalasset/canton/store/IndexedStringStore.scala +++ b/sdk/canton/community/base/src/main/scala/com/digitalasset/canton/store/IndexedStringStore.scala @@ -48,7 +48,7 @@ abstract class IndexedStringFromDb[A <: IndexedString[B], B] { def indexed( indexedStringStore: IndexedStringStore - )(item: B)(implicit ec: ExecutionContext): FutureUnlessShutdown[A] = + )(item: B)(implicit ec: ExecutionContext, traceContext: TraceContext): FutureUnlessShutdown[A] = indexedStringStore .getOrCreateIndex(dbTyp, asString(item)) .map(buildIndexed(item, _)) @@ -58,16 +58,22 @@ abstract class IndexedStringFromDb[A <: IndexedString[B], B] { )(implicit ec: ExecutionContext, loggingContext: ErrorLoggingContext, - ): OptionT[FutureUnlessShutdown, A] = + ): OptionT[FutureUnlessShutdown, A] = { + implicit val traceContext: TraceContext = loggingContext.traceContext + fromDbIndexET(indexedStringStore)(index).leftMap { err => loggingContext.logger.error( s"Corrupt log id: $index for $dbTyp within context $context: $err" )(loggingContext.traceContext) }.toOption + } def fromDbIndexET( indexedStringStore: IndexedStringStore - )(index: Int)(implicit ec: ExecutionContext): EitherT[FutureUnlessShutdown, String, A] = + )(index: Int)(implicit + ec: ExecutionContext, + traceContext: TraceContext, + ): EitherT[FutureUnlessShutdown, String, A] = EitherT(indexedStringStore.getForIndex(dbTyp, index).map { strO => for { str <- strO.toRight("No entry for given index") @@ -180,8 +186,12 @@ object IndexedStringType { /** uid index such that we can store integers instead of long strings in our database */ trait IndexedStringStore extends AutoCloseable { - def getOrCreateIndex(dbTyp: IndexedStringType, str: String300): FutureUnlessShutdown[Int] - def getForIndex(dbTyp: IndexedStringType, idx: Int): FutureUnlessShutdown[Option[String300]] + def getOrCreateIndex(dbTyp: IndexedStringType, str: String300)(implicit + traceContext: TraceContext + ): FutureUnlessShutdown[Int] + def getForIndex(dbTyp: IndexedStringType, idx: Int)(implicit + traceContext: TraceContext + ): FutureUnlessShutdown[Option[String300]] } object IndexedStringStore { @@ -190,10 +200,7 @@ object IndexedStringStore { config: CacheConfig, timeouts: ProcessingTimeout, loggerFactory: NamedLoggerFactory, - )(implicit - ec: ExecutionContext, - tc: TraceContext, - ): IndexedStringStore = + )(implicit ec: ExecutionContext): IndexedStringStore = storage match { case _: MemoryStorage => InMemoryIndexedStringStore() case jdbc: DbStorage => @@ -209,7 +216,7 @@ class IndexedStringCache( parent: IndexedStringStore, config: CacheConfig, val loggerFactory: NamedLoggerFactory, -)(implicit ec: ExecutionContext, tc: TraceContext) +)(implicit ec: ExecutionContext) extends IndexedStringStore with NamedLogging { @@ -248,13 +255,13 @@ class IndexedStringCache( override def getForIndex( dbTyp: IndexedStringType, idx: Int, - ): FutureUnlessShutdown[Option[String300]] = + )(implicit traceContext: TraceContext): FutureUnlessShutdown[Option[String300]] = index2strFUS.get((idx, dbTyp)) override def getOrCreateIndex( dbTyp: IndexedStringType, str: String300, - ): FutureUnlessShutdown[Int] = + )(implicit traceContext: TraceContext): FutureUnlessShutdown[Int] = str2Index.get((str, dbTyp)) override def close(): Unit = {
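The net effect of the IndexedStringStore changes above is that each lookup now takes an implicit TraceContext at the call site, instead of the store capturing one at construction time (the DB implementation previously fell back to the empty trace context). A small hedged sketch of a caller under the new signatures; lookupIndex is an illustrative helper, not part of the diff:

  // Sketch: the trace context flows from the caller into the store, so the
  // underlying DB queries are attributable to the originating request.
  def lookupIndex(
      store: IndexedStringStore,
      dbTyp: IndexedStringType,
      name: String300,
  )(implicit traceContext: TraceContext): FutureUnlessShutdown[Int] =
    store.getOrCreateIndex(dbTyp, name)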
diff --git a/sdk/canton/community/base/src/main/scala/com/digitalasset/canton/store/db/DbIndexedStringStore.scala b/sdk/canton/community/base/src/main/scala/com/digitalasset/canton/store/db/DbIndexedStringStore.scala index 5715ed334e7e..9cc977a34f50 100644 --- a/sdk/canton/community/base/src/main/scala/com/digitalasset/canton/store/db/DbIndexedStringStore.scala +++ b/sdk/canton/community/base/src/main/scala/com/digitalasset/canton/store/db/DbIndexedStringStore.scala @@ -11,6 +11,7 @@ import com.digitalasset.canton.lifecycle.FutureUnlessShutdown import com.digitalasset.canton.logging.NamedLoggerFactory import com.digitalasset.canton.resource.{DbStorage, DbStore} import com.digitalasset.canton.store.{IndexedStringStore, IndexedStringType} +import com.digitalasset.canton.tracing.TraceContext import scala.concurrent.ExecutionContext @@ -22,13 +23,12 @@ class DbIndexedStringStore( extends IndexedStringStore with DbStore { - import com.digitalasset.canton.tracing.TraceContext.Implicits.Empty.* import storage.api.* override def getOrCreateIndex( dbTyp: IndexedStringType, str: String300, - ): FutureUnlessShutdown[Int] = + )(implicit traceContext: TraceContext): FutureUnlessShutdown[Int] = getIndexForStr(dbTyp.source, str).getOrElseF { insertIgnore(dbTyp.source, str).flatMap { _ => getIndexForStr(dbTyp.source, str).getOrElse { @@ -40,7 +40,9 @@ class DbIndexedStringStore( } } - private def getIndexForStr(dbType: Int, str: String300): OptionT[FutureUnlessShutdown, Int] = + private def getIndexForStr(dbType: Int, str: String300)(implicit + traceContext: TraceContext + ): OptionT[FutureUnlessShutdown, Int] = OptionT( storage .query( @@ -51,7 +53,9 @@ class DbIndexedStringStore( ) ) - private def insertIgnore(dbType: Int, str: String300): FutureUnlessShutdown[Unit] = { + private def insertIgnore(dbType: Int, str: String300)(implicit + traceContext: TraceContext + ): FutureUnlessShutdown[Unit] = { // not sure how to get "last insert id" here in case the row was inserted // therefore, we're just querying the db again. 
this is a bit dorky, // but we'll hardly ever do this, so should be good @@ -64,7 +68,7 @@ class DbIndexedStringStore( override def getForIndex( dbTyp: IndexedStringType, idx: Int, - ): FutureUnlessShutdown[Option[String300]] = + )(implicit traceContext: TraceContext): FutureUnlessShutdown[Option[String300]] = storage .query( sql"select string from common_static_strings where id = $idx and source = ${dbTyp.source}" diff --git a/sdk/canton/community/base/src/main/scala/com/digitalasset/canton/store/memory/InMemoryIndexedStringStore.scala b/sdk/canton/community/base/src/main/scala/com/digitalasset/canton/store/memory/InMemoryIndexedStringStore.scala index 3d78f38a345d..3c55f01afd68 100644 --- a/sdk/canton/community/base/src/main/scala/com/digitalasset/canton/store/memory/InMemoryIndexedStringStore.scala +++ b/sdk/canton/community/base/src/main/scala/com/digitalasset/canton/store/memory/InMemoryIndexedStringStore.scala @@ -7,6 +7,7 @@ import com.digitalasset.canton.config.CantonRequireTypes.String300 import com.digitalasset.canton.discard.Implicits.DiscardOps import com.digitalasset.canton.lifecycle.FutureUnlessShutdown import com.digitalasset.canton.store.{IndexedStringStore, IndexedStringType} +import com.digitalasset.canton.tracing.TraceContext import scala.collection.concurrent.TrieMap import scala.collection.mutable.ArrayBuffer @@ -27,7 +28,7 @@ class InMemoryIndexedStringStore(val minIndex: Int, val maxIndex: Int) extends I override def getOrCreateIndex( dbTyp: IndexedStringType, str: String300, - ): FutureUnlessShutdown[Int] = + )(implicit traceContext: TraceContext): FutureUnlessShutdown[Int] = FutureUnlessShutdown.pure(getOrCreateIndexForTesting(dbTyp, str)) /** @throws java.lang.IllegalArgumentException @@ -51,7 +52,7 @@ class InMemoryIndexedStringStore(val minIndex: Int, val maxIndex: Int) extends I override def getForIndex( dbTyp: IndexedStringType, idx: Int, - ): FutureUnlessShutdown[Option[String300]] = + )(implicit traceContext: TraceContext): FutureUnlessShutdown[Option[String300]] = FutureUnlessShutdown.pure { blocking { synchronized { diff --git a/sdk/canton/community/base/src/main/scala/com/digitalasset/canton/time/SynchronizerTimeTracker.scala b/sdk/canton/community/base/src/main/scala/com/digitalasset/canton/time/SynchronizerTimeTracker.scala index 5b88a1a6ec8d..1a110bf44182 100644 --- a/sdk/canton/community/base/src/main/scala/com/digitalasset/canton/time/SynchronizerTimeTracker.scala +++ b/sdk/canton/community/base/src/main/scala/com/digitalasset/canton/time/SynchronizerTimeTracker.scala @@ -671,7 +671,7 @@ object SynchronizerTimeTracker { * proof. Use this only for debugging purposes to identify the reason for the time proof * requests. 
*/ - private val PrintCallStackForExecutedTimeProofRequests: Boolean = true + private val PrintCallStackForExecutedTimeProofRequests: Boolean = false @inline private def callStackForExecutedTimeProofRequest(): String = diff --git a/sdk/canton/community/base/src/main/scala/com/digitalasset/canton/topology/ExternalPartyOnboardingDetails.scala b/sdk/canton/community/base/src/main/scala/com/digitalasset/canton/topology/ExternalPartyOnboardingDetails.scala index f1e4d8ca6687..951ab26e8d48 100644 --- a/sdk/canton/community/base/src/main/scala/com/digitalasset/canton/topology/ExternalPartyOnboardingDetails.scala +++ b/sdk/canton/community/base/src/main/scala/com/digitalasset/canton/topology/ExternalPartyOnboardingDetails.scala @@ -3,75 +3,120 @@ package com.digitalasset.canton.topology -import cats.data.Ior +import cats.syntax.alternative.* +import cats.syntax.option.* import cats.syntax.traverse.* import com.daml.nonempty.NonEmpty import com.digitalasset.canton.config.RequireTypes.PositiveInt import com.digitalasset.canton.crypto.Signature import com.digitalasset.canton.topology.ExternalPartyOnboardingDetails.{ - ExternalPartyNamespace, + Centralized, + Decentralized, + OptionallySignedPartyToParticipant, + PartyNamespace, SignedPartyToKeyMapping, - SignedPartyToParticipant, } import com.digitalasset.canton.topology.transaction.* -import com.digitalasset.canton.topology.transaction.DelegationRestriction.CanSignAllMappings -import com.digitalasset.canton.topology.transaction.ParticipantPermission.Confirmation -import com.digitalasset.canton.topology.transaction.SignedTopologyTransaction.PositiveSignedTopologyTransaction +import com.digitalasset.canton.topology.transaction.SignedTopologyTransaction.{ + GenericSignedTopologyTransaction, + PositiveSignedTopologyTransaction, +} import com.digitalasset.canton.topology.transaction.TopologyChangeOp.Replace import com.digitalasset.canton.topology.transaction.TopologyTransaction.PositiveTopologyTransaction import com.digitalasset.canton.version.ProtocolVersion import scala.reflect.ClassTag -/** Data class containing onbarding signed topology transactions for an external party. - * - * The following invariants are enforced in the create method of the companion object: - * - * - The namespaces of all 3 transactions are consistent - * - There is at least one confirming participant - * - There is no participant with submission permission +/** Data class containing onboarding signed topology transactions for an external party. The + * constructor of this class ensures that only transactions related to the creation of an external + * party can be submitted. It performs validations on the kind of transactions and their + * relationship with each other. It does NOT validate anything related to the authorization of + * those transactions. That logic is implemented in the topology manager. * - * @param signedNamespaceTransaction - * Either a NamespaceDelegation or DecentralizedNamespaceDefinition + * @param partyNamespace + * Fully authorized party namespace. 
Can be either a single namespace or decentralized one with + * accompanying individual namespace owner transactions * @param signedPartyToKeyMappingTransaction * Party to Key mapping transaction - * @param signedPartyToParticipantTransaction - * Party to Participant transaction + * @param optionallySignedPartyToParticipant + * Party to Participant transaction, either signed or unsigned * @param isConfirming * True if the allocating node is a confirming node for the party */ final case class ExternalPartyOnboardingDetails private ( - signedNamespaceTransaction: ExternalPartyNamespace, - signedPartyToKeyMappingTransaction: SignedPartyToKeyMapping, - signedPartyToParticipantTransaction: SignedPartyToParticipant, + partyNamespace: Option[PartyNamespace], + signedPartyToKeyMappingTransaction: Option[SignedPartyToKeyMapping], + optionallySignedPartyToParticipant: OptionallySignedPartyToParticipant, isConfirming: Boolean, ) { + // Invariants + require( + !optionallySignedPartyToParticipant.mapping.participants + .map(_.permission) + .contains(ParticipantPermission.Submission), + "External party cannot be hosted with Submission permission", + ) + require( + partyNamespace.forall(_.namespace == optionallySignedPartyToParticipant.mapping.namespace), + "The party namespace does not match the PartyToParticipant namespace", + ) + require( + partyNamespace.forall { + case decentralized: Decentralized => + decentralized.individualNamespaceTransaction.sizeIs <= ExternalPartyOnboardingDetails.maxDecentralizedOwnersSize.value + case _ => true + }, + "Decentralized namespace has over the maximum limit of namespace owners", + ) + require( + signedPartyToKeyMappingTransaction.forall( + _.mapping.namespace == optionallySignedPartyToParticipant.mapping.namespace + ), + "The PartyToKeyMapping namespace does not match the PartyToParticipant namespace", + ) - /** Namespace of the external party. Either from a single or decentralized namespace + /** Return true if we expect the party to be fully allocated and authorized with the provided + * transactions */ - def namespace: Namespace = signedNamespaceTransaction.namespace + def fullyAllocatesParty: Boolean = + // Expect fully allocated if there's a centralized namespace + // (It could be fully allocated as well with a decentralized namespace but checking this + // would require re-running the authorization checks implemented in the topology manager) + partyNamespace.exists { + case _: Centralized => true + case _ => false + } && + // and a party to key + signedPartyToKeyMappingTransaction.isDefined && + // and is not multi hosted + hostingParticipants.sizeIs == 1 + + /** Namespace of the external party. 
+ */ + def namespace: Namespace = optionallySignedPartyToParticipant.mapping.namespace /** PartyId of the external party */ - def partyId: PartyId = signedPartyToParticipantTransaction.mapping.partyId + def partyId: PartyId = optionallySignedPartyToParticipant.mapping.partyId /** Party hint of the external party */ def partyHint: String = partyId.uid.identifier.str - /** Returns true if the party is multi hosted - */ - def isMultiHosted: Boolean = hostingParticipants.sizeIs > 1 - def hostingParticipants: Seq[HostingParticipant] = - signedPartyToParticipantTransaction.mapping.participants - def confirmationThreshold: PositiveInt = signedPartyToParticipantTransaction.mapping.threshold - def signingKeysThreshold: PositiveInt = signedPartyToKeyMappingTransaction.mapping.threshold - def numberOfSigningKeys: Int = signedPartyToKeyMappingTransaction.mapping.signingKeys.length + optionallySignedPartyToParticipant.mapping.participants + def confirmationThreshold: PositiveInt = optionallySignedPartyToParticipant.mapping.threshold } object ExternalPartyOnboardingDetails { + // Maximum number of decentralized namespace owners allowed through the `allocateExternalParty` API + // This is hardcoded here to avoid unreasonably high number of namespace owner transactions to be distributed + // through this endpoint, as the DecentralizedNamespaceDefinition itself does not have any limit on the number of + // namespace owners. If this limit is too low for a given use case, go through the Admin API topology write service instead. + // TODO(i27530): Make this configurable, or lift it when DecentralizedNamespaceDefinition has a limit + val maxDecentralizedOwnersSize: PositiveInt = PositiveInt.tryCreate(10) + // Type aliases for conciseness private type SignedNamespaceDelegation = SignedTopologyTransaction[TopologyChangeOp.Replace, NamespaceDelegation] @@ -79,62 +124,151 @@ object ExternalPartyOnboardingDetails { SignedTopologyTransaction[TopologyChangeOp.Replace, DecentralizedNamespaceDefinition] private type SignedPartyToKeyMapping = SignedTopologyTransaction[TopologyChangeOp.Replace, PartyToKeyMapping] - private type SignedPartyToParticipant = - SignedTopologyTransaction[TopologyChangeOp.Replace, PartyToParticipant] - sealed trait ExternalPartyNamespace { - def signedTransaction: PositiveSignedTopologyTransaction - def namespace: Namespace = signedTransaction.mapping.namespace + + /** External party namespace, can be either centralied or decentralized + */ + sealed trait PartyNamespace { + + /** Transactions to be loaded in the topology manager to create the party's namespace + */ + def signedTransactions: Seq[GenericSignedTopologyTransaction] + + /** Namespace of the party + */ + def namespace: Namespace } - final case class SingleNamespace(signedTransaction: SignedNamespaceDelegation) - extends ExternalPartyNamespace - final case class DecentralizedNamespace(signedTransaction: SignedDecentralizedNamespace) - extends ExternalPartyNamespace - // TODO(i27530): Should we check if it's a non fully authorized decentralized namespace definition? 
- private def isProposal( - transaction: PositiveTopologyTransaction, - allocatingParticipantId: ParticipantId, - ): Boolean = { - // If the party is also hosted on other nodes, it needs to be a proposal, - // as approval from the other nodes is needed to fully authorize the transaction - val isHostedOnOtherNodes = transaction - .selectMapping[PartyToParticipant] - .toList - .flatMap(_.mapping.participants.map(_.participantId)) - .exists(_ != allocatingParticipantId) + /** Decentralized party namespace. All transactions are expected to have all required signatures + * to be fully authorized. If not the party allocation will fail in the topology manager auth + * checks. + * @param decentralizedTransaction + * The decentralized namespace transaction + * @param individualNamespaceTransaction + * The individual namespace owner transactions + */ + final private case class Decentralized( + decentralizedTransaction: SignedDecentralizedNamespace, + individualNamespaceTransaction: Seq[SignedNamespaceDelegation], + ) extends PartyNamespace { + // In that order on purpose, as the individual namespaces must be processed before the decentralized namespace can be authorized + override def signedTransactions: Seq[GenericSignedTopologyTransaction] = + individualNamespaceTransaction :+ decentralizedTransaction + override def namespace: Namespace = decentralizedTransaction.mapping.namespace + } + + /** Centralized party namespace. The transaction is expected to be fully authorized/ If not the + * party allocation will fail in the topology manager auth checks. + * @param singleTransaction + * The signed namespace definition transaction + */ + final private case class Centralized(singleTransaction: SignedNamespaceDelegation) + extends PartyNamespace { + override def signedTransactions: Seq[GenericSignedTopologyTransaction] = Seq(singleTransaction) + override def namespace: Namespace = singleTransaction.mapping.namespace + } - isHostedOnOtherNodes + /** The PartyToParticipant mapping may be submitted signed (by the party's namespace) or unsigned + * (by hosting nodes wanting to authorize the hosting). This trait makes the distinction between + * the two cases. + */ + sealed trait OptionallySignedPartyToParticipant { + def mapping: PartyToParticipant + } + final case class SignedPartyToParticipant( + signed: SignedTopologyTransaction[TopologyChangeOp.Replace, PartyToParticipant] + ) extends OptionallySignedPartyToParticipant { + def mapping: PartyToParticipant = signed.mapping + } + final case class UnsignedPartyToParticipant( + unsigned: TopologyTransaction[TopologyChangeOp.Replace, PartyToParticipant] + ) extends OptionallySignedPartyToParticipant { + def mapping: PartyToParticipant = unsigned.mapping } - private def buildSignedTopologyTransactions( + private val expectedTransactionMappings = Seq( + NamespaceDelegation, + DecentralizedNamespaceDefinition, + PartyToParticipant, + PartyToKeyMapping, + ) + + // TODO(i27530): We may be able to be more precise for P2P and P2K but it's hard to tell + // at this stage especially with decentralized namespaces + private def isProposal( + transaction: PositiveTopologyTransaction + ): Boolean = + // Namespaces must be fully authorized so they can't be proposals + transaction.selectMapping[NamespaceDelegation].isEmpty && + transaction.selectMapping[DecentralizedNamespaceDefinition].isEmpty + + /** Build SignedTopologyTransactions for transactions that have at least one signature. + * Transactions without signatures are returned separately. 
+ */ + private def parseTransactionsWithSignatures( transactionsWithSignatures: NonEmpty[List[(PositiveTopologyTransaction, List[Signature])]], multiSignatures: List[Signature], protocolVersion: ProtocolVersion, - participantId: ParticipantId, - ): Either[String, List[PositiveSignedTopologyTransaction]] = { - val signedTransactionHashes = transactionsWithSignatures.map { case (transaction, _) => + ): Either[ + String, + (List[PositiveSignedTopologyTransaction], List[PositiveTopologyTransaction]), + ] = { + val transactionHashes = transactionsWithSignatures.map { case (transaction, _) => transaction.hash }.toSet - val multiTransactionSignatures = multiSignatures.map( - MultiTransactionSignature(signedTransactionHashes, _) - ) - transactionsWithSignatures.forgetNE.traverse { case (transaction, signatures) => - NonEmpty + val multiTransactionSignatures = + multiSignatures.map(MultiTransactionSignature(transactionHashes, _)) + + // Gather the signatures for a transaction. If no signatures can be found return None + def signaturesForTransaction( + transaction: PositiveTopologyTransaction, + singleTransactionSignatures: Seq[Signature], + multiTransactionSignatures: Seq[MultiTransactionSignature], + ): Option[NonEmpty[Seq[TopologyTransactionSignature]]] = { + // Deduplicate signatures to keep only one per signer + val deduplicatedSignatures = NonEmpty .from( - multiTransactionSignatures ++ signatures.map( + (singleTransactionSignatures.map( SingleTransactionSignature(transaction.hash, _) - ) - ) - .toRight("Missing signatures") - .flatMap(transactionSignatures => - SignedTopologyTransaction.create( - transaction, - transactionSignatures.toSet, - isProposal = isProposal(transaction, participantId), - protocolVersion, - ) + ) ++ multiTransactionSignatures) + // Prefer single transaction signatures over multi transaction ones + // as they're smaller (they don't need the list of signed hashes) + .groupMapReduce(_.authorizingLongTermKey)(identity)((first, _) => first) + .values + .toSeq ) + transaction.mapping match { + // Special case for root namespaces: they require signatures only from the namespace key + case namespaceDelegation: NamespaceDelegation => + deduplicatedSignatures + .map(_.filter(_.authorizingLongTermKey == namespaceDelegation.namespace.fingerprint)) + .flatMap(NonEmpty.from) + case _ => deduplicatedSignatures + } } + + transactionsWithSignatures.forgetNE + .traverse { case (transaction, signatures) => + signaturesForTransaction(transaction, signatures, multiTransactionSignatures) + .traverse(transactionSignatures => + SignedTopologyTransaction.create( + transaction, + transactionSignatures.toSet, + isProposal = isProposal(transaction), + protocolVersion, + ) + ) + .map(_.map(Left(_)).getOrElse(Right(transaction))) + } + .map(_.separate) + } + + private def validateMaximumOneElement[T]( + list: List[T], + error: Int => String, + ): Either[String, Option[T]] = list match { + case Nil => Right(None) + case singleTransaction :: Nil => Right(Some(singleTransaction)) + case moreThanOneMapping => Left(error(moreThanOneMapping.length)) } private def validateMaximumOneMapping[M <: TopologyMapping]( @@ -142,71 +276,110 @@ object ExternalPartyOnboardingDetails { )(implicit classTag: ClassTag[M] ): Either[String, Option[SignedTopologyTransaction[Replace, M]]] = - transactions.flatMap(_.select[TopologyChangeOp.Replace, M]) match { - case Nil => Right(None) - case singleTransaction :: Nil => Right(Some(singleTransaction)) - case moreThanOneMapping => - Left( - s"Only one transaction of type 
${classTag.runtimeClass.getName} can be provided, got ${moreThanOneMapping.length}" - ) - } - private def validateExactlyOneMapping[M <: TopologyMapping]( - transactions: List[PositiveSignedTopologyTransaction] - )(implicit - classTag: ClassTag[M] - ): Either[String, SignedTopologyTransaction[Replace, M]] = for { - zeroOrOne <- validateMaximumOneMapping[M](transactions) - exactlyOne <- zeroOrOne.toRight( - s"At least one transaction of type ${classTag.runtimeClass.getName} must be provided, got 0" + validateMaximumOneElement( + transactions.flatMap(_.select[TopologyChangeOp.Replace, M]), + length => + s"Only one transaction of type ${classTag.runtimeClass.getName} can be provided, got $length", ) - } yield exactlyOne - // Find either a NamespaceDelegation or a DecentralizedNamespace - private def validateNamespace( - signedTopologyTransactions: List[PositiveSignedTopologyTransaction] - ): Either[String, ExternalPartyNamespace] = for { - singleRootNamespaceO <- validateMaximumOneMapping[NamespaceDelegation]( - signedTopologyTransactions - ) - _ <- Either.cond( - singleRootNamespaceO.forall(tx => - tx.transaction.mapping.target.fingerprint == tx.transaction.mapping.namespace.fingerprint - ), - (), - "Namespace delegation is not a root delegation. Ensure the target key fingerprint is the same as the namespace fingerprint", - ) - _ <- Either.cond( - singleRootNamespaceO.forall(tx => tx.transaction.mapping.restriction == CanSignAllMappings), - (), - "Namespace delegation must have a CanSignAllMappings restriction.", - ) - // TODO(i27530): Do we need other validations for decentralized namespaces? - decentralizedNamespaceO <- validateMaximumOneMapping[DecentralizedNamespaceDefinition]( - signedTopologyTransactions - ) - namespace <- Ior - .fromOptions(singleRootNamespaceO, decentralizedNamespaceO) - .toRight("Either a NamespaceDelegation or a DecentralizedNamespace is required") + /* + * Look for either a Decentralized namespace with optionally its individual namespace delegation, or a single namespace + * Optional because one may only provide a PartyToParticipant transaction to authorize the hosting. + */ + private def validatePartyNamespace( + signedTransactions: List[PositiveSignedTopologyTransaction], + p2pNamespace: Namespace, + ): Either[String, Option[PartyNamespace]] = + for { + // Look first for a decentralized namespace, can only be at most one + signedDecentralizedTxO <- validateMaximumOneMapping[DecentralizedNamespaceDefinition]( + signedTransactions + ) + partyNamespaceO <- signedDecentralizedTxO match { + case Some(signedDecentralizedTx) => + // If there's one, get the corresponding NamespaceDelegations for it + val namespaceOwners = signedTransactions + .flatMap(_.select[Replace, NamespaceDelegation]) + .filter(namespaceTx => + signedDecentralizedTx.mapping.owners.contains(namespaceTx.mapping.namespace) + ) + Either.cond( + namespaceOwners.sizeIs <= maxDecentralizedOwnersSize.value, + Decentralized( + signedDecentralizedTx, + namespaceOwners, + ).some, + "Decentralized namespaces cannot have more than " + + s"${maxDecentralizedOwnersSize.value} individual namespace owners, got ${namespaceOwners.size}", + ) + case None => + // Otherwise look for a single delegation + for { + namespaceDelegationO <- validateMaximumOneMapping[NamespaceDelegation]( + signedTransactions + ) + _ <- Either.cond( + namespaceDelegationO.forall(NamespaceDelegation.isRootCertificate), + (), + "NamespaceDelegation is not a root namespace. 
Ensure the namespace and target key are the same", + ) + } yield namespaceDelegationO.map(Centralized(_): PartyNamespace) + } + _ <- partyNamespaceO.traverse(partyNamespace => + Either.cond( + partyNamespace.namespace == p2pNamespace, + (), + s"The Party namespace (${partyNamespace.namespace}) does not match the PartyToParticipant namespace ($p2pNamespace)", + ) + ) + } yield partyNamespaceO + + private def validateExactlyOnePartyToParticipant( + signedTransactions: List[PositiveSignedTopologyTransaction], + unsignedTransactions: List[PositiveTopologyTransaction], + ): Either[String, OptionallySignedPartyToParticipant] = + // Check first if there's a signed P2P + validateMaximumOneMapping[PartyToParticipant](signedTransactions) .flatMap { - _.bimap( - SingleNamespace(_): ExternalPartyNamespace, - DecentralizedNamespace(_): ExternalPartyNamespace, - ).onlyLeftOrRight - .toRight("Only one of NamespaceDelegation or DecentralizedNamespace can be provided") - .map(_.merge) + case Some(signed) => Right(SignedPartyToParticipant(signed)) + case None => + // Otherwise there must be an unsigned one + validateMaximumOneElement( + unsignedTransactions.flatMap(_.select[TopologyChangeOp.Replace, PartyToParticipant]), + length => + s"Only one transaction of type PartyToParticipant can be provided, got $length", + ).flatMap( + _.toRight(s"One transaction of type PartyToParticipant must be provided, got 0") + ).map(UnsignedPartyToParticipant(_)) } - } yield namespace + /** Find and validate the PartyToParticipant transaction. It can be either signed (by the party + * namespace) or unsigned, in which case it will be signed by this participant (if it can) to + * authorize the hosting of the party. + */ private def validatePartyToParticipant( signedTopologyTransactions: List[PositiveSignedTopologyTransaction], + unsignedTopologyTransactions: List[PositiveTopologyTransaction], participantId: ParticipantId, - partyNamespace: Namespace, - ): Either[String, (SignedPartyToParticipant, Boolean)] = + ): Either[String, (OptionallySignedPartyToParticipant, Boolean)] = for { - signedPartyToParticipant <- validateExactlyOneMapping[PartyToParticipant]( - signedTopologyTransactions + optionallySignedPartyToParticipant <- validateExactlyOnePartyToParticipant( + signedTopologyTransactions, + unsignedTopologyTransactions, + ) + hostingParticipants = optionallySignedPartyToParticipant.mapping.participants + nodePermissionsMap = optionallySignedPartyToParticipant.mapping.participants + .groupMap(_.permission)(_.participantId) + nodesWithSubmissionPermission = nodePermissionsMap.getOrElse( + ParticipantPermission.Submission, + Seq.empty, + ) + _ <- Either.cond( + nodesWithSubmissionPermission.isEmpty, + (), + s"The PartyToParticipant transaction must not contain any node with Submission permission. 
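The hosting checks in `validatePartyToParticipant` boil down to: group the hosting participants by permission, reject any entry with Submission permission, and require at least one confirming node. A compact sketch under assumed stand-in types (not the Canton originals):

```scala
object HostingPermissionSketch {
  sealed trait Permission
  case object Submission extends Permission
  case object Confirmation extends Permission
  case object Observation extends Permission

  final case class Hosting(participantId: String, permission: Permission)

  // External parties sign their own transactions, so no hosting node may hold
  // Submission permission, and at least one node must be able to confirm.
  def validate(hosting: Seq[Hosting]): Either[String, Unit] = {
    val byPermission = hosting.groupMap(_.permission)(_.participantId)
    val submitters = byPermission.getOrElse(Submission, Seq.empty)
    for {
      _ <- Either.cond(submitters.isEmpty, (), s"Unexpected Submission nodes: $submitters")
      _ <- Either.cond(
        byPermission.getOrElse(Confirmation, Seq.empty).nonEmpty,
        (),
        "At least one hosting node must have Confirmation permission",
      )
    } yield ()
  }
}
```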
Nodes with submission permission: ${nodesWithSubmissionPermission + .mkString(", ")}", ) - hostingParticipants = signedPartyToParticipant.mapping.participants _ <- hostingParticipants.toList match { case HostingParticipant(hosting, permission, _onboarding) :: Nil => Either.cond( @@ -216,15 +389,7 @@ object ExternalPartyOnboardingDetails { ) case _ => Right(()) } - nodePermissionsMap = signedPartyToParticipant.mapping.participants.groupMap(_.permission)( - _.participantId - ) - _ <- Either.cond( - !nodePermissionsMap.contains(ParticipantPermission.Submission), - (), - "The PartyToParticipant transaction must not contain any node with Submission permission", - ) - confirmingNodes = nodePermissionsMap.getOrElse(Confirmation, List.empty) + confirmingNodes = nodePermissionsMap.getOrElse(ParticipantPermission.Confirmation, List.empty) _ <- Either.cond( confirmingNodes.nonEmpty, (), @@ -244,24 +409,41 @@ object ExternalPartyOnboardingDetails { (), s"This node is not hosting the party either with Confirmation or Observation permission.", ) - _ <- Either.cond( - signedPartyToParticipant.mapping.namespace == partyNamespace, - (), - s"The PartyToParticipant namespace (${signedPartyToParticipant.mapping.namespace}) does not match the party's namespace ($partyNamespace)", - ) - } yield (signedPartyToParticipant, isConfirmingNode) + } yield (optionallySignedPartyToParticipant, isConfirmingNode) + /** Find at most one PartyToKeyMapping. Optional because one may only provide a PartyToParticipant + * transaction to authorize the hosting. If provided, validate the namespace matches the + * PartyToParticipant one. + */ private def validatePartyToKey( signedTopologyTransactions: List[PositiveSignedTopologyTransaction], - partyNamespace: Namespace, - ): Either[String, SignedPartyToKeyMapping] = for { - signedPartyToKey <- validateExactlyOneMapping[PartyToKeyMapping](signedTopologyTransactions) - _ <- Either.cond( - signedPartyToKey.mapping.namespace == partyNamespace, + p2pNamespace: Namespace, + ): Either[String, Option[SignedPartyToKeyMapping]] = for { + signedPartyToKeyO <- validateMaximumOneMapping[PartyToKeyMapping](signedTopologyTransactions) + _ <- signedPartyToKeyO.traverse(signedPartyToKey => + Either.cond( + signedPartyToKey.mapping.namespace == p2pNamespace, + (), + s"The PartyToKeyMapping namespace (${signedPartyToKey.mapping.namespace}) does not match the PartyToParticipant namespace ($p2pNamespace)", + ) + ) + } yield signedPartyToKeyO + + private def failOnUnwantedTransactionTypes(transactions: Seq[PositiveTopologyTransaction]) = { + val unwantedTransactions = + transactions.filterNot(tx => + expectedTransactionMappings.map(_.code).contains(tx.mapping.code) + ) + Either.cond( + unwantedTransactions.isEmpty, (), - s"The PartyToKeyMapping namespace (${signedPartyToKey.mapping.namespace}) does not match the party's namespace ($partyNamespace)", + "Unsupported transactions found: " + unwantedTransactions.distinct + .map(_.mapping.getClass.getSimpleName) + .mkString(", ") + ". 
Supported transactions are: " + expectedTransactionMappings + .map(_.getClass.getSimpleName.stripSuffix("$")) + .mkString(", "), ) - } yield signedPartyToKey + } def create( signedTransactions: NonEmpty[List[(PositiveTopologyTransaction, List[Signature])]], @@ -270,23 +452,29 @@ object ExternalPartyOnboardingDetails { participantId: ParticipantId, ): Either[String, ExternalPartyOnboardingDetails] = for { - signedTopologyTransactions <- buildSignedTopologyTransactions( + _ <- failOnUnwantedTransactionTypes(signedTransactions.map(_._1)) + parsedTransactionsWithSignatures <- parseTransactionsWithSignatures( signedTransactions, multiSignatures, protocolVersion, - participantId, ) - namespaceTransaction <- validateNamespace(signedTopologyTransactions) - namespace = namespaceTransaction.namespace + (signedTopologyTransactions, unsignedTopologyTransactions) = parsedTransactionsWithSignatures partyToParticipantAndIsConfirming <- validatePartyToParticipant( signedTopologyTransactions, + unsignedTopologyTransactions, participantId, - namespace, ) (partyToParticipant, isConfirming) = partyToParticipantAndIsConfirming - partyToKey <- validatePartyToKey(signedTopologyTransactions, namespace) + partyToKey <- validatePartyToKey( + signedTopologyTransactions, + partyToParticipant.mapping.namespace, + ) + partyNamespace <- validatePartyNamespace( + signedTopologyTransactions, + partyToParticipant.mapping.namespace, + ) } yield ExternalPartyOnboardingDetails( - namespaceTransaction, + partyNamespace, partyToKey, partyToParticipant, isConfirming, diff --git a/sdk/canton/community/base/src/main/scala/com/digitalasset/canton/topology/ForceFlags.scala b/sdk/canton/community/base/src/main/scala/com/digitalasset/canton/topology/ForceFlags.scala index 7a07ee9763ef..a5fc1c7a128b 100644 --- a/sdk/canton/community/base/src/main/scala/com/digitalasset/canton/topology/ForceFlags.scala +++ b/sdk/canton/community/base/src/main/scala/com/digitalasset/canton/topology/ForceFlags.scala @@ -37,8 +37,6 @@ object ForceFlag { case object PreparationTimeRecordTimeToleranceIncrease extends ForceFlag(v30.ForceFlag.FORCE_FLAG_PREPARATION_TIME_RECORD_TIME_TOLERANCE_INCREASE) - case object AllowUnvetPackage extends ForceFlag(v30.ForceFlag.FORCE_FLAG_ALLOW_UNVET_PACKAGE) - case object AllowUnvetPackageWithActiveContracts extends ForceFlag(v30.ForceFlag.FORCE_FLAG_ALLOW_UNVET_PACKAGE_WITH_ACTIVE_CONTRACTS) @@ -81,7 +79,6 @@ object ForceFlag { Seq[ForceFlag]( AlienMember, LedgerTimeRecordTimeToleranceIncrease, - AllowUnvetPackage, AllowUnknownPackage, AllowUnvettedDependencies, DisablePartyWithActiveContracts, diff --git a/sdk/canton/community/base/src/main/scala/com/digitalasset/canton/topology/TopologyManager.scala b/sdk/canton/community/base/src/main/scala/com/digitalasset/canton/topology/TopologyManager.scala index 55d467d9eea6..f21c405c3b62 100644 --- a/sdk/canton/community/base/src/main/scala/com/digitalasset/canton/topology/TopologyManager.scala +++ b/sdk/canton/community/base/src/main/scala/com/digitalasset/canton/topology/TopologyManager.scala @@ -28,7 +28,7 @@ import com.digitalasset.canton.topology.TopologyManager.assignExpectedUsageToKey import com.digitalasset.canton.topology.TopologyManagerError.{ DangerousCommandRequiresForce, IncreaseOfPreparationTimeRecordTimeTolerance, - ParticipantTopologyManagerError, + InvalidSynchronizerSuccessor, ValueOutOfBounds, } import com.digitalasset.canton.topology.processing.{ @@ -43,6 +43,7 @@ import com.digitalasset.canton.topology.store.TopologyStoreId.{ } import 
com.digitalasset.canton.topology.store.ValidatedTopologyTransaction.GenericValidatedTopologyTransaction import com.digitalasset.canton.topology.store.{ + TimeQuery, TopologyStore, TopologyStoreId, ValidatedTopologyTransaction, @@ -63,7 +64,8 @@ import com.digitalasset.canton.{LfPackageId, config} import java.util.concurrent.atomic.AtomicReference import scala.annotation.unused -import scala.concurrent.{ExecutionContext, Future} +import scala.concurrent.ExecutionContext +import scala.math.Ordered.orderingToOrdered trait TopologyManagerObserver { def addedNewTransactions( @@ -405,9 +407,7 @@ abstract class TopologyManager[+StoreID <: TopologyStoreId, +CryptoType <: BaseC ) for { existingTransaction <- findExistingTransaction(mapping) - tx <- build(op, mapping, serial, protocolVersion, existingTransaction).mapK( - FutureUnlessShutdown.outcomeK - ) + tx <- build(op, mapping, serial, protocolVersion, existingTransaction) signedTx <- signTransaction( tx, signingKeys, @@ -504,7 +504,7 @@ abstract class TopologyManager[+StoreID <: TopologyStoreId, +CryptoType <: BaseC existingTransaction: Option[GenericSignedTopologyTransaction], )(implicit traceContext: TraceContext - ): EitherT[Future, TopologyManagerError, TopologyTransaction[Op, M]] = { + ): EitherT[FutureUnlessShutdown, TopologyManagerError, TopologyTransaction[Op, M]] = { val existingTransactionTuple = existingTransaction.map(t => (t.operation, t.mapping, t.serial, t.signatures)) for { @@ -514,7 +514,7 @@ abstract class TopologyManager[+StoreID <: TopologyStoreId, +CryptoType <: BaseC EitherT.rightT(PositiveInt.one) case (None, Some(proposed)) => // didn't find an existing transaction, therefore the proposed serial must be 1 - EitherT.cond[Future][TopologyManagerError, PositiveInt]( + EitherT.cond[FutureUnlessShutdown][TopologyManagerError, PositiveInt]( proposed == PositiveInt.one, PositiveInt.one, TopologyManagerError.SerialMismatch.Failure(PositiveInt.one, proposed), @@ -525,7 +525,7 @@ abstract class TopologyManager[+StoreID <: TopologyStoreId, +CryptoType <: BaseC // auto-select existing EitherT.rightT(existingSerial) case (Some((`op`, `mapping`, existingSerial, signatures)), Some(proposed)) => - EitherT.cond[Future]( + EitherT.cond[FutureUnlessShutdown]( existingSerial == proposed, existingSerial, TopologyManagerError.MappingAlreadyExists @@ -538,12 +538,12 @@ abstract class TopologyManager[+StoreID <: TopologyStoreId, +CryptoType <: BaseC case (Some((_, _, existingSerial, _)), Some(proposed)) => // check that the proposed serial matches existing+1 val next = existingSerial.increment - EitherT.cond[Future]( + EitherT.cond[FutureUnlessShutdown]( next == proposed, next, TopologyManagerError.SerialMismatch.Failure(next, proposed), ) - }): EitherT[Future, TopologyManagerError, PositiveInt] + }): EitherT[FutureUnlessShutdown, TopologyManagerError, PositiveInt] } yield TopologyTransaction(op, theSerial, mapping, protocolVersion) } @@ -736,14 +736,10 @@ abstract class TopologyManager[+StoreID <: TopologyStoreId, +CryptoType <: BaseC ) transactionsInStore <- EitherT - .liftF( - store.findLatestTransactionsAndProposalsByTxHash( - transactions.map(_.hash).toSet - ) - ) - existingHashes = transactionsInStore - .map(tx => tx.hash -> tx) - .toMap + .liftF(store.findLatestTransactionsAndProposalsByTxHash(transactions.map(_.hash).toSet)) + + existingHashes = transactionsInStore.map(tx => tx.hash -> tx).toMap + // find transactions that provide new signatures (existingTransactions, newTransactionsOrAdditionalSignatures) = transactions.partition { tx 
=> @@ -833,6 +829,7 @@ abstract class TopologyManager[+StoreID <: TopologyStoreId, +CryptoType <: BaseC case OwnerToKeyMapping(member, _) => checkTransactionIsForCurrentNode(member, forceChanges, transaction.mapping.code) + case VettedPackages(participantId, newPackages) => checkPackageVettingIsNotDangerous( participantId, @@ -840,6 +837,7 @@ abstract class TopologyManager[+StoreID <: TopologyStoreId, +CryptoType <: BaseC forceChanges, transaction.mapping.code, ) + case PartyToParticipant(partyId, threshold, participants) => checkPartyToParticipantIsNotDangerous( partyId, @@ -848,6 +846,12 @@ abstract class TopologyManager[+StoreID <: TopologyStoreId, +CryptoType <: BaseC forceChanges, transaction.transaction.operation, ) + + case upgradeAnnouncement: SynchronizerUpgradeAnnouncement => + if (transaction.operation == TopologyChangeOp.Replace) + checkSynchronizerUpgradeAnnouncementIsNotDangerous(upgradeAnnouncement, transaction.serial) + else EitherT.pure(()) + case _ => EitherT.rightT(()) } @@ -950,26 +954,55 @@ abstract class TopologyManager[+StoreID <: TopologyStoreId, +CryptoType <: BaseC .getOrElse(Nil) .toSet } - _ <- checkPackageVettingRevocation(currentlyVettedPackages, newPackageIds, forceChanges) _ <- checkTransactionIsForCurrentNode(participantId, forceChanges, topologyMappingCode) _ <- validatePackageVetting(currentlyVettedPackages, newPackageIds, None, forceChanges) } yield () - private def checkPackageVettingRevocation( - currentlyVettedPackages: Set[LfPackageId], - nextPackageIds: Set[LfPackageId], - forceChanges: ForceFlags, + private def checkSynchronizerUpgradeAnnouncementIsNotDangerous( + upgradeAnnouncement: SynchronizerUpgradeAnnouncement, + serial: PositiveInt, )(implicit traceContext: TraceContext ): EitherT[FutureUnlessShutdown, TopologyManagerError, Unit] = { - val removed = currentlyVettedPackages -- nextPackageIds - val force = forceChanges.permits(ForceFlag.AllowUnvetPackage) - val changeIsDangerous = removed.nonEmpty - EitherT.cond( - !changeIsDangerous || force, - (), - ParticipantTopologyManagerError.DangerousVettingCommandsRequireForce.Reject(), - ) + + val resF = store + .inspect( + proposals = false, + timeQuery = TimeQuery.Range(None, None), + asOfExclusiveO = None, + op = None, + types = Seq(TopologyMapping.Code.SynchronizerUpgradeAnnouncement), + idFilter = None, + namespaceFilter = None, + ) + .map { result => + result + .collectOfMapping[SynchronizerUpgradeAnnouncement] + .result + .maxByOption(_.serial) match { + case None => ().asRight + + case Some(latestUpgradeAnnouncement) => + // If the latest is another upgrade, we want the PSId to be strictly greater + if (serial == latestUpgradeAnnouncement.serial) + ().asRight + else { + val previouslyAnnouncedSuccessorPSId = + latestUpgradeAnnouncement.mapping.successorSynchronizerId + + Either.cond( + previouslyAnnouncedSuccessorPSId < upgradeAnnouncement.successorSynchronizerId, + (), + InvalidSynchronizerSuccessor.Reject.conflictWithPreviousAnnouncement( + successorSynchronizerId = upgradeAnnouncement.successorSynchronizerId, + previouslyAnnouncedSuccessor = previouslyAnnouncedSuccessorPSId, + ), + ) + } + } + } + + EitherT(resF) } private def checkPartyToParticipantIsNotDangerous( diff --git a/sdk/canton/community/base/src/main/scala/com/digitalasset/canton/topology/TopologyManagerError.scala b/sdk/canton/community/base/src/main/scala/com/digitalasset/canton/topology/TopologyManagerError.scala index 7eaccb192adc..0ccb6c178766 100644 --- 
a/sdk/canton/community/base/src/main/scala/com/digitalasset/canton/topology/TopologyManagerError.scala +++ b/sdk/canton/community/base/src/main/scala/com/digitalasset/canton/topology/TopologyManagerError.scala @@ -762,25 +762,6 @@ object TopologyManagerError extends TopologyManagerErrorGroup { with TopologyManagerError } - @Explanation( - """This error indicates that the topology transactions weren't processed in the allotted time.""" - ) - @Resolution( - "Contact the node administrator to check the result of processing the topology transactions." - ) - object TimeoutWaitingForTransaction - extends ErrorCode( - id = "TOPOLOGY_TIMEOUT_WAITING_FOR_TRANSACTION", - ErrorCategory.DeadlineExceededRequestStateUnknown, - ) { - final case class Failure()(implicit - val loggingContext: ErrorLoggingContext - ) extends CantonError.Impl( - cause = s"The topology transactions weren't processed in the allotted time." - ) - with TopologyManagerError - } - @Explanation( "This error indicates that there already exists a temporary topology store with the desired identifier." ) @@ -840,22 +821,44 @@ object TopologyManagerError extends TopologyManagerErrorGroup { } @Explanation("This error indicates that the successor synchronizer id is not valid.") - @Resolution( - "Change the successor synchronizer ID to have a protocol version that is the same as or newer than the current synchronizer's." - ) + @Resolution("""Change the physical synchronizer id of the successor so that it satisfies: + |- it is greater than the current physical synchronizer id + |- it is greater than the successor of any previous upgrade announcement + |""") object InvalidSynchronizerSuccessor extends ErrorCode(id = "TOPOLOGY_INVALID_SUCCESSOR", InvalidIndependentOfSystemState) { final case class Reject( - currentSynchronizerId: PhysicalSynchronizerId, successorSynchronizerId: PhysicalSynchronizerId, + details: String, )(implicit val loggingContext: ErrorLoggingContext) extends CantonError.Impl( cause = - s"The declared successor $successorSynchronizerId of synchronizer $currentSynchronizerId is not valid." + s"The declared successor $successorSynchronizerId of the synchronizer is not valid: $details" ) with TopologyManagerError + object Reject { + def conflictWithCurrentPSId( + currentSynchronizerId: PhysicalSynchronizerId, + successorSynchronizerId: PhysicalSynchronizerId, + )(implicit loggingContext: ErrorLoggingContext): Reject = + Reject( + successorSynchronizerId, + s"successor id is not greater than current synchronizer id $currentSynchronizerId", + ) + + def conflictWithPreviousAnnouncement( + successorSynchronizerId: PhysicalSynchronizerId, + previouslyAnnouncedSuccessor: PhysicalSynchronizerId, + )(implicit loggingContext: ErrorLoggingContext): Reject = + Reject( + successorSynchronizerId = successorSynchronizerId, + details = + s"conflicts with previous announcement with successor $previouslyAnnouncedSuccessor", + ) + } } + @Explanation( "This error indicates that the synchronizer upgrade announcement specified an invalid upgrade time." ) @@ -881,24 +884,6 @@ object TopologyManagerError extends TopologyManagerErrorGroup { abstract class ParticipantErrorGroup extends ErrorGroup() object ParticipantTopologyManagerError extends ParticipantErrorGroup { - @Explanation( - """This error indicates that a dangerous package vetting command was rejected. - |This is the case when a command is revoking the vetting of a package. 
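The two `Reject` constructors above mirror the two monotonicity rules that `checkSynchronizerUpgradeAnnouncementIsNotDangerous` enforces: a proposed successor must be strictly greater than the current physical synchronizer id, and strictly greater than any previously announced successor. A simplified sketch, assuming a stand-in id type ordered by a single counter:

```scala
object SuccessorMonotonicitySketch {
  // Stand-in for PhysicalSynchronizerId, ordered by a single counter.
  final case class PSId(counter: Int)
  implicit val ord: Ordering[PSId] = Ordering.by(_.counter)
  import Ordering.Implicits.infixOrderingOps

  // A proposed successor must be strictly greater than the current id and
  // strictly greater than the successor of any previous announcement.
  def check(current: PSId, previous: Option[PSId], proposed: PSId): Either[String, Unit] =
    for {
      _ <- Either.cond(
        proposed > current,
        (),
        s"successor id is not greater than current synchronizer id $current",
      )
      _ <- previous.fold[Either[String, Unit]](Right(())) { prev =>
        Either.cond(
          proposed > prev,
          (),
          s"conflicts with previous announcement with successor $prev",
        )
      }
    } yield ()
}
```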
- |Use the force flag to revoke the vetting of a package.""" - ) - @Resolution("Set the ForceFlag.PackageVettingRevocation if you really know what you are doing.") - object DangerousVettingCommandsRequireForce - extends ErrorCode( - id = "TOPOLOGY_DANGEROUS_VETTING_COMMAND_REQUIRES_FORCE_FLAG", - ErrorCategory.InvalidGivenCurrentSystemStateOther, - ) { - final case class Reject()(implicit val loggingContext: ErrorLoggingContext) - extends CantonError.Impl( - cause = "Revoking a vetted package requires ForceFlag.PackageVettingRevocation" - ) - with TopologyManagerError - } - @Explanation( """This error indicates a vetting request failed due to dependencies not being vetted. |On every vetting request, the set supplied packages is analysed for dependencies. The diff --git a/sdk/canton/community/base/src/main/scala/com/digitalasset/canton/topology/client/CachingSynchronizerTopologyClient.scala b/sdk/canton/community/base/src/main/scala/com/digitalasset/canton/topology/client/CachingSynchronizerTopologyClient.scala index b9f575ceaccc..eb34ffc62189 100644 --- a/sdk/canton/community/base/src/main/scala/com/digitalasset/canton/topology/client/CachingSynchronizerTopologyClient.scala +++ b/sdk/canton/community/base/src/main/scala/com/digitalasset/canton/topology/client/CachingSynchronizerTopologyClient.scala @@ -441,10 +441,10 @@ private class ForwardingTopologySnapshotClient( ): FutureUnlessShutdown[Option[PartyKeyTopologySnapshotClient.PartyAuthorizationInfo]] = parent.partyAuthorization(party) - override def isSynchronizerUpgradeOngoing()(implicit + override def synchronizerUpgradeOngoing()(implicit traceContext: TraceContext ): FutureUnlessShutdown[Option[(SynchronizerSuccessor, EffectiveTime)]] = - parent.isSynchronizerUpgradeOngoing() + parent.synchronizerUpgradeOngoing() override def sequencerConnectionSuccessors()(implicit traceContext: TraceContext @@ -728,10 +728,10 @@ class CachingTopologySnapshot( ) .map(_.toMap) - override def isSynchronizerUpgradeOngoing()(implicit + override def synchronizerUpgradeOngoing()(implicit traceContext: TraceContext ): FutureUnlessShutdown[Option[(SynchronizerSuccessor, EffectiveTime)]] = - getAndCache(synchronizerUpgradeCache, parent.isSynchronizerUpgradeOngoing()) + getAndCache(synchronizerUpgradeCache, parent.synchronizerUpgradeOngoing()) override def sequencerConnectionSuccessors()(implicit traceContext: TraceContext diff --git a/sdk/canton/community/base/src/main/scala/com/digitalasset/canton/topology/client/IdentityProvidingServiceClient.scala b/sdk/canton/community/base/src/main/scala/com/digitalasset/canton/topology/client/IdentityProvidingServiceClient.scala index cb9190a9e381..457426ce1f15 100644 --- a/sdk/canton/community/base/src/main/scala/com/digitalasset/canton/topology/client/IdentityProvidingServiceClient.scala +++ b/sdk/canton/community/base/src/main/scala/com/digitalasset/canton/topology/client/IdentityProvidingServiceClient.scala @@ -478,6 +478,13 @@ trait ParticipantTopologySnapshotClient { traceContext: TraceContext ): FutureUnlessShutdown[Boolean] + def participantsWithSupportedFeature( + participants: Set[ParticipantId], + feature: SynchronizerTrustCertificate.ParticipantTopologyFeatureFlag, + )(implicit + traceContext: TraceContext + ): FutureUnlessShutdown[Set[ParticipantId]] + /** Checks whether the provided participant exists, is active and can login at the given point in * time * @@ -689,7 +696,7 @@ trait SynchronizerUpgradeClient { * synchronizer id of the successor of this synchronizer and the upgrade time. 
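The `participantsWithSupportedFeature` query added to `ParticipantTopologySnapshotClient` above reduces to a filter over the loaded participant attributes. A sketch with simplified stand-in types (the real attributes carry permissions, login time, and more):

```scala
object FeatureFilterSketch {
  // Illustrative stand-ins; the real Canton types carry much more information.
  final case class FeatureFlag(value: Int)
  final case class ParticipantAttributes(features: Seq[FeatureFlag])

  // Keep only the participants whose trust certificate advertises the feature.
  def participantsWithSupportedFeature(
      attributes: Map[String, ParticipantAttributes],
      feature: FeatureFlag,
  ): Set[String] =
    attributes.collect {
      case (pid, attrs) if attrs.features.contains(feature) => pid
    }.toSet

  def main(args: Array[String]): Unit = {
    val attrs = Map(
      "p1" -> ParticipantAttributes(Seq(FeatureFlag(1))),
      "p2" -> ParticipantAttributes(Seq.empty),
    )
    println(participantsWithSupportedFeature(attrs, FeatureFlag(1))) // Set(p1)
  }
}
```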
Otherwise, returns * None. */ - def isSynchronizerUpgradeOngoing()(implicit + def synchronizerUpgradeOngoing()(implicit traceContext: TraceContext ): FutureUnlessShutdown[Option[(SynchronizerSuccessor, EffectiveTime)]] @@ -878,6 +885,19 @@ private[client] trait ParticipantTopologySnapshotLoader extends ParticipantTopol ): FutureUnlessShutdown[Boolean] = findParticipantState(participantId).map(_.isDefined) + override def participantsWithSupportedFeature( + participants: Set[ParticipantId], + feature: SynchronizerTrustCertificate.ParticipantTopologyFeatureFlag, + )(implicit + traceContext: TraceContext + ): FutureUnlessShutdown[Set[ParticipantId]] = for { + participantAttributesMap <- loadParticipantStates(participants.toSeq) + } yield { + participantAttributesMap.collect { + case (pid, attributes) if attributes.features.contains(feature) => pid + }.toSet + } + override def isParticipantActiveAndCanLoginAt( participantId: ParticipantId, timestamp: CantonTimestamp, @@ -901,7 +921,6 @@ private[client] trait ParticipantTopologySnapshotLoader extends ParticipantTopol )(implicit traceContext: TraceContext ): FutureUnlessShutdown[Map[ParticipantId, ParticipantAttributes]] - } private[client] trait PartyTopologySnapshotBaseClient { diff --git a/sdk/canton/community/base/src/main/scala/com/digitalasset/canton/topology/client/StoreBasedTopologySnapshot.scala b/sdk/canton/community/base/src/main/scala/com/digitalasset/canton/topology/client/StoreBasedTopologySnapshot.scala index 905809036ffd..d05519ea0537 100644 --- a/sdk/canton/community/base/src/main/scala/com/digitalasset/canton/topology/client/StoreBasedTopologySnapshot.scala +++ b/sdk/canton/community/base/src/main/scala/com/digitalasset/canton/topology/client/StoreBasedTopologySnapshot.scala @@ -8,7 +8,6 @@ import com.daml.nonempty.NonEmpty import com.digitalasset.canton.config.RequireTypes.PositiveInt import com.digitalasset.canton.crypto.{KeyPurpose, SigningKeyUsage} import com.digitalasset.canton.data.{CantonTimestamp, SynchronizerSuccessor} -import com.digitalasset.canton.discard.Implicits.DiscardOps import com.digitalasset.canton.lifecycle.FutureUnlessShutdown import com.digitalasset.canton.logging.{NamedLoggerFactory, NamedLogging} import com.digitalasset.canton.protocol.{ @@ -343,7 +342,7 @@ class StoreBasedTopologySnapshot( participantId -> ParticipantAttributes( reducedPermission, participantAttributes.loginAfter, - onboarding, + onboarding = onboarding, ) } }.toMap @@ -488,19 +487,17 @@ class StoreBasedTopologySnapshot( private def getParticipantsWithCertificates( storedTxs: StoredTopologyTransactions[Replace, TopologyMapping] - )(implicit traceContext: TraceContext): Set[ParticipantId] = storedTxs - .collectOfMapping[SynchronizerTrustCertificate] - .result - .groupBy(_.mapping.participantId) - .collect { case (pid, seq) => - // invoke collectLatestMapping only to warn in case a participantId's synchronizer trust certificate is not unique - collectLatestMapping( - TopologyMapping.Code.SynchronizerTrustCertificate, - seq.sortBy(_.validFrom), - ).discard - pid - } - .toSet + )(implicit traceContext: TraceContext): Map[ParticipantId, SynchronizerTrustCertificate] = + storedTxs + .collectOfMapping[SynchronizerTrustCertificate] + .result + .groupBy(_.mapping.participantId) + .flatMap { case (pid, seq) => + collectLatestMapping( + TopologyMapping.Code.SynchronizerTrustCertificate, + seq.sortBy(_.validFrom), + ).map(pid -> _) + } private def getParticipantsWithCertAndKeys( storedTxs: StoredTopologyTransactions[Replace, TopologyMapping], @@ 
-547,7 +544,7 @@ class StoreBasedTopologySnapshot( participantsFilter: Seq[ParticipantId] )(implicit traceContext: TraceContext - ): FutureUnlessShutdown[Map[ParticipantId, ParticipantSynchronizerPermission]] = + ): FutureUnlessShutdown[Map[ParticipantId, ParticipantAttributes]] = for { // Looks up synchronizer parameters for default rate limits. synchronizerParametersState <- findTransactions( @@ -584,11 +581,12 @@ class StoreBasedTopologySnapshot( } yield { // 1. Participant needs to have requested access to synchronizer by issuing a synchronizer trust certificate val participantsWithCertificates = getParticipantsWithCertificates(storedTxs) + val participantsIdsWithCertificates = participantsWithCertificates.keySet // 2. Participant needs to have keys registered on the synchronizer val participantsWithCertAndKeys = - getParticipantsWithCertAndKeys(storedTxs, participantsWithCertificates) + getParticipantsWithCertAndKeys(storedTxs, participantsIdsWithCertificates) // Warn about participants with cert but no keys - (participantsWithCertificates -- participantsWithCertAndKeys).foreach { pid => + (participantsIdsWithCertificates -- participantsWithCertAndKeys).foreach { pid => logger.warn( s"Participant $pid has a synchronizer trust certificate, but no keys on synchronizer ${synchronizerParametersState.synchronizerId}" ) @@ -597,36 +595,41 @@ class StoreBasedTopologySnapshot( val participantSynchronizerPermissions = getParticipantSynchronizerPermissions(storedTxs, participantsWithCertAndKeys) - val participantIdSynchronizerPermissionsMap = participantsWithCertAndKeys.toSeq.mapFilter { - pid => - if ( - synchronizerParametersState.parameters.onboardingRestriction.isRestricted && !participantSynchronizerPermissions - .contains(pid) - ) { - // 4a. If the synchronizer is restricted, we must have found a ParticipantSynchronizerPermission for the participants, otherwise - // the participants shouldn't have been able to onboard to the synchronizer in the first place. - // In case we don't find a ParticipantSynchronizerPermission, we don't return the participant with default permissions, but we skip it. - logger.warn( - s"Unable to find ParticipantSynchronizerPermission for participant $pid on synchronizer ${synchronizerParametersState.synchronizerId} with onboarding restrictions ${synchronizerParametersState.parameters.onboardingRestriction} at $referenceTime" + participantsWithCertAndKeys.toSeq.mapFilter { pid => + val supportedFeatures = + participantsWithCertificates.get(pid).toList.flatMap(_.featureFlags) + if ( + synchronizerParametersState.parameters.onboardingRestriction.isRestricted && !participantSynchronizerPermissions + .contains(pid) + ) { + // 4a. If the synchronizer is restricted, we must have found a ParticipantSynchronizerPermission for the participants, otherwise + // the participants shouldn't have been able to onboard to the synchronizer in the first place. + // In case we don't find a ParticipantSynchronizerPermission, we don't return the participant with default permissions, but we skip it. + logger.warn( + s"Unable to find ParticipantSynchronizerPermission for participant $pid on synchronizer ${synchronizerParametersState.synchronizerId} with onboarding restrictions ${synchronizerParametersState.parameters.onboardingRestriction} at $referenceTime" + ) + None + } else { + val permissions = participantSynchronizerPermissions + .getOrElse( + pid, + ParticipantSynchronizerPermission + .default(synchronizerParametersState.synchronizerId, pid), ) - None - } else { - // 4b. 
Apply default permissions/trust of submission/ordinary if missing participant synchronizer permission and - // grab rate limits from dynamic synchronizer parameters if not specified - Some( - pid -> participantSynchronizerPermissions - .getOrElse( - pid, - ParticipantSynchronizerPermission - .default(synchronizerParametersState.synchronizerId, pid), - ) - .setDefaultLimitIfNotSet( - DynamicSynchronizerParameters.defaultParticipantSynchronizerLimits - ) + .setDefaultLimitIfNotSet( + DynamicSynchronizerParameters.defaultParticipantSynchronizerLimits ) - } + // 4b. Apply default permissions/trust of submission/ordinary if missing participant synchronizer permission and + // grab rate limits from dynamic synchronizer parameters if not specified + Some( + pid -> ParticipantAttributes( + permissions.permission, + permissions.loginAfter, + supportedFeatures, + ) + ) + } }.toMap - participantIdSynchronizerPermissionsMap } override def loadParticipantStates( @@ -637,9 +640,7 @@ class StoreBasedTopologySnapshot( if (participants.isEmpty) FutureUnlessShutdown.pure(Map()) else - loadParticipantStatesHelper(participants).map(_.map { case (pid, pdp) => - pid -> pdp.toParticipantAttributes - }) + loadParticipantStatesHelper(participants) /** abstract loading function used to obtain the full key collection for a key owner */ override def allKeys(owner: Member)(implicit @@ -831,7 +832,7 @@ class StoreBasedTopologySnapshot( } } - override def isSynchronizerUpgradeOngoing()(implicit + override def synchronizerUpgradeOngoing()(implicit traceContext: TraceContext ): FutureUnlessShutdown[Option[(SynchronizerSuccessor, EffectiveTime)]] = findTransactions( diff --git a/sdk/canton/community/base/src/main/scala/com/digitalasset/canton/topology/processing/InitialTopologySnapshotValidator.scala b/sdk/canton/community/base/src/main/scala/com/digitalasset/canton/topology/processing/InitialTopologySnapshotValidator.scala index 6ea008a6bae0..20be17d2a24c 100644 --- a/sdk/canton/community/base/src/main/scala/com/digitalasset/canton/topology/processing/InitialTopologySnapshotValidator.scala +++ b/sdk/canton/community/base/src/main/scala/com/digitalasset/canton/topology/processing/InitialTopologySnapshotValidator.scala @@ -72,107 +72,12 @@ class InitialTopologySnapshotValidator( */ final def validateAndApplyInitialTopologySnapshot( initialSnapshot: GenericStoredTopologyTransactions - )(implicit traceContext: TraceContext): EitherT[FutureUnlessShutdown, String, Unit] = + )(implicit traceContext: TraceContext): EitherT[FutureUnlessShutdown, String, Unit] = { + val finalSnapshot = preprocessInitialSnapshot(initialSnapshot) if (!validateInitialSnapshot) { logger.info("Skipping initial topology snapshot validation") - EitherT.right(store.bulkInsert(initialSnapshot)) + EitherT.right(store.bulkInsert(finalSnapshot)) } else { - // the following preprocessing is necessary because the topology transactions have been assigned the same timestamp - // upon export and it's possible that the following situation happened: - // --------------- - // original store: - // ts1: tx hashOfSignatures = h1, validFrom = ts1, validUntil = ts2 - // ts2: tx hashOfSignatures = h1, validFrom = ts2 - // --------------- - // since the topology transaction was stored at two different timestamps, they were inserted into the table just as expected. 
- // but upon export the transactions have the same timestamp: - // --------------- - // initial snapshot: - // ts1: tx hashOfSignatures = h1, validFrom = ts1, validUntil = ts1 - // ts1: tx hashOfSignatures = h1, validFrom = ts1 - // --------------- - // Therefore the second insert would be ignored because of the deduplication via the unique index and "on conflict do nothing". - // To work around this, we combine the two transaction entries (they are literally the same) by doing the following: - // * take the validFrom value from the first occurrence - // * take the validUntil value from the last occurrence - // * only retain the first occurrence of the transaction with the updated validFrom/validUntil. We need to do this - // because there could be another transaction between the duplicates, that depends on the first duplicate to have been valid. - val finalSnapshot = StoredTopologyTransactions( - initialSnapshot.result - // first retain the global order of the topology transactions within the snapshot - .zipWithIndex - // Find the transaction entries with the same set of signing keys at the same sequenced timestamp. - // The problematic scenario above is only relevant for the genesis snapshot, in which all topology - // transactions have the same sequenced/effective time. - // However, for onboarding snapshots (no matter which node), we MUST not merge transactions from - // different timestamps, because each transaction may affect the epsilon tracker and therefore - // must be preserved. - // The grouping is done with the set of signatures and specifically not hashOfSignatures, - // because legacy transactions allowed multiple signatures with the same key, but due to signature deduplication, - // this could lead to transactions ending up being (silently) deduplicated due to the unique key in the database table. - // This causes a mismatch between the transactions-to-be-validated and the transactions-actually-persisted. - // For more details, see canton#27390 - .groupBy1 { case (tx, _idx) => - (tx.sequenced, tx.hash, tx.transaction.signatures.map(_.authorizingLongTermKey)) - } - .toSeq - .flatMap { case ((sequenced, _, _), transactions) => - // onboarding snapshots should only have a single transaction per bucket, because topology - // transactions are compacted (see `TopologyStateProcessor`) and the effective times are preserved. - - // genesis snapshots produced by canton (and not assembled manually by a user) do not contain - // rejected transactions. - - // NOTICE: given the above assumptions, there should not be a need for the partitioning of the - // topology transactions, but we keep it in to potentially handle a snapshot correctly that we wouldn't otherwise. - - // for all non-rejected transactions with the same hash of signatures, - // only retain a single entry - // * at the lowest index (ie earliest occurrence), - // * with validFrom of the lowest index (i.e. earliest occurrence), - // * with validUntil of the highest index (i.e. latest occurrence) - // - // All rejected transactions can stay in the snapshot as they are. - // - // Proposals do not need special treatment, because they should have - // different sets of signatures and not enough signatures to be fully authorized. - // Therefore, proposals should end up in separate groups of transactions, or - // they can be merged regardless. 
- val (nonRejected, rejected) = transactions.partition { case (tx, _idx) => - tx.rejectionReason.isEmpty - } - // only merge non-rejected transactions - val mergedNonRejected = NonEmpty - .from(nonRejected) - .map { nonRejectedNE => - val (txWithMinIndex, minIndex) = nonRejectedNE.minBy1 { case (tx_, idx) => idx } - val (txWithMaxIndex, _) = nonRejectedNE.maxBy1 { case (tx_, idx) => idx } - val retainedTransaction = - txWithMinIndex.copy(validUntil = txWithMaxIndex.validUntil) - if (nonRejectedNE.sizeIs > 1) { - logger.info(s"""Combining duplicate valid transactions at $sequenced - |originals: $nonRejected - |result : $retainedTransaction""".stripMargin) - } - ( - ( - retainedTransaction, - minIndex, - ), - ) - - } - .toList - // return the merged and the rejected transactions. - // sorting by index to retain the original order will happen afterwards - (mergedNonRejected ++ rejected) - } - // re-establish the original order by index - .sortBy { case (_tx, idx) => idx } - // throw away the index - .map { case (tx, _idx) => tx } - ) - logger.info( s"Validating ${finalSnapshot.result.size}/${initialSnapshot.result.size} transactions to initialize the topology store ${store.storeId}" ) @@ -231,6 +136,109 @@ class InitialTopologySnapshotValidator( ) } } + } + + /** The following preprocessing is necessary because the topology transactions have been assigned + * the same timestamp upon export and it's possible that the following situation happened: + * {{{ + * original store: + * ts1: tx hashOfSignatures = h1, validFrom = ts1, validUntil = ts2 + * ts2: tx hashOfSignatures = h1, validFrom = ts2 + * }}} + * Since the topology transaction was stored at two different timestamps, both entries were inserted + * into the table just as expected. But upon export the transactions have the same timestamp: + * {{{ + * initial snapshot: + * ts1: tx hashOfSignatures = h1, validFrom = ts1, validUntil = ts1 + * ts1: tx hashOfSignatures = h1, validFrom = ts1 + * }}} + * Therefore the second insert would be ignored because of the deduplication via + * the unique index and "on conflict do nothing". To work around this, we combine the two + * transaction entries (they are literally the same) by doing the following: + * - take the validFrom value from the first occurrence + * - take the validUntil value from the last occurrence + * - only retain the first occurrence of the transaction with the updated validFrom/validUntil. + * We need to do this because there could be another transaction between the duplicates, that + * depends on the first duplicate to have been valid. + */ + private def preprocessInitialSnapshot( + initialSnapshot: GenericStoredTopologyTransactions + )(implicit traceContext: TraceContext): GenericStoredTopologyTransactions = + StoredTopologyTransactions( + initialSnapshot.result + // first retain the global order of the topology transactions within the snapshot + .zipWithIndex + // Find the transaction entries with the same set of signing keys at the same sequenced timestamp. + // The problematic scenario above is only relevant for the genesis snapshot, in which all topology + // transactions have the same sequenced/effective time. + // However, for onboarding snapshots (no matter which node), we MUST not merge transactions from + // different timestamps, because each transaction may affect the epsilon tracker and therefore + // must be preserved. 
+ // The grouping is done with the set of signatures and specifically not hashOfSignatures, + // because legacy transactions allowed multiple signatures with the same key, but due to signature deduplication, + // this could lead to transactions ending up being (silently) deduplicated due to the unique key in the database table. + // This causes a mismatch between the transactions-to-be-validated and the transactions-actually-persisted. + // For more details, see canton#27390 + .groupBy1 { case (tx, _idx) => + (tx.sequenced, tx.hash, tx.transaction.signatures.map(_.authorizingLongTermKey)) + } + .toSeq + .flatMap { case ((sequenced, _, _), transactions) => + // onboarding snapshots should only have a single transaction per bucket, because topology + // transactions are compacted (see `TopologyStateProcessor`) and the effective times are preserved. + + // genesis snapshots produced by canton (and not assembled manually by a user) do not contain + // rejected transactions. + + // NOTICE: given the above assumptions, there should not be a need for the partitioning of the + // topology transactions, but we keep it in to potentially handle a snapshot correctly that we wouldn't otherwise. + + // for all non-rejected transactions with the same hash of signatures, + // only retain a single entry + // * at the lowest index (ie earliest occurrence), + // * with validFrom of the lowest index (i.e. earliest occurrence), + // * with validUntil of the highest index (i.e. latest occurrence) + // + // All rejected transactions can stay in the snapshot as they are. + // + // Proposals do not need special treatment, because they should have + // different sets of signatures and not enough signatures to be fully authorized. + // Therefore, proposals should end up in separate groups of transactions, or + // they can be merged regardless. + val (nonRejected, rejected) = transactions.partition { case (tx, _idx) => + tx.rejectionReason.isEmpty + } + // only merge non-rejected transactions + val mergedNonRejected = NonEmpty + .from(nonRejected) + .map { nonRejectedNE => + val (txWithMinIndex, minIndex) = nonRejectedNE.minBy1 { case (tx_, idx) => idx } + val (txWithMaxIndex, _) = nonRejectedNE.maxBy1 { case (tx_, idx) => idx } + val retainedTransaction = + txWithMinIndex.copy(validUntil = txWithMaxIndex.validUntil) + if (nonRejectedNE.sizeIs > 1) { + logger.info(s"""Combining duplicate valid transactions at $sequenced + |originals: $nonRejected + |result : $retainedTransaction""".stripMargin) + } + ( + ( + retainedTransaction, + minIndex, + ), + ) + + } + .toList + // return the merged and the rejected transactions. 
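The merging rule described above (keep the earliest occurrence with its validFrom, extend its validUntil from the latest occurrence, leave rejected entries alone, and re-sort by the original index) can be summarized in a standalone sketch with simplified stand-in types:

```scala
object SnapshotMergeSketch {
  // Simplified stand-in for a stored topology transaction entry.
  final case class StoredTx(
      hash: String,
      validFrom: Int,
      validUntil: Option[Int],
      rejected: Boolean = false,
  )

  def preprocess(snapshot: List[StoredTx]): List[StoredTx] =
    snapshot.zipWithIndex
      // stand-in grouping key; the real code groups by (sequenced, hash, signing keys)
      .groupBy { case (tx, _) => (tx.validFrom, tx.hash) }
      .values
      .flatMap { entries =>
        val (nonRejected, rejected) = entries.partition { case (tx, _) => !tx.rejected }
        val merged = nonRejected.minByOption { case (_, idx) => idx }.map {
          case (first, minIdx) =>
            val (last, _) = nonRejected.maxBy { case (_, idx) => idx }
            // keep the earliest occurrence, extended with the latest validUntil
            (first.copy(validUntil = last.validUntil), minIdx)
        }
        merged.toList ++ rejected
      }
      .toList
      .sortBy { case (_, idx) => idx } // re-establish the original order
      .map { case (tx, _) => tx }

  def main(args: Array[String]): Unit = {
    val collapsed = List(
      StoredTx("h1", validFrom = 1, validUntil = Some(1)),
      StoredTx("h2", validFrom = 1, validUntil = Some(2)),
      StoredTx("h1", validFrom = 1, validUntil = None),
    )
    // The two h1 entries merge into one with validFrom = 1 and validUntil = None.
    println(preprocess(collapsed))
  }
}
```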
+ // sorting by index to retain the original order will happen afterwards + (mergedNonRejected ++ rejected) + } + // re-establish the original order by index + .sortBy { case (_tx, idx) => idx } + // throw away the index + .map { case (tx, _idx) => tx } + ) private def processTransactionsAtSequencedTime( sequenced: SequencedTime, diff --git a/sdk/canton/community/base/src/main/scala/com/digitalasset/canton/topology/processing/TerminateProcessing.scala b/sdk/canton/community/base/src/main/scala/com/digitalasset/canton/topology/processing/TerminateProcessing.scala index cb551329ec8e..b89a97c7b9e8 100644 --- a/sdk/canton/community/base/src/main/scala/com/digitalasset/canton/topology/processing/TerminateProcessing.scala +++ b/sdk/canton/community/base/src/main/scala/com/digitalasset/canton/topology/processing/TerminateProcessing.scala @@ -30,6 +30,8 @@ trait TerminateProcessing { def notifyUpgradeAnnouncement(successor: SynchronizerSuccessor)(implicit traceContext: TraceContext ): Unit + + def notifyUpgradeCancellation()(implicit traceContext: TraceContext): Unit } object TerminateProcessing { @@ -53,5 +55,7 @@ object TerminateProcessing { override def notifyUpgradeAnnouncement(successor: SynchronizerSuccessor)(implicit traceContext: TraceContext ): Unit = () + + override def notifyUpgradeCancellation()(implicit traceContext: TraceContext): Unit = () } } diff --git a/sdk/canton/community/base/src/main/scala/com/digitalasset/canton/topology/processing/TopologyTransactionProcessor.scala b/sdk/canton/community/base/src/main/scala/com/digitalasset/canton/topology/processing/TopologyTransactionProcessor.scala index 4a3be0e6e126..ba258ef0ee2a 100644 --- a/sdk/canton/community/base/src/main/scala/com/digitalasset/canton/topology/processing/TopologyTransactionProcessor.scala +++ b/sdk/canton/community/base/src/main/scala/com/digitalasset/canton/topology/processing/TopologyTransactionProcessor.scala @@ -32,6 +32,7 @@ import com.digitalasset.canton.topology.store.{TopologyStore, TopologyStoreId} import com.digitalasset.canton.topology.transaction.SignedTopologyTransaction.GenericSignedTopologyTransaction import com.digitalasset.canton.topology.transaction.{ SynchronizerUpgradeAnnouncement, + TopologyChangeOp, ValidatingTopologyMappingChecks, } import com.digitalasset.canton.topology.{ @@ -479,12 +480,14 @@ class TopologyTransactionProcessor( */ validUpgradeAnnouncements = validTransactions .mapFilter(_.selectMapping[SynchronizerUpgradeAnnouncement]) - .map(_.mapping) - // TODO(#26580) Handle cancellation - _ = validUpgradeAnnouncements.foreach(announcement => - terminateProcessing.notifyUpgradeAnnouncement(announcement.successor) - ) + _ = validUpgradeAnnouncements.foreach { announcement => + announcement.operation match { + case TopologyChangeOp.Replace => + terminateProcessing.notifyUpgradeAnnouncement(announcement.mapping.successor) + case TopologyChangeOp.Remove => terminateProcessing.notifyUpgradeCancellation() + } + } _ <- synchronizeWithClosing("notify-topology-transaction-observers")( MonadUtil.sequentialTraverse(listeners.get()) { listenerGroup => diff --git a/sdk/canton/community/base/src/main/scala/com/digitalasset/canton/topology/store/TopologyTransactionRejection.scala b/sdk/canton/community/base/src/main/scala/com/digitalasset/canton/topology/store/TopologyTransactionRejection.scala index 4cdfb4118032..5b43c8692f6c 100644 --- a/sdk/canton/community/base/src/main/scala/com/digitalasset/canton/topology/store/TopologyTransactionRejection.scala +++ 
b/sdk/canton/community/base/src/main/scala/com/digitalasset/canton/topology/store/TopologyTransactionRejection.scala @@ -262,7 +262,7 @@ object TopologyTransactionRejection { s"The declared successor $successorSynchronizerId of synchronizer $currentSynchronizerId is not valid." override def toTopologyManagerError(implicit elc: ErrorLoggingContext): TopologyManagerError = - TopologyManagerError.InvalidSynchronizerSuccessor.Reject( + TopologyManagerError.InvalidSynchronizerSuccessor.Reject.conflictWithCurrentPSId( currentSynchronizerId, successorSynchronizerId, ) diff --git a/sdk/canton/community/base/src/main/scala/com/digitalasset/canton/topology/transaction/ParticipantAttributes.scala b/sdk/canton/community/base/src/main/scala/com/digitalasset/canton/topology/transaction/ParticipantAttributes.scala index 8c18ea9f243a..97006f44b05f 100644 --- a/sdk/canton/community/base/src/main/scala/com/digitalasset/canton/topology/transaction/ParticipantAttributes.scala +++ b/sdk/canton/community/base/src/main/scala/com/digitalasset/canton/topology/transaction/ParticipantAttributes.scala @@ -8,6 +8,7 @@ import com.digitalasset.canton.data.CantonTimestamp final case class ParticipantAttributes( permission: ParticipantPermission, loginAfter: Option[CantonTimestamp] = None, + features: Seq[SynchronizerTrustCertificate.ParticipantTopologyFeatureFlag] = Seq.empty, onboarding: Boolean = false, ) { def canConfirm: Boolean = permission.canConfirm && !onboarding diff --git a/sdk/canton/community/base/src/main/scala/com/digitalasset/canton/topology/transaction/TopologyMapping.scala b/sdk/canton/community/base/src/main/scala/com/digitalasset/canton/topology/transaction/TopologyMapping.scala index cf4641540dc3..bda907630aee 100644 --- a/sdk/canton/community/base/src/main/scala/com/digitalasset/canton/topology/transaction/TopologyMapping.scala +++ b/sdk/canton/community/base/src/main/scala/com/digitalasset/canton/topology/transaction/TopologyMapping.scala @@ -21,6 +21,7 @@ import com.digitalasset.canton.data.{CantonTimestamp, SynchronizerSuccessor} import com.digitalasset.canton.logging.pretty.{Pretty, PrettyPrinting} import com.digitalasset.canton.networking.{Endpoint, UrlValidator} import com.digitalasset.canton.protocol.v30.Enums +import com.digitalasset.canton.protocol.v30.Enums.ParticipantFeatureFlag import com.digitalasset.canton.protocol.v30.NamespaceDelegation.Restriction import com.digitalasset.canton.protocol.v30.TopologyMapping.Mapping import com.digitalasset.canton.protocol.{ @@ -38,6 +39,7 @@ import com.digitalasset.canton.topology.transaction.DelegationRestriction.{ CanSignAllMappings, } import com.digitalasset.canton.topology.transaction.SignedTopologyTransaction.GenericSignedTopologyTransaction +import com.digitalasset.canton.topology.transaction.SynchronizerTrustCertificate.ParticipantTopologyFeatureFlag import com.digitalasset.canton.topology.transaction.TopologyMapping.RequiredAuth.* import com.digitalasset.canton.topology.transaction.TopologyMapping.{ Code, @@ -946,10 +948,15 @@ object PartyToKeyMapping extends TopologyMappingCompanion { } /** Participant synchronizer trust certificate + * @param featureFlags + * Protocol features supported by [[participantId]] on [[synchronizerId]]. Feature flags are used + * to add targeted support for a protocol feature or bugfix without requiring a new protocol + * version. Care must be taken to not create ledger forks when using such flags. 
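The decoding rule of `ParticipantTopologyFeatureFlag.fromProtoV30`, defined just below, is worth spelling out: known enum values map to their named flag, UNSPECIFIED is dropped, and any other value is preserved as an unnamed flag so that unknown-but-set flags survive a round trip. A stand-in sketch (the value 1 for the known flag is illustrative; the real numbering comes from the protobuf enum):

```scala
object FeatureFlagDecodeSketch {
  // Stand-in for ParticipantTopologyFeatureFlag; the enum numbering is illustrative.
  final case class Flag(value: Int, name: Option[String] = None) {
    override def toString: String = name.getOrElse(s"UnrecognizedFeatureFlag($value)")
  }

  private val known = Seq(Flag(1, Some("ExternalSigningLocalContractsInSubview")))

  // Known values map to their named flag, UNSPECIFIED (0) is dropped, and any other
  // value is kept as an unnamed flag so unknown-but-set flags survive a round trip.
  def fromWire(value: Int): Option[Flag] =
    known.find(_.value == value).orElse(Option.when(value != 0)(Flag(value)))

  def main(args: Array[String]): Unit =
    // List(ExternalSigningLocalContractsInSubview, UnrecognizedFeatureFlag(42))
    println(Seq(0, 1, 42).flatMap(fromWire(_)))
}
```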
*/ final case class SynchronizerTrustCertificate( participantId: ParticipantId, synchronizerId: SynchronizerId, + featureFlags: Seq[ParticipantTopologyFeatureFlag] = Seq.empty, ) extends TopologyMapping { override def companion: SynchronizerTrustCertificate.type = SynchronizerTrustCertificate @@ -958,6 +965,7 @@ final case class SynchronizerTrustCertificate( v30.SynchronizerTrustCertificate( participantUid = participantId.uid.toProtoPrimitive, synchronizerId = synchronizerId.toProtoPrimitive, + featureFlags = featureFlags.map(_.toProtoV30), ) override def toProtoV30: v30.TopologyMapping = @@ -982,6 +990,40 @@ final case class SynchronizerTrustCertificate( } object SynchronizerTrustCertificate extends TopologyMappingCompanion { + final case class ParticipantTopologyFeatureFlag private (value: Int)( + name: Option[String] = None + ) { + def toProtoV30: v30.Enums.ParticipantFeatureFlag = + v30.Enums.ParticipantFeatureFlag.fromValue(value) + override def toString: String = name.getOrElse(s"UnrecognizedFeatureFlag($value)") + } + + object ParticipantTopologyFeatureFlag { + + /** Feature flag enabled when the participant supports the fix for a bug that incorrectly + * rejects externally signed transactions with a locally created contract used in a subview. + * See https://github.com/DACH-NY/canton/issues/27883 Used only in PV33. + */ + val ExternalSigningLocalContractsInSubview: ParticipantTopologyFeatureFlag = + ParticipantTopologyFeatureFlag( + v30.Enums.ParticipantFeatureFlag.PARTICIPANT_FEATURE_FLAG_PV33_EXTERNAL_SIGNING_LOCAL_CONTRACT_IN_SUBVIEW.value + )(Some("ExternalSigningLocalContractsInSubview")) + + val knownTopologyFeatureFlags: Seq[ParticipantTopologyFeatureFlag] = Seq( + ExternalSigningLocalContractsInSubview + ) + + def fromProtoV30( + valueP: v30.Enums.ParticipantFeatureFlag + ): Option[ParticipantTopologyFeatureFlag] = + knownTopologyFeatureFlags + .find(_.value == valueP.value) + .orElse( + Option.when(valueP != ParticipantFeatureFlag.PARTICIPANT_FEATURE_FLAG_UNSPECIFIED)( + ParticipantTopologyFeatureFlag(valueP.value)() + ) + ) + } def uniqueKey(participantId: ParticipantId, synchronizerId: SynchronizerId): MappingHash = TopologyMapping.buildUniqueKey(code)( @@ -999,9 +1041,11 @@ object SynchronizerTrustCertificate extends TopologyMappingCompanion { "participant_uid", ) synchronizerId <- SynchronizerId.fromProtoPrimitive(valueP.synchronizerId, "synchronizer_id") + featureFlags = valueP.featureFlags.flatMap(ParticipantTopologyFeatureFlag.fromProtoV30) } yield SynchronizerTrustCertificate( participantId, synchronizerId, + featureFlags, ) } @@ -1102,9 +1146,6 @@ final case class ParticipantSynchronizerPermission( override def companion: ParticipantSynchronizerPermission.type = ParticipantSynchronizerPermission - def toParticipantAttributes: ParticipantAttributes = - ParticipantAttributes(permission, loginAfter) - def toProto: v30.ParticipantSynchronizerPermission = v30.ParticipantSynchronizerPermission( synchronizerId = synchronizerId.toProtoPrimitive, diff --git a/sdk/canton/community/base/src/main/scala/com/digitalasset/canton/topology/transaction/TopologyMappingChecks.scala b/sdk/canton/community/base/src/main/scala/com/digitalasset/canton/topology/transaction/TopologyMappingChecks.scala index 06a0f6c5c775..642e4d737e9c 100644 --- a/sdk/canton/community/base/src/main/scala/com/digitalasset/canton/topology/transaction/TopologyMappingChecks.scala +++ b/sdk/canton/community/base/src/main/scala/com/digitalasset/canton/topology/transaction/TopologyMappingChecks.scala @@ -86,11 
+86,26 @@ class ValidatingTopologyMappingChecks( s"The serial for a REPLACE must be less than ${PositiveInt.MaxValue}." ), ) + + def mappingMismatch(expected: TopologyMapping): Boolean = (toValidate.mapping, expected) match { + // When removing the synchronizer trust certificate, no need to mandate that the removal mapping has the same + // feature flags. + case ( + removeCertificate: SynchronizerTrustCertificate, + inStoreCertificate: SynchronizerTrustCertificate, + ) => + removeCertificate.uniqueKey != inStoreCertificate.uniqueKey + case _ => + toValidate.mapping != expected + } + val checkRemoveDoesNotChangeMapping = EitherT.fromEither[FutureUnlessShutdown]( inStore .collect { case expected - if toValidate.operation == TopologyChangeOp.Remove && toValidate.mapping != expected.mapping => + if toValidate.operation == TopologyChangeOp.Remove && mappingMismatch( + expected.mapping + ) => TopologyTransactionRejection .RemoveMustNotChangeMapping(toValidate.mapping, expected.mapping) } diff --git a/sdk/canton/community/base/src/main/scala/com/digitalasset/canton/version/ParticipantProtocolFeatureFlags.scala b/sdk/canton/community/base/src/main/scala/com/digitalasset/canton/version/ParticipantProtocolFeatureFlags.scala new file mode 100644 index 000000000000..d8aaf86a9ef2 --- /dev/null +++ b/sdk/canton/community/base/src/main/scala/com/digitalasset/canton/version/ParticipantProtocolFeatureFlags.scala @@ -0,0 +1,15 @@ +// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. +// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.version + +import com.digitalasset.canton.topology.transaction.SynchronizerTrustCertificate.ParticipantTopologyFeatureFlag + +object ParticipantProtocolFeatureFlags { + + /** Feature flags supported by the participant node for each PV + */ + val supportedFeatureFlagsByPV: Map[ProtocolVersion, Set[ParticipantTopologyFeatureFlag]] = Map( + ProtocolVersion.v34 -> Set.empty + ) +} diff --git a/sdk/canton/community/common/src/main/daml/CantonExamples/daml.yaml b/sdk/canton/community/common/src/main/daml/CantonExamples/daml.yaml index be5518b29514..196d4c1fcd71 100644 --- a/sdk/canton/community/common/src/main/daml/CantonExamples/daml.yaml +++ b/sdk/canton/community/common/src/main/daml/CantonExamples/daml.yaml @@ -1,4 +1,4 @@ -sdk-version: 3.4.0-snapshot.20251002.14252.0.v9bd7e7ad +sdk-version: 3.4.0-snapshot.20251007.14274.0.ve2024cd6 build-options: - --target=2.1 name: CantonExamples diff --git a/sdk/canton/community/common/src/main/resources/db/migration/canton/postgres/stable/V1_1__initial.sha256 b/sdk/canton/community/common/src/main/resources/db/migration/canton/postgres/stable/V1_1__initial.sha256 index 210cbd73c262..a7ad79e96645 100644 --- a/sdk/canton/community/common/src/main/resources/db/migration/canton/postgres/stable/V1_1__initial.sha256 +++ b/sdk/canton/community/common/src/main/resources/db/migration/canton/postgres/stable/V1_1__initial.sha256 @@ -1 +1 @@ -99f52a91b709655278167579f1848929420acb72eb5fbc996337096b951d0b47 +f5373e3920eab9bac1f24504c1a07f2f3d92437271a94f233e4034b88a308ca7 diff --git a/sdk/canton/community/common/src/main/resources/db/migration/canton/postgres/stable/V1_1__initial.sql b/sdk/canton/community/common/src/main/resources/db/migration/canton/postgres/stable/V1_1__initial.sql index 25a75866acc8..15e575c134d2 100644 --- a/sdk/canton/community/common/src/main/resources/db/migration/canton/postgres/stable/V1_1__initial.sql +++ 
b/sdk/canton/community/common/src/main/resources/db/migration/canton/postgres/stable/V1_1__initial.sql @@ -194,8 +194,8 @@ create table med_response_aggregations ( create table common_sequenced_events ( -- discriminate between different users of the sequenced events tables physical_synchronizer_idx integer not null, - -- Proto serialized signed message - sequenced_event bytea not null, + -- Proto serialized signed message with EXTERNAL TOAST storage strategy (so the PostgreSQL server does not attempt to compress the already compressed content again) + sequenced_event bytea STORAGE EXTERNAL not null, -- Explicit fields to query the messages, which are stored as blobs type varchar collate "C" not null check(type in ('del', 'err', 'ign')), -- Timestamp of the time of change in microsecond precision relative to EPOCH diff --git a/sdk/canton/community/common/src/main/scala/com/digitalasset/canton/crypto/LedgerApiCryptoConversions.scala b/sdk/canton/community/common/src/main/scala/com/digitalasset/canton/crypto/LedgerApiCryptoConversions.scala index 7c8c9fec2731..7b6044e36376 100644 --- a/sdk/canton/community/common/src/main/scala/com/digitalasset/canton/crypto/LedgerApiCryptoConversions.scala +++ b/sdk/canton/community/common/src/main/scala/com/digitalasset/canton/crypto/LedgerApiCryptoConversions.scala @@ -4,35 +4,37 @@ package com.digitalasset.canton.crypto import com.daml.ledger.api.v2 -import com.daml.ledger.api.v2.interactive.interactive_submission_service as iss +import com.daml.ledger.api.v2.crypto as lapiCrypto import io.scalaland.chimney.Transformer /** Utility methods to convert between Canton crypto classes and their equivalent on the ledger API. */ object LedgerApiCryptoConversions { implicit val cantonToLAPISignatureFormatTransformer - : Transformer[v30.SignatureFormat, iss.SignatureFormat] = { + : Transformer[v30.SignatureFormat, lapiCrypto.SignatureFormat] = { case v30.SignatureFormat.SIGNATURE_FORMAT_UNSPECIFIED => - iss.SignatureFormat.SIGNATURE_FORMAT_UNSPECIFIED - case v30.SignatureFormat.SIGNATURE_FORMAT_DER => iss.SignatureFormat.SIGNATURE_FORMAT_DER - case v30.SignatureFormat.SIGNATURE_FORMAT_CONCAT => iss.SignatureFormat.SIGNATURE_FORMAT_CONCAT - case v30.SignatureFormat.SIGNATURE_FORMAT_RAW => iss.SignatureFormat.SIGNATURE_FORMAT_RAW + lapiCrypto.SignatureFormat.SIGNATURE_FORMAT_UNSPECIFIED + case v30.SignatureFormat.SIGNATURE_FORMAT_DER => lapiCrypto.SignatureFormat.SIGNATURE_FORMAT_DER + case v30.SignatureFormat.SIGNATURE_FORMAT_CONCAT => + lapiCrypto.SignatureFormat.SIGNATURE_FORMAT_CONCAT + case v30.SignatureFormat.SIGNATURE_FORMAT_RAW => lapiCrypto.SignatureFormat.SIGNATURE_FORMAT_RAW case v30.SignatureFormat.SIGNATURE_FORMAT_SYMBOLIC => - iss.SignatureFormat.SIGNATURE_FORMAT_SYMBOLIC + lapiCrypto.SignatureFormat.SIGNATURE_FORMAT_SYMBOLIC case v30.SignatureFormat.Unrecognized(unrecognizedValue) => - iss.SignatureFormat.Unrecognized(unrecognizedValue) + lapiCrypto.SignatureFormat.Unrecognized(unrecognizedValue) } implicit val LAPIToCantonSignatureFormatTransformer - : Transformer[iss.SignatureFormat, v30.SignatureFormat] = { - case iss.SignatureFormat.SIGNATURE_FORMAT_UNSPECIFIED => + : Transformer[lapiCrypto.SignatureFormat, v30.SignatureFormat] = { + case lapiCrypto.SignatureFormat.SIGNATURE_FORMAT_UNSPECIFIED => v30.SignatureFormat.SIGNATURE_FORMAT_UNSPECIFIED - case iss.SignatureFormat.SIGNATURE_FORMAT_DER => v30.SignatureFormat.SIGNATURE_FORMAT_DER - case iss.SignatureFormat.SIGNATURE_FORMAT_CONCAT => v30.SignatureFormat.SIGNATURE_FORMAT_CONCAT - case
iss.SignatureFormat.SIGNATURE_FORMAT_RAW => v30.SignatureFormat.SIGNATURE_FORMAT_RAW - case iss.SignatureFormat.SIGNATURE_FORMAT_SYMBOLIC => + case lapiCrypto.SignatureFormat.SIGNATURE_FORMAT_DER => v30.SignatureFormat.SIGNATURE_FORMAT_DER + case lapiCrypto.SignatureFormat.SIGNATURE_FORMAT_CONCAT => + v30.SignatureFormat.SIGNATURE_FORMAT_CONCAT + case lapiCrypto.SignatureFormat.SIGNATURE_FORMAT_RAW => v30.SignatureFormat.SIGNATURE_FORMAT_RAW + case lapiCrypto.SignatureFormat.SIGNATURE_FORMAT_SYMBOLIC => v30.SignatureFormat.SIGNATURE_FORMAT_SYMBOLIC - case iss.SignatureFormat.Unrecognized(unrecognizedValue) => + case lapiCrypto.SignatureFormat.Unrecognized(unrecognizedValue) => v30.SignatureFormat.Unrecognized(unrecognizedValue) } diff --git a/sdk/canton/community/common/src/main/scala/com/digitalasset/canton/interactive/InteractiveSubmissionEnricher.scala b/sdk/canton/community/common/src/main/scala/com/digitalasset/canton/interactive/InteractiveSubmissionEnricher.scala index 18acd00b6cb1..fd5dc29cf050 100644 --- a/sdk/canton/community/common/src/main/scala/com/digitalasset/canton/interactive/InteractiveSubmissionEnricher.scala +++ b/sdk/canton/community/common/src/main/scala/com/digitalasset/canton/interactive/InteractiveSubmissionEnricher.scala @@ -3,8 +3,11 @@ package com.digitalasset.canton.interactive +import cats.data.EitherT +import cats.implicits.catsSyntaxEitherId import com.digitalasset.canton.interactive.InteractiveSubmissionEnricher.PackageResolver import com.digitalasset.canton.lifecycle.FutureUnlessShutdown +import com.digitalasset.canton.protocol.LfTemplateId import com.digitalasset.canton.tracing.TraceContext import com.digitalasset.daml.lf.data.Ref.PackageId import com.digitalasset.daml.lf.engine.* @@ -42,19 +45,42 @@ class InteractiveSubmissionEnricher(engine: Engine, packageResolver: PackageReso /** Enrich FCI with type info and labels. Leave out trailing none fields. */ - def enrichContract(contract: FatContractInstance)(implicit + def enrichContract(contract: FatContractInstance, targetPackageIds: Set[PackageId])(implicit ec: ExecutionContext, traceContext: TraceContext, - ): FutureUnlessShutdown[FatContractInstance] = - consumeEnricherResult(enricher.enrichContract(contract)) + ): EitherT[FutureUnlessShutdown, String, FatContractInstance] = + EitherT(targetPackageIds.toList.minOption match { + case Some(pkgId) => + enrichCreateNode(contract.toCreateNode, pkgId).map { enriched => + FatContractInstance + .fromCreateNode( + enriched, + contract.createdAt, + contract.authenticationData, + ) + .asRight[String] + } + case None => + FutureUnlessShutdown.pure( + s"Cannot enrich contract ${contract.contractId} without knowing its package ID" + .asLeft[FatContractInstance] + ) + }) - /** Enrich create node with type info and labels. Leave out trailing none fields. 
- */ - def enrichCreateNode(create: Node.Create)(implicit + private def enrichCreateNode(original: Node.Create, targetPackageId: PackageId)(implicit ec: ExecutionContext, traceContext: TraceContext, - ): FutureUnlessShutdown[Node.Create] = - consumeEnricherResult(enricher.enrichCreate(create)) + ): FutureUnlessShutdown[Node.Create] = { + + def updateTemplateId(create: Node.Create, targetPackageId: PackageId): Node.Create = { + val templateId = LfTemplateId(targetPackageId, create.templateId.qualifiedName) + create.copy(templateId = templateId) + } + + consumeEnricherResult(enricher.enrichCreate(updateTemplateId(original, targetPackageId))).map( + enriched => updateTemplateId(enriched, original.templateId.packageId) + ) + } private[this] def consumeEnricherResult[V]( result: Result[V] diff --git a/sdk/canton/community/common/src/main/scala/com/digitalasset/canton/topology/admin/grpc/GrpcTopologyManagerReadService.scala b/sdk/canton/community/common/src/main/scala/com/digitalasset/canton/topology/admin/grpc/GrpcTopologyManagerReadService.scala index 8c98d1296cc4..15030969968c 100644 --- a/sdk/canton/community/common/src/main/scala/com/digitalasset/canton/topology/admin/grpc/GrpcTopologyManagerReadService.scala +++ b/sdk/canton/community/common/src/main/scala/com/digitalasset/canton/topology/admin/grpc/GrpcTopologyManagerReadService.scala @@ -932,7 +932,7 @@ class GrpcTopologyManagerReadService( topologySnapshot = topologyClient.currentSnapshotApproximation _ <- EitherT.fromOptionF( - fopt = topologySnapshot.isSynchronizerUpgradeOngoing(), + fopt = topologySnapshot.synchronizerUpgradeOngoing(), ifNone = TopologyManagerError.NoOngoingSynchronizerUpgrade.Failure(): RpcError, ) } yield { diff --git a/sdk/canton/community/common/src/main/scala/com/digitalasset/canton/topology/admin/grpc/GrpcTopologyManagerWriteService.scala b/sdk/canton/community/common/src/main/scala/com/digitalasset/canton/topology/admin/grpc/GrpcTopologyManagerWriteService.scala index 9240197e94a6..1ec9486805f2 100644 --- a/sdk/canton/community/common/src/main/scala/com/digitalasset/canton/topology/admin/grpc/GrpcTopologyManagerWriteService.scala +++ b/sdk/canton/community/common/src/main/scala/com/digitalasset/canton/topology/admin/grpc/GrpcTopologyManagerWriteService.scala @@ -9,13 +9,11 @@ import cats.syntax.either.* import cats.syntax.parallel.* import cats.syntax.traverse.* import com.digitalasset.base.error.RpcError -import com.digitalasset.canton.ProtoDeserializationError import com.digitalasset.canton.ProtoDeserializationError.{ FieldNotSet, ProtoDeserializationFailure, ValueConversionError, } -import com.digitalasset.canton.config.NonNegativeFiniteDuration import com.digitalasset.canton.crypto.* import com.digitalasset.canton.lifecycle.FutureUnlessShutdown import com.digitalasset.canton.logging.{NamedLoggerFactory, NamedLogging} @@ -38,6 +36,7 @@ import com.digitalasset.canton.topology.transaction.TopologyTransaction.TxHash import com.digitalasset.canton.tracing.{TraceContext, TraceContextGrpc} import com.digitalasset.canton.util.{EitherTUtil, ErrorUtil, GrpcStreamingUtils} import com.digitalasset.canton.version.{ProtocolVersion, ProtocolVersionValidation} +import com.digitalasset.canton.{ProtoDeserializationError, config} import com.google.protobuf.ByteString import com.google.protobuf.duration.Duration import io.grpc.stub.StreamObserver @@ -132,7 +131,9 @@ class GrpcTopologyManagerWriteService( waitToBecomeEffectiveO <- EitherT .fromEither[FutureUnlessShutdown]( waitToBecomeEffective - 
.traverse(NonNegativeFiniteDuration.fromProtoPrimitive("wait_to_become_effective")) + .traverse( + config.NonNegativeFiniteDuration.fromProtoPrimitive("wait_to_become_effective") + ) .leftMap(ProtoDeserializationFailure.Wrap(_)) ) (op, serial, validatedMapping, signingKeys, forceChanges) = mapping @@ -208,7 +209,9 @@ class GrpcTopologyManagerWriteService( waitToBecomeEffectiveO <- EitherT .fromEither[FutureUnlessShutdown]( request.waitToBecomeEffective - .traverse(NonNegativeFiniteDuration.fromProtoPrimitive("wait_to_become_effective")) + .traverse( + config.NonNegativeFiniteDuration.fromProtoPrimitive("wait_to_become_effective") + ) .leftMap(ProtoDeserializationFailure.Wrap(_)) ) _ <- addTransactions(signedTxs, request.store, forceChanges, waitToBecomeEffectiveO) @@ -250,7 +253,9 @@ class GrpcTopologyManagerWriteService( waitToBecomeEffectiveO <- EitherT .fromEither[FutureUnlessShutdown]( waitToBecomeEffectiveP - .traverse(NonNegativeFiniteDuration.fromProtoPrimitive("wait_to_become_effective")) + .traverse( + config.NonNegativeFiniteDuration.fromProtoPrimitive("wait_to_become_effective") + ) .leftMap(ProtoDeserializationFailure.Wrap(_)) ) _ <- addTransactions(signedTxs, store, ForceFlags.all, waitToBecomeEffectiveO) @@ -296,7 +301,9 @@ class GrpcTopologyManagerWriteService( waitToBecomeEffectiveO <- EitherT .fromEither[FutureUnlessShutdown]( waitToBecomeEffectiveP - .traverse(NonNegativeFiniteDuration.fromProtoPrimitive("wait_to_become_effective")) + .traverse( + config.NonNegativeFiniteDuration.fromProtoPrimitive("wait_to_become_effective") + ) .leftMap(ProtoDeserializationFailure.Wrap(_)) ) _ <- addTransactions(signedTxs, store, ForceFlags.all, waitToBecomeEffectiveO) @@ -308,7 +315,7 @@ class GrpcTopologyManagerWriteService( signedTxs: Seq[SignedTopologyTransaction[TopologyChangeOp, TopologyMapping]], store: Option[v30.StoreId], forceChanges: ForceFlags, - waitToBecomeEffective: Option[NonNegativeFiniteDuration], + waitToBecomeEffective: Option[config.NonNegativeFiniteDuration], )(implicit traceContext: TraceContext ): EitherT[FutureUnlessShutdown, RpcError, Unit] = @@ -396,7 +403,6 @@ class GrpcTopologyManagerWriteService( manager.managerVersion.serialization, existingTransaction, ) - .mapK(FutureUnlessShutdown.outcomeK) .leftWiden[RpcError] } yield transaction.toByteString -> transaction.hash.hash.getCryptographicEvidence } diff --git a/sdk/canton/community/common/src/test/scala/com/digitalasset/canton/sequencing/ConnectionPoolTestHelpers.scala b/sdk/canton/community/common/src/test/scala/com/digitalasset/canton/sequencing/ConnectionPoolTestHelpers.scala index e506598bc232..d5443e596a6c 100644 --- a/sdk/canton/community/common/src/test/scala/com/digitalasset/canton/sequencing/ConnectionPoolTestHelpers.scala +++ b/sdk/canton/community/common/src/test/scala/com/digitalasset/canton/sequencing/ConnectionPoolTestHelpers.scala @@ -189,7 +189,7 @@ trait ConnectionPoolTestHelpers { testTimeouts, loggerFactory, ) - val pool = poolFactory.create(config).valueOrFail("create connection pool") + val pool = poolFactory.create(config, name = "test").valueOrFail("create connection pool") val listener = new TestHealthListener(pool.health) pool.health.registerOnHealthChange(listener) @@ -202,21 +202,18 @@ trait ConnectionPoolTestHelpers { } protected def mkSubscriptionPoolConfig( - trustThreshold: PositiveInt, - livenessMargin: NonNegativeInt, + livenessMargin: NonNegativeInt ): SequencerSubscriptionPoolConfig = SequencerSubscriptionPoolConfig( - trustThreshold = trustThreshold, livenessMargin = 
livenessMargin, subscriptionRequestDelay = sequencerConnectionPoolDelays.subscriptionRequestDelay, ) protected def withSubscriptionPool[V]( - trustThreshold: PositiveInt, livenessMargin: NonNegativeInt, connectionPool: SequencerConnectionXPool, )(f: (SequencerSubscriptionPool, TestHealthListener) => V): V = { - val config = mkSubscriptionPoolConfig(trustThreshold, livenessMargin) + val config = mkSubscriptionPoolConfig(livenessMargin) val subscriptionPoolFactory = new SequencerSubscriptionPoolFactoryImpl( sequencerSubscriptionFactory = new TestSequencerSubscriptionXFactory(timeouts, loggerFactory), @@ -255,7 +252,7 @@ trait ConnectionPoolTestHelpers { ) { (connectionPool, _, _, _) => connectionPool.start().futureValueUS.valueOrFail("initialization") - withSubscriptionPool(trustThreshold, livenessMargin, connectionPool) { + withSubscriptionPool(livenessMargin, connectionPool) { (subscriptionPool, subscriptionPoolListener) => f(subscriptionPool, subscriptionPoolListener) } @@ -404,7 +401,8 @@ protected object ConnectionPoolTestHelpers { val createdConnections: CreatedConnections = connectionFactory.createdConnections override def create( - initialConfig: SequencerConnectionXPoolConfig + initialConfig: SequencerConnectionXPoolConfig, + name: String, )(implicit ec: ExecutionContextExecutor, esf: ExecutionSequencerFactory, @@ -431,6 +429,7 @@ protected object ConnectionPoolTestHelpers { sequencerConnections: SequencerConnections, expectedPSIdO: Option[PhysicalSynchronizerId], tracingConfig: TracingConfig, + name: String, )(implicit ec: ExecutionContextExecutor, esf: ExecutionSequencerFactory, diff --git a/sdk/canton/community/common/src/test/scala/com/digitalasset/canton/sequencing/client/SequencerClientTest.scala b/sdk/canton/community/common/src/test/scala/com/digitalasset/canton/sequencing/client/SequencerClientTest.scala index acaa937e84dd..2a18e8910c61 100644 --- a/sdk/canton/community/common/src/test/scala/com/digitalasset/canton/sequencing/client/SequencerClientTest.scala +++ b/sdk/canton/community/common/src/test/scala/com/digitalasset/canton/sequencing/client/SequencerClientTest.scala @@ -7,10 +7,17 @@ import cats.data.EitherT import cats.syntax.either.* import cats.syntax.foldable.* import com.daml.metrics.api.MetricsContext +import com.daml.nonempty.NonEmpty import com.digitalasset.canton.* import com.digitalasset.canton.concurrent.Threading +import com.digitalasset.canton.config as cantonConfig import com.digitalasset.canton.config.* -import com.digitalasset.canton.config.RequireTypes.{NonNegativeInt, NonNegativeLong, PositiveInt} +import com.digitalasset.canton.config.RequireTypes.{ + NonNegativeInt, + NonNegativeLong, + Port, + PositiveInt, +} import com.digitalasset.canton.crypto.provider.symbolic.SymbolicCrypto import com.digitalasset.canton.crypto.{ Fingerprint, @@ -25,6 +32,7 @@ import com.digitalasset.canton.lifecycle.{CloseContext, FutureUnlessShutdown, Un import com.digitalasset.canton.logging.pretty.{Pretty, PrettyInstances} import com.digitalasset.canton.logging.{LogEntry, NamedLoggerFactory, NamedLogging, TracedLogger} import com.digitalasset.canton.metrics.{CommonMockMetrics, TrafficConsumptionMetrics} +import com.digitalasset.canton.networking.Endpoint import com.digitalasset.canton.protocol.messages.{DefaultOpenEnvelope, UnsignedProtocolMessage} import com.digitalasset.canton.protocol.{ DynamicSynchronizerParametersLookup, @@ -39,7 +47,10 @@ import com.digitalasset.canton.sequencing.InternalSequencerConnectionX.{ ConnectionAttributes, SequencerConnectionXHealth, } 
-import com.digitalasset.canton.sequencing.SequencerConnectionXPool.SequencerConnectionXPoolError +import com.digitalasset.canton.sequencing.SequencerConnectionXPool.{ + SequencerConnectionXPoolConfig, + SequencerConnectionXPoolError, +} import com.digitalasset.canton.sequencing.client.SendAsyncClientError.SendAsyncClientResponseError import com.digitalasset.canton.sequencing.client.SequencedEventValidationError.PreviousTimestampMismatch import com.digitalasset.canton.sequencing.client.SequencerClient.CloseReason.{ @@ -87,7 +98,7 @@ import com.digitalasset.canton.topology.DefaultTestIdentities.{ participant1, } import com.digitalasset.canton.topology.client.{SynchronizerTopologyClient, TopologySnapshot} -import com.digitalasset.canton.tracing.TraceContext +import com.digitalasset.canton.tracing.{TraceContext, TracingConfig} import com.digitalasset.canton.util.EitherTUtil import com.digitalasset.canton.util.PekkoUtil.syntax.* import com.digitalasset.canton.version.{ @@ -785,8 +796,9 @@ final class SequencerClientTest } else Set(upgradeTime.immediatePredecessor, upgradeTime, upgradeTime.immediateSuccessor) - acknowledgedTimestamps = env.transport.acknowledgedTimestamps - .get() ++ env.pool.acknowledgedTimestamps.get() + acknowledgedTimestamps = + if (env.useNewConnectionPool) env.pool.acknowledgedTimestamps.get + else env.transport.acknowledgedTimestamps.get _ = acknowledgedTimestamps shouldBe expectedAcknowledgedTimestamps @@ -1075,7 +1087,7 @@ final class SequencerClientTest val env = RichEnvFactory.create(useNewConnectionPoolO = Some(false)) val testF = for { _ <- env.subscribeAfter() - _ <- env.changeTransport(secondTransport) + _ <- env.changeTransport(secondTransport, None) } yield { val originalSubscriber = env.transport.subscriber.value originalSubscriber.request.timestamp shouldBe None @@ -1109,7 +1121,7 @@ final class SequencerClientTest _ <- env.transport.subscriber.value.sendToHandler(nextDeliver) _ <- env.client.flushClean() - _ <- env.changeTransport(secondTransport) + _ <- env.changeTransport(secondTransport, None) } yield { val originalSubscriber = env.transport.subscriber.value originalSubscriber.request.timestamp shouldBe None @@ -1129,7 +1141,7 @@ final class SequencerClientTest // TODO(i26481): Enable new connection pool (test uses changeTransport()) val env = RichEnvFactory.create(useNewConnectionPoolO = Some(false)) val testF = for { - _ <- env.changeTransport(secondTransport) + _ <- env.changeTransport(secondTransport, None) _ <- env.sendAsync(Batch.empty(testedProtocolVersion)).value } yield { env.transport.lastSend.get() shouldBe None @@ -1149,7 +1161,7 @@ final class SequencerClientTest // TODO(i26481): Enable new connection pool (test uses changeTransport()) val env = RichEnvFactory.create(useNewConnectionPoolO = Some(false)) val testF = for { - _ <- env.changeTransport(secondTransport) + _ <- env.changeTransport(secondTransport, None) _ <- env.logout().value } yield { env.transport.logoutCalled shouldBe false @@ -1167,7 +1179,7 @@ final class SequencerClientTest val env = RichEnvFactory.create(useNewConnectionPoolO = Some(false)) val testF = for { _ <- env.subscribeAfter() - _ <- env.changeTransport(secondTransport) + _ <- env.changeTransport(secondTransport, None) _ <- env.sendAsync(Batch.empty(testedProtocolVersion)).value } yield { env.transport.lastSend.get() shouldBe None @@ -1190,7 +1202,8 @@ final class SequencerClientTest SequencerAlias.tryCreate("somethingElse"), daSequencerId, secondTransport, - ) + ), + None, ) _ <- 
env.sendAsync(Batch.empty(testedProtocolVersion)).value } yield { @@ -1209,26 +1222,31 @@ final class SequencerClientTest ) val env = RichEnvFactory.create() - val testF = for { - _ <- env.subscribeAfter() - error <- loggerFactory - .assertLogs( - env - .changeTransport( - SequencerTransports.default( - secondSequencerId, - secondTransport, - ) - ), - _.errorMessage shouldBe "Adding or removing sequencer subscriptions is not supported at the moment", - ) - .failed - } yield { - error - } - testF.futureValueUS shouldBe an[IllegalArgumentException] - testF.futureValueUS.getMessage shouldBe "Adding or removing sequencer subscriptions is not supported at the moment" + // When using the connection pool, this test does not make sense + if (!env.useNewConnectionPool) { + val testF = for { + _ <- env.subscribeAfter() + error <- loggerFactory + .assertLogs( + env + .changeTransport( + SequencerTransports.default( + secondSequencerId, + secondTransport, + ), + None, + ), + _.errorMessage shouldBe "Adding or removing sequencer subscriptions is not supported at the moment", + ) + .failed + } yield { + error + } + + testF.futureValueUS shouldBe an[IllegalArgumentException] + testF.futureValueUS.getMessage shouldBe "Adding or removing sequencer subscriptions is not supported at the moment" + } env.client.close() } } @@ -1316,16 +1334,23 @@ final class SequencerClientTest ) def changeTransport( - newTransport: SequencerClientTransport & SequencerClientTransportPekko + newTransport: SequencerClientTransport & SequencerClientTransportPekko, + newConnectionPoolConfigO: Option[SequencerConnectionXPoolConfig], )(implicit ev: Client <:< RichSequencerClient): FutureUnlessShutdown[Unit] = changeTransport( - SequencerTransports.default(daSequencerId, newTransport) + SequencerTransports.default(daSequencerId, newTransport), + newConnectionPoolConfigO, ) - def changeTransport(sequencerTransports: SequencerTransports[?])(implicit + def changeTransport( + sequencerTransports: SequencerTransports[?], + newConnectionPoolConfigO: Option[SequencerConnectionXPoolConfig], + )(implicit ev: Client <:< RichSequencerClient ): FutureUnlessShutdown[Unit] = - ev(client).changeTransport(sequencerTransports) + ev(client) + .changeTransport(sequencerTransports, newConnectionPoolConfigO) + .valueOrFail("changeTransport") def sendAsync( batch: Batch[DefaultOpenEnvelope], @@ -1514,7 +1539,14 @@ final class SequencerClientTest override val health: SequencerConnectionXHealth = new SequencerConnectionXHealth.AlwaysValidated(s"$name-health", logger) - override def config: ConnectionXConfig = ??? + override def config: ConnectionXConfig = ConnectionXConfig( + name = name, + endpoint = Endpoint("dummy-endpoint", Port.tryCreate(0)), + transportSecurity = false, + customTrustCertificates = None, + expectedSequencerIdO = None, + tracePropagation = TracingConfig.Propagation.Disabled, + ) override def attributes: ConnectionAttributes = ConnectionAttributes( @@ -1631,7 +1663,13 @@ final class SequencerClientTest traceContext: TraceContext ): EitherT[FutureUnlessShutdown, SequencerConnectionXPoolError, Unit] = ??? - override def config: SequencerConnectionXPool.SequencerConnectionXPoolConfig = ??? 
+ override def config: SequencerConnectionXPool.SequencerConnectionXPoolConfig = + SequencerConnectionXPoolConfig( + connections = NonEmpty(Seq, connection.config), + trustThreshold = PositiveInt.one, + minRestartConnectionDelay = cantonConfig.NonNegativeFiniteDuration.Zero, + maxRestartConnectionDelay = cantonConfig.NonNegativeFiniteDuration.Zero, + ) override def updateConfig(newConfig: SequencerConnectionXPool.SequencerConnectionXPoolConfig)( implicit traceContext: TraceContext diff --git a/sdk/canton/community/common/src/test/scala/com/digitalasset/canton/topology/GeneratorsTopology.scala b/sdk/canton/community/common/src/test/scala/com/digitalasset/canton/topology/GeneratorsTopology.scala index b782ef8fea98..c0be0da679be 100644 --- a/sdk/canton/community/common/src/test/scala/com/digitalasset/canton/topology/GeneratorsTopology.scala +++ b/sdk/canton/community/common/src/test/scala/com/digitalasset/canton/topology/GeneratorsTopology.scala @@ -5,14 +5,18 @@ package com.digitalasset.canton.topology import com.digitalasset.canton.config.RequireTypes.NonNegativeInt import com.digitalasset.canton.crypto.Fingerprint +import com.digitalasset.canton.topology.transaction.SynchronizerTrustCertificate.ParticipantTopologyFeatureFlag import com.digitalasset.canton.version.ProtocolVersion import magnolify.scalacheck.auto.* -import org.scalacheck.Arbitrary +import org.scalacheck.{Arbitrary, Gen} final class GeneratorsTopology(protocolVersion: ProtocolVersion) { import com.digitalasset.canton.config.GeneratorsConfig.* import com.digitalasset.canton.Generators.* + implicit val unrecognizedFeatureFlagArb: Arbitrary[ParticipantTopologyFeatureFlag] = Arbitrary { + Gen.oneOf(ParticipantTopologyFeatureFlag.knownTopologyFeatureFlags) + } implicit val fingerprintArb: Arbitrary[Fingerprint] = Arbitrary( string68Arb.arbitrary.map(Fingerprint.tryFromString) ) diff --git a/sdk/canton/community/conformance-testing/src/test/scala/com/digitalasset/canton/integration/tests/infra/UseLedgerApiTestToolTest.scala b/sdk/canton/community/conformance-testing/src/test/scala/com/digitalasset/canton/integration/tests/infra/UseLedgerApiTestToolTest.scala index 16b048793994..fa651797ca9f 100644 --- a/sdk/canton/community/conformance-testing/src/test/scala/com/digitalasset/canton/integration/tests/infra/UseLedgerApiTestToolTest.scala +++ b/sdk/canton/community/conformance-testing/src/test/scala/com/digitalasset/canton/integration/tests/infra/UseLedgerApiTestToolTest.scala @@ -20,33 +20,30 @@ import scala.util.matching.Regex final class UseLedgerApiTestToolTest extends AnyFlatSpec with Matchers with BaseTest { private val versions = Seq( - "3.0.0-snapshot.20240209.12523.0.v2fa088f9", - "3.0.0-snapshot.20240212.12541.0.v38aabe2f", - "3.1.0-snapshot.20240404.13035.0.v35213e4a", - "3.1.0-snapshot.20240405.13039.0.vc7eec3f0", - "3.1.1-snapshot.20240407.13039.0.vc7eec3f0", - "3.1.10-snapshot.20240408.13039.0.vc7eec3f0", - "3.2.0-ad-hoc.20240827.13957.0.v70727775", - "3.2.0-snapshot.20240826.13949.0.ve03dfcdf", - "3.2.0-snapshot.20240828.13964.0.v26cc6ace", "3.3.0-snapshot.20250416.15779.0.v6cccc0c4", + "3.3.0-ad-hoc.20250905.16091.0.v704bf59d", + "3.3.0-snapshot.20251007.16123.0.v670c8fae", + "3.3.1-snapshot.20251007.16123.0.v670c8fae", // does not exist; used only to test patch-version handling + "3.3.10-snapshot.20251007.16123.0.v670c8fae", // does not exist; used only to test patch-version handling + "3.4.0-snapshot.20250429.15866.0.vc8f10812", + "3.4.0-snapshot.20251003.17075.0.v69d92264", "dev", ) "findAllReleases" should "find the major.minor.patch releases
correctly" in { - findAllReleases(versions) shouldBe Seq("3.0.0", "3.1.0", "3.1.1", "3.1.10", "3.2.0", "3.3.0") + findAllReleases(versions) shouldBe Seq("3.3.0", "3.3.1", "3.3.10", "3.4.0") } "findMatchingVersions" should "find and sort (by date) all the versions matching the given release" in { - findMatchingVersions(versions, "3.1.0") shouldBe Seq( - "3.1.0-snapshot.20240404.13035.0.v35213e4a", - "3.1.0-snapshot.20240405.13039.0.vc7eec3f0", + findMatchingVersions(versions, "3.3.0") shouldBe Seq( + "3.3.0-snapshot.20250416.15779.0.v6cccc0c4", + "3.3.0-ad-hoc.20250905.16091.0.v704bf59d", + "3.3.0-snapshot.20251007.16123.0.v670c8fae", ) - findMatchingVersions(versions, "3.2.0") shouldBe Seq( - "3.2.0-snapshot.20240826.13949.0.ve03dfcdf", - "3.2.0-ad-hoc.20240827.13957.0.v70727775", - "3.2.0-snapshot.20240828.13964.0.v26cc6ace", + findMatchingVersions(versions, "3.4.0") shouldBe Seq( + "3.4.0-snapshot.20250429.15866.0.vc8f10812", + "3.4.0-snapshot.20251003.17075.0.v69d92264", ) } @@ -97,10 +94,14 @@ final class UseLedgerApiTestToolTest extends AnyFlatSpec with Matchers with Base val currentMajorMinor = ReleaseVersion(ReleaseVersion.current.major, ReleaseVersion.current.minor, 0) - val releaseVersions3 = allReleases.filter { case (major, _) => major == 3 } + // We only test releases starting from 3.3.0, as earlier releases are no longer supported + // TODO(#16458): update the earliest tested release once Canton 3 is stable + val releases33andLater = allReleases.filter { case (major, minor) => + major == 3 && minor >= 3 + } extractedVersions .filter(_ <= currentMajorMinor) - .map(_.majorMinor) should contain allElementsOf releaseVersions3.dropRight(1) + .map(_.majorMinor) should contain allElementsOf releases33andLater.dropRight(1) } } diff --git a/sdk/canton/community/conformance-testing/src/test/scala/com/digitalasset/canton/integration/tests/ledgerapi/LedgerApiConformanceTest.scala b/sdk/canton/community/conformance-testing/src/test/scala/com/digitalasset/canton/integration/tests/ledgerapi/LedgerApiConformanceTest.scala index 96a2f72bcb92..29a50dc2241e 100644 --- a/sdk/canton/community/conformance-testing/src/test/scala/com/digitalasset/canton/integration/tests/ledgerapi/LedgerApiConformanceTest.scala +++ b/sdk/canton/community/conformance-testing/src/test/scala/com/digitalasset/canton/integration/tests/ledgerapi/LedgerApiConformanceTest.scala @@ -176,36 +176,6 @@ object LedgerApiConformanceBase { ) } -class MyTest extends TestByName(Seq("VettingIT")) -//class MyTest extends TestByName(Seq("VettingIT")) - -// Can be used to run a single conformance test locally during development, -// passing the test name via environment variable, e.g. -// -// $ LAPI_RUN_CONFORMANCE_TEST=EventQueryServiceIT:TXEventsByContractIdBasic sbt "conformance-testing/testOnly com.digitalasset.canton.integration.tests.ledgerapi.TestByEnvVar" -class TestByEnvVar extends TestByName(sys.env.get("LAPI_RUN_CONFORMANCE_TEST").toList) - -// Subclass this to provide mechanisms for running a specific tests by name. 
-class TestByName( - providedName: Seq[String], - synchronizers: Int = 1, - envDef: EnvironmentDefinition = EnvironmentDefinition.P1_S1M1, -) extends SingleVersionLedgerApiConformanceBase { - registerPlugin(new UsePostgres(loggerFactory)) - - override def connectedSynchronizersCount = synchronizers - override def environmentDefinition = envDef - .withSetup(setupLedgerApiConformanceEnvironment) - - "Ledger Api Test Tool" can { - providedName.foreach { testName => - s"run test = $testName" in { implicit env => - ledgerApiTestToolPlugin.runSuitesSerially(suites = testName, exclude = Nil) - } - } - } -} - trait LedgerApiShardedConformanceBase extends SingleVersionLedgerApiConformanceBase { override def connectedSynchronizersCount = 1 diff --git a/sdk/canton/community/demo/src/main/daml/ai-analysis/daml.yaml b/sdk/canton/community/demo/src/main/daml/ai-analysis/daml.yaml index 11df8c96a35a..5e2feff1483f 100644 --- a/sdk/canton/community/demo/src/main/daml/ai-analysis/daml.yaml +++ b/sdk/canton/community/demo/src/main/daml/ai-analysis/daml.yaml @@ -1,4 +1,4 @@ -sdk-version: 3.4.0-snapshot.20251002.14252.0.v9bd7e7ad +sdk-version: 3.4.0-snapshot.20251007.14274.0.ve2024cd6 build-options: - --target=2.1 name: ai-analysis diff --git a/sdk/canton/community/demo/src/main/daml/bank/daml.yaml b/sdk/canton/community/demo/src/main/daml/bank/daml.yaml index 6277c0b9d755..64c3af708c4f 100644 --- a/sdk/canton/community/demo/src/main/daml/bank/daml.yaml +++ b/sdk/canton/community/demo/src/main/daml/bank/daml.yaml @@ -1,4 +1,4 @@ -sdk-version: 3.4.0-snapshot.20251002.14252.0.v9bd7e7ad +sdk-version: 3.4.0-snapshot.20251007.14274.0.ve2024cd6 build-options: - --target=2.1 name: bank diff --git a/sdk/canton/community/demo/src/main/daml/doctor/daml.yaml b/sdk/canton/community/demo/src/main/daml/doctor/daml.yaml index ae2142acf87a..390554eeee9c 100644 --- a/sdk/canton/community/demo/src/main/daml/doctor/daml.yaml +++ b/sdk/canton/community/demo/src/main/daml/doctor/daml.yaml @@ -1,4 +1,4 @@ -sdk-version: 3.4.0-snapshot.20251002.14252.0.v9bd7e7ad +sdk-version: 3.4.0-snapshot.20251007.14274.0.ve2024cd6 build-options: - --target=2.1 name: doctor diff --git a/sdk/canton/community/demo/src/main/daml/health-insurance/daml.yaml b/sdk/canton/community/demo/src/main/daml/health-insurance/daml.yaml index a58457dc395c..eca9aaa6c0cb 100644 --- a/sdk/canton/community/demo/src/main/daml/health-insurance/daml.yaml +++ b/sdk/canton/community/demo/src/main/daml/health-insurance/daml.yaml @@ -1,4 +1,4 @@ -sdk-version: 3.4.0-snapshot.20251002.14252.0.v9bd7e7ad +sdk-version: 3.4.0-snapshot.20251007.14274.0.ve2024cd6 build-options: - --target=2.1 name: health-insurance diff --git a/sdk/canton/community/demo/src/main/daml/medical-records/daml.yaml b/sdk/canton/community/demo/src/main/daml/medical-records/daml.yaml index f3d0bb83aaf0..fc83453de086 100644 --- a/sdk/canton/community/demo/src/main/daml/medical-records/daml.yaml +++ b/sdk/canton/community/demo/src/main/daml/medical-records/daml.yaml @@ -1,4 +1,4 @@ -sdk-version: 3.4.0-snapshot.20251002.14252.0.v9bd7e7ad +sdk-version: 3.4.0-snapshot.20251007.14274.0.ve2024cd6 build-options: - --target=2.1 name: medical-records diff --git a/sdk/canton/community/ledger-api-bench-tool/src/main/daml/benchtool/daml.yaml b/sdk/canton/community/ledger-api-bench-tool/src/main/daml/benchtool/daml.yaml index 3b60af625a85..fb8b8098e8dd 100644 --- a/sdk/canton/community/ledger-api-bench-tool/src/main/daml/benchtool/daml.yaml +++ b/sdk/canton/community/ledger-api-bench-tool/src/main/daml/benchtool/daml.yaml 
@@ -1,4 +1,4 @@ -sdk-version: 3.4.0-snapshot.20251002.14252.0.v9bd7e7ad +sdk-version: 3.4.0-snapshot.20251007.14274.0.ve2024cd6 build-options: - --enable-interfaces=yes name: benchtool-tests diff --git a/sdk/canton/community/ledger-api/src/main/protobuf/com/daml/ledger/api/v2/admin/party_management_service.proto b/sdk/canton/community/ledger-api/src/main/protobuf/com/daml/ledger/api/v2/admin/party_management_service.proto index 4223f1bed4d5..b40518158ae0 100644 --- a/sdk/canton/community/ledger-api/src/main/protobuf/com/daml/ledger/api/v2/admin/party_management_service.proto +++ b/sdk/canton/community/ledger-api/src/main/protobuf/com/daml/ledger/api/v2/admin/party_management_service.proto @@ -7,7 +7,6 @@ package com.daml.ledger.api.v2.admin; import "com/daml/ledger/api/v2/admin/object_meta.proto"; import "com/daml/ledger/api/v2/crypto.proto"; -import "com/daml/ledger/api/v2/interactive/interactive_submission_service.proto"; import "google/protobuf/field_mask.proto"; option csharp_namespace = "Com.Daml.Ledger.Api.V2.Admin"; @@ -78,6 +77,11 @@ service PartyManagementService { // It can optionally be hosted on other nodes (then called a multi-hosted party). // If hosted on additional nodes, explicit authorization of the hosting relationship must be performed on those nodes // before the party can be used. + // Decentralized namespaces are supported but must be provided fully authorized by their owners. + // The individual owner namespace transactions can be submitted in the same call (fully authorized as well). + // In the simple case of a non-multi-hosted, non-decentralized party, the RPC will return once the party is + // effectively allocated and ready to use, similar to the AllocateParty behavior. + // For more complex scenarios, applications may need to query the party status explicitly (only through the admin API as of now). rpc AllocateExternalParty(AllocateExternalPartyRequest) returns (AllocateExternalPartyResponse); // Update selected modifiable participant-local attributes of a party details resource. @@ -202,7 +206,7 @@ message AllocateExternalPartyRequest { // Additional signatures for this transaction specifically // Use for transactions that require additional signatures beyond the namespace key signatures // e.g: PartyToKeyMapping must be signed by all registered keys - repeated interactive.Signature signatures = 2; + repeated Signature signatures = 2; } // TODO(#27670) support synchronizer aliases // Required string synchronizer = 1; // TopologyTransactions to onboard the external party - // Must contain 3 signed transactions: NamespaceDelegation, PartyToKeyMapping, PartyToParticipant + // Can contain: + // - A namespace for the party. + // This can be either a single NamespaceDelegation, + // or DecentralizedNamespaceDefinition along with its authorized namespace owners in the form of NamespaceDelegations. + // May be provided; if so, it must be fully authorized by the signatures in this request combined with the existing topology state. + // - A PartyToKeyMapping to register the party's signing keys. + // May be provided; if so, it must be fully authorized by the signatures in this request combined with the existing topology state. + // - A PartyToParticipant to register the hosting relationship of the party. + // Must be provided.
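For illustration, a minimal client-side sketch of this allocation flow in Scala (a sketch under assumptions, not normative API usage: it presumes an Ed25519 namespace key, presumes that the serialized onboarding transactions and their combined hash were already obtained, e.g. from the topology-generation endpoint, and the helper name buildAllocateExternalPartyRequest is hypothetical; it mirrors the test-tool changes later in this diff):

import com.daml.ledger.api.v2.admin.party_management_service.AllocateExternalPartyRequest
import com.daml.ledger.api.v2.crypto as lapicrypto
import com.google.protobuf.ByteString
import java.security.{KeyPair, Signature}

// Hypothetical helper: signs the combined hash once, so the individual
// SignedTransaction entries can carry empty signature lists.
def buildAllocateExternalPartyRequest(
    synchronizerId: String,
    keyPair: KeyPair, // Ed25519 namespace key
    onboardingTransactions: Seq[ByteString], // serialized topology transactions
    multiHash: ByteString, // combined hash over all onboarding transactions
    publicKeyFingerprint: String,
): AllocateExternalPartyRequest = {
  val signing = Signature.getInstance("Ed25519")
  signing.initSign(keyPair.getPrivate)
  signing.update(multiHash.toByteArray)
  AllocateExternalPartyRequest(
    synchronizer = synchronizerId,
    onboardingTransactions = onboardingTransactions.map(tx =>
      AllocateExternalPartyRequest.SignedTransaction(tx, Seq.empty)
    ),
    multiHashSignatures = Seq(
      lapicrypto.Signature(
        format = lapicrypto.SignatureFormat.SIGNATURE_FORMAT_RAW,
        signature = ByteString.copyFrom(signing.sign()),
        signedBy = publicKeyFingerprint,
        signingAlgorithmSpec = lapicrypto.SigningAlgorithmSpec.SIGNING_ALGORITHM_SPEC_ED25519,
      )
    ),
    identityProviderId = "",
  )
}

Signing the combined hash once is the multi_hash_signatures path described below; per-transaction signatures remain available for keys that must co-sign individual transactions.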
// Required repeated SignedTransaction onboarding_transactions = 2; // Optional signatures of the combined hash of all onboarding_transactions // This may be used instead of providing signatures on each individual transaction - repeated interactive.Signature multi_hash_signatures = 3; + repeated Signature multi_hash_signatures = 3; // The id of the ``Identity Provider`` // If not set, assume the party is managed by the default identity provider. diff --git a/sdk/canton/community/ledger-api/src/main/protobuf/com/daml/ledger/api/v2/crypto.proto b/sdk/canton/community/ledger-api/src/main/protobuf/com/daml/ledger/api/v2/crypto.proto index 39b2c1c353bc..76c71c1649c6 100644 --- a/sdk/canton/community/ledger-api/src/main/protobuf/com/daml/ledger/api/v2/crypto.proto +++ b/sdk/canton/community/ledger-api/src/main/protobuf/com/daml/ledger/api/v2/crypto.proto @@ -61,3 +61,49 @@ message SigningPublicKey { // The key specification SigningKeySpec key_spec = 3; } + +message Signature { + SignatureFormat format = 1; + + bytes signature = 2; + + // The fingerprint/id of the keypair used to create this signature and needed to verify. + string signed_by = 3; + + // The signing algorithm specification used to produce this signature + SigningAlgorithmSpec signing_algorithm_spec = 4; +} + +enum SigningAlgorithmSpec { + SIGNING_ALGORITHM_SPEC_UNSPECIFIED = 0; + + // EdDSA Signature based on Curve25519 with SHA-512 + // http://ed25519.cr.yp.to/ + SIGNING_ALGORITHM_SPEC_ED25519 = 1; + + // Elliptic Curve Digital Signature Algorithm with SHA256 + SIGNING_ALGORITHM_SPEC_EC_DSA_SHA_256 = 2; + + // Elliptic Curve Digital Signature Algorithm with SHA384 + SIGNING_ALGORITHM_SPEC_EC_DSA_SHA_384 = 3; +} + +enum SignatureFormat { + SIGNATURE_FORMAT_UNSPECIFIED = 0; + + // Signature scheme specific signature format + // Legacy format no longer used, except for migrations + SIGNATURE_FORMAT_RAW = 1; + + // ASN.1 + DER-encoding of the `r` and `s` integers, as defined in https://datatracker.ietf.org/doc/html/rfc3279#section-2.2.3 + // Used for ECDSA signatures + SIGNATURE_FORMAT_DER = 2; + + // Concatenation of the integers `r || s` in little-endian form, as defined in https://datatracker.ietf.org/doc/html/rfc8032#section-3.3 + // Note that this is different from the format defined in IEEE P1363, which uses concatenation in big-endian form. 
+ // Used for EdDSA signatures + SIGNATURE_FORMAT_CONCAT = 3; + + // Symbolic crypto, must only be used for testing + SIGNATURE_FORMAT_SYMBOLIC = 10000; +} diff --git a/sdk/canton/community/ledger-api/src/main/protobuf/com/daml/ledger/api/v2/interactive/interactive_submission_service.proto b/sdk/canton/community/ledger-api/src/main/protobuf/com/daml/ledger/api/v2/interactive/interactive_submission_service.proto index 8ff91f5229f6..91ee9148b2fe 100644 --- a/sdk/canton/community/ledger-api/src/main/protobuf/com/daml/ledger/api/v2/interactive/interactive_submission_service.proto +++ b/sdk/canton/community/ledger-api/src/main/protobuf/com/daml/ledger/api/v2/interactive/interactive_submission_service.proto @@ -6,6 +6,7 @@ syntax = "proto3"; package com.daml.ledger.api.v2.interactive; import "com/daml/ledger/api/v2/commands.proto"; +import "com/daml/ledger/api/v2/crypto.proto"; import "com/daml/ledger/api/v2/interactive/interactive_submission_common_data.proto"; import "com/daml/ledger/api/v2/interactive/transaction/v1/interactive_submission_data.proto"; import "com/daml/ledger/api/v2/package_reference.proto"; @@ -100,6 +101,16 @@ message PrepareSubmissionRequest { // Optional MinLedgerTime min_ledger_time = 4; + // Maximum timestamp at which the transaction can be recorded onto the ledger via the synchronizer specified in the `PrepareSubmissionResponse`. + // If submitted after this time, the transaction will be rejected even if otherwise valid, in which case it needs to be prepared and signed again + // with a new valid max_record_time. + // Use this to limit the time-to-live of a prepared transaction: + // once it has passed, the transaction can definitely no longer be + // accepted, and preparing another transaction for the same + // intent is safe again. + // Optional + optional google.protobuf.Timestamp max_record_time = 11; + // Set of parties on whose behalf the command should be executed, if submitted. // If ledger API authorization is enabled, then the authorization metadata must authorize the sender of the request // to **read** (not act) on behalf of each of the given parties. This is because this RPC merely prepares a transaction @@ -173,52 +184,6 @@ message PrepareSubmissionResponse { optional string hashing_details = 4; } -message Signature { - SignatureFormat format = 1; - - bytes signature = 2; - - // The fingerprint/id of the keypair used to create this signature and needed to verify.
- string signed_by = 3; - - // The signing algorithm specification used to produce this signature - SigningAlgorithmSpec signing_algorithm_spec = 4; -} - -enum SigningAlgorithmSpec { - SIGNING_ALGORITHM_SPEC_UNSPECIFIED = 0; - - // EdDSA Signature based on Curve25519 with SHA-512 - // http://ed25519.cr.yp.to/ - SIGNING_ALGORITHM_SPEC_ED25519 = 1; - - // Elliptic Curve Digital Signature Algorithm with SHA256 - SIGNING_ALGORITHM_SPEC_EC_DSA_SHA_256 = 2; - - // Elliptic Curve Digital Signature Algorithm with SHA384 - SIGNING_ALGORITHM_SPEC_EC_DSA_SHA_384 = 3; -} - -enum SignatureFormat { - SIGNATURE_FORMAT_UNSPECIFIED = 0; - - // Signature scheme specific signature format - // Legacy format no longer used, except for migrations - SIGNATURE_FORMAT_RAW = 1; - - // ASN.1 + DER-encoding of the `r` and `s` integers, as defined in https://datatracker.ietf.org/doc/html/rfc3279#section-2.2.3 - // Used for ECDSA signatures - SIGNATURE_FORMAT_DER = 2; - - // Concatenation of the integers `r || s` in little-endian form, as defined in https://datatracker.ietf.org/doc/html/rfc8032#section-3.3 - // Note that this is different from the format defined in IEEE P1363, which uses concatenation in big-endian form. - // Used for EdDSA signatures - SIGNATURE_FORMAT_CONCAT = 3; - - // Symbolic crypto, must only be used for testing - SIGNATURE_FORMAT_SYMBOLIC = 10000; -} - // Signatures provided by a single party message SinglePartySignatures { // Submitting party @@ -500,6 +465,14 @@ message Metadata { // Contextual information needed to process the transaction but not signed, either because it's already indirectly // signed by signing the transaction, or because it doesn't impact the ledger state repeated GlobalKeyMappingEntry global_key_mapping = 8; + + // Maximum timestamp at which the transaction can be recorded onto the ledger via the synchronizer `synchronizer_id`. + // If submitted after this time, the transaction will be rejected even if otherwise valid, in which case it needs to be prepared and signed again + // with a new valid max_record_time.
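As a usage illustration for this field and the corresponding request field above, a minimal hedged Scala sketch (assuming the scalapb-generated bindings for these messages; the identifiers and the five-minute deadline are placeholders):

import com.daml.ledger.api.v2.interactive.interactive_submission_service.PrepareSubmissionRequest
import com.google.protobuf.timestamp.Timestamp
import java.time.Instant

// Cap the prepared transaction's validity at now + 5 minutes: if it has not
// been recorded by then, it can safely be prepared and signed again.
val deadline = Instant.now().plusSeconds(5 * 60)
val request = PrepareSubmissionRequest(
  userId = "example-user", // placeholder identifiers
  commandId = "example-command",
  actAs = Seq("alice::exampleFingerprint"),
  maxRecordTime = Some(Timestamp(deadline.getEpochSecond, deadline.getNano)),
)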
+ // Unsigned in 3.3 to avoid a breaking protocol change + // Will be signed in 3.4+ + // Set max_record_time in the PrepareSubmissionRequest to get this field set accordingly + optional uint64 max_record_time = 11; } /* diff --git a/sdk/canton/community/ledger-test-tool/suites/lf-v2.1/src/main/scala/com/daml/ledger/api/testtool/infrastructure/Party.scala b/sdk/canton/community/ledger-test-tool/suites/lf-v2.1/src/main/scala/com/daml/ledger/api/testtool/infrastructure/Party.scala index 471b7ef60c23..e524908ea21c 100644 --- a/sdk/canton/community/ledger-test-tool/suites/lf-v2.1/src/main/scala/com/daml/ledger/api/testtool/infrastructure/Party.scala +++ b/sdk/canton/community/ledger-test-tool/suites/lf-v2.1/src/main/scala/com/daml/ledger/api/testtool/infrastructure/Party.scala @@ -3,10 +3,13 @@ package com.daml.ledger.api.testtool.infrastructure +import com.daml.ledger.api.v2.crypto as lapicrypto +import com.daml.ledger.api.v2.crypto.SignatureFormat.SIGNATURE_FORMAT_RAW import com.daml.ledger.javaapi.data.Party as ApiParty -import com.daml.nonempty.NonEmpty import com.digitalasset.canton.crypto.Fingerprint +import com.google.protobuf.ByteString +import java.security.{KeyPair, Signature} import scala.language.implicitConversions sealed trait Party { @@ -17,17 +20,34 @@ case class LocalParty(underlying: ApiParty, initialSynchronizers: List[String]) case class ExternalParty( underlying: ApiParty, initialSynchronizers: List[String], - signingFingerprints: NonEmpty[Seq[Fingerprint]], -) extends Party + signingFingerprint: Fingerprint, + signingKeyPair: KeyPair, +) extends Party { + def sign(data: ByteString): ByteString = { + val signatureInstance = Signature.getInstance("Ed25519") + signatureInstance.initSign(signingKeyPair.getPrivate) + signatureInstance.update(data.toByteArray) + ByteString.copyFrom(signatureInstance.sign()) + } + + def signProto(data: ByteString): lapicrypto.Signature = + lapicrypto.Signature( + format = SIGNATURE_FORMAT_RAW, + signature = sign(data), + signedBy = signingFingerprint.toProtoPrimitive, + signingAlgorithmSpec = lapicrypto.SigningAlgorithmSpec.SIGNING_ALGORITHM_SPEC_ED25519, + ) +} object Party { def external( value: String, - signingFingerprints: NonEmpty[Seq[Fingerprint]], + signingFingerprint: Fingerprint, + signingKeyPair: KeyPair, initialSynchronizers: List[String] = List.empty, ): ExternalParty = - ExternalParty(new ApiParty(value), initialSynchronizers, signingFingerprints) + ExternalParty(new ApiParty(value), initialSynchronizers, signingFingerprint, signingKeyPair) def apply(value: String, initialSynchronizers: List[String] = List.empty): Party = LocalParty(new ApiParty(value), initialSynchronizers) diff --git a/sdk/canton/community/ledger-test-tool/suites/lf-v2.1/src/main/scala/com/daml/ledger/api/testtool/infrastructure/participant/ParticipantTestContext.scala b/sdk/canton/community/ledger-test-tool/suites/lf-v2.1/src/main/scala/com/daml/ledger/api/testtool/infrastructure/participant/ParticipantTestContext.scala index 3c7f7e310520..8a79bc3815e8 100644 --- a/sdk/canton/community/ledger-test-tool/suites/lf-v2.1/src/main/scala/com/daml/ledger/api/testtool/infrastructure/participant/ParticipantTestContext.scala +++ b/sdk/canton/community/ledger-test-tool/suites/lf-v2.1/src/main/scala/com/daml/ledger/api/testtool/infrastructure/participant/ParticipantTestContext.scala @@ -70,6 +70,7 @@ import com.digitalasset.canton.util.MonadUtil import com.google.protobuf.ByteString import io.grpc.health.v1.health.HealthCheckResponse +import java.security.KeyPair import
java.time.Instant import java.util.List as JList import scala.concurrent.{ExecutionContext, Future} @@ -180,9 +181,15 @@ trait ParticipantTestContext extends UserManagementTestContext { ): Future[Party] def allocateExternalPartyRequest( + keyPair: KeyPair, partyIdHint: Option[String] = None, - synchronizerId: String = "", - ): AllocateExternalPartyRequest + synchronizer: String = "", + ): Future[AllocateExternalPartyRequest] + + def generateExternalPartyTopologyRequest( + namespacePublicKey: Array[Byte], + partyIdHint: Option[String] = None, + ): Future[GenerateExternalPartyTopologyResponse] def allocateExternalParty( request: AllocateExternalPartyRequest, diff --git a/sdk/canton/community/ledger-test-tool/suites/lf-v2.1/src/main/scala/com/daml/ledger/api/testtool/infrastructure/participant/SingleParticipantTestContext.scala b/sdk/canton/community/ledger-test-tool/suites/lf-v2.1/src/main/scala/com/daml/ledger/api/testtool/infrastructure/participant/SingleParticipantTestContext.scala index 4ddad0f70bf8..2e310968b2bf 100644 --- a/sdk/canton/community/ledger-test-tool/suites/lf-v2.1/src/main/scala/com/daml/ledger/api/testtool/infrastructure/participant/SingleParticipantTestContext.scala +++ b/sdk/canton/community/ledger-test-tool/suites/lf-v2.1/src/main/scala/com/daml/ledger/api/testtool/infrastructure/participant/SingleParticipantTestContext.scala @@ -53,7 +53,6 @@ import com.daml.ledger.api.v2.event_query_service.{ GetEventsByContractIdRequest, GetEventsByContractIdResponse, } -import com.daml.ledger.api.v2.interactive.interactive_submission_service as iss import com.daml.ledger.api.v2.interactive.interactive_submission_service.{ ExecuteSubmissionAndWaitForTransactionRequest, ExecuteSubmissionAndWaitForTransactionResponse, @@ -80,7 +79,7 @@ import com.daml.ledger.api.v2.transaction.Transaction import com.daml.ledger.api.v2.transaction_filter.* import com.daml.ledger.api.v2.transaction_filter.CumulativeFilter.IdentifierFilter import com.daml.ledger.api.v2.update_service.* -import com.daml.ledger.api.v2.value as v1 +import com.daml.ledger.api.v2.{crypto as lapicrypto, value as v1} import com.daml.ledger.javaapi.data.codegen.{ContractCompanion, ContractId, Exercised, Update} import com.daml.ledger.javaapi.data.{ Command, @@ -91,24 +90,12 @@ import com.daml.ledger.javaapi.data.{ Value, } import com.daml.logging.{ContextualizedLogger, LoggingContext} -import com.daml.nonempty.NonEmpty import com.daml.timer.Delayed import com.digitalasset.base.error.ErrorCode -import com.digitalasset.canton.concurrent.FutureSupervisor -import com.digitalasset.canton.config.{DefaultProcessingTimeouts, ProcessingTimeout} -import com.digitalasset.canton.crypto.LedgerApiCryptoConversions.* -import com.digitalasset.canton.interactive.ExternalPartyUtils import com.digitalasset.canton.ledger.api.TransactionShape import com.digitalasset.canton.ledger.api.TransactionShape.{AcsDelta, LedgerEffects, toProto} -import com.digitalasset.canton.logging.SuppressingLogger -import com.digitalasset.canton.time.{NonNegativeFiniteDuration, WallClock} -import com.digitalasset.canton.topology.{ - ExternalParty as CantonExternalParty, - ParticipantId, - PartyId, - UniqueIdentifier, -} -import com.digitalasset.canton.tracing.TraceContext +import com.digitalasset.canton.time.NonNegativeFiniteDuration +import com.digitalasset.canton.topology.{PartyId, UniqueIdentifier} import com.digitalasset.canton.util.{MonadUtil, OptionUtil} import com.google.protobuf.ByteString import io.grpc.StatusRuntimeException @@ -117,6 +104,7 @@ import 
io.grpc.protobuf.StatusProto import io.grpc.stub.StreamObserver import io.scalaland.chimney.dsl.* +import java.security.{KeyPair, KeyPairGenerator, Signature} import java.time.{Clock, Instant} import java.util.List as JList import scala.concurrent.duration.DurationInt @@ -142,8 +130,7 @@ final class SingleParticipantTestContext private[participant] ( val features: Features, val participantId: String, )(protected[participant] implicit val ec: ExecutionContext) - extends ParticipantTestContext - with ExternalPartyUtils { + extends ParticipantTestContext { private val logger = ContextualizedLogger.get(getClass) private[this] val identifierPrefix = @@ -312,29 +299,66 @@ final class SingleParticipantTestContext private[participant] ( override def allocateParty(): Future[Party] = allocateParty(partyIdHint = Some(nextPartyHintId())) + override def generateExternalPartyTopologyRequest( + namespacePublicKey: Array[Byte], + partyIdHint: Option[String] = None, + ): Future[GenerateExternalPartyTopologyResponse] = + for { + syncIds <- getConnectedSynchronizers(None, None) + syncId = syncIds.headOption.getOrElse(throw new Exception("No synchronizer connected")) + onboardingTransactions <- generateExternalPartyTopology( + GenerateExternalPartyTopologyRequest( + synchronizer = syncId, + partyHint = partyIdHint.getOrElse(nextPartyHintId()), + publicKey = Some( + lapicrypto.SigningPublicKey( + format = + lapicrypto.CryptoKeyFormat.CRYPTO_KEY_FORMAT_DER_X509_SUBJECT_PUBLIC_KEY_INFO, + keyData = ByteString.copyFrom(namespacePublicKey), + keySpec = lapicrypto.SigningKeySpec.SIGNING_KEY_SPEC_EC_CURVE25519, + ) + ), + localParticipantObservationOnly = false, + otherConfirmingParticipantUids = Seq(), + confirmationThreshold = 1, + observingParticipantUids = Seq(), + ) + ) + } yield onboardingTransactions + override def allocateExternalPartyRequest( + keyPair: KeyPair, partyIdHint: Option[String] = None, synchronizer: String = "", - ): AllocateExternalPartyRequest = { - val (onboardingTransactions, _) = generateExternalPartyOnboardingTransactions( - partyIdHint.getOrElse(nextPartyHintId()), - Seq(ParticipantId.tryFromProtoPrimitive(s"PAR::$participantId")), - shareNamespaceAndSigningKey = true, - ) - AllocateExternalPartyRequest( - synchronizer = synchronizer, - onboardingTransactions = onboardingTransactions.transactionsWithSingleSignature.map { - case (transaction, signatures) => + ): Future[AllocateExternalPartyRequest] = { + val signing = Signature.getInstance("Ed25519") + signing.initSign(keyPair.getPrivate) + for { + onboardingTransactions <- generateExternalPartyTopologyRequest( + keyPair.getPublic.getEncoded, + partyIdHint, + ) + } yield { + signing.update(onboardingTransactions.multiHash.toByteArray) + AllocateExternalPartyRequest( + synchronizer = synchronizer, + onboardingTransactions = onboardingTransactions.topologyTransactions.map { transaction => AllocateExternalPartyRequest.SignedTransaction( - transaction.getCryptographicEvidence, - signatures.map(_.toProtoV30.transformInto[iss.Signature]), + transaction, + Seq.empty, ) - }, - multiHashSignatures = onboardingTransactions.multiTransactionSignatures.map( - _.toProtoV30.transformInto[iss.Signature] - ), - identityProviderId = "", - ) + }, + multiHashSignatures = Seq( + lapicrypto.Signature( + format = lapicrypto.SignatureFormat.SIGNATURE_FORMAT_RAW, + signature = ByteString.copyFrom(signing.sign()), + signedBy = onboardingTransactions.publicKeyFingerprint, + signingAlgorithmSpec = lapicrypto.SigningAlgorithmSpec.SIGNING_ALGORITHM_SPEC_ED25519, + ) + 
), + identityProviderId = "", + ) + } } override def allocateExternalParty( @@ -353,7 +377,9 @@ final class SingleParticipantTestContext private[participant] ( override def allocateExternalPartyFromHint( partyIdHint: Option[String], minSynchronizers: Int, - ): Future[ExternalParty] = + ): Future[ExternalParty] = { + val keyGen = KeyPairGenerator.getInstance("Ed25519") + val keyPair = keyGen.generateKeyPair() for { connectedSynchronizerIds <- connectedSynchronizers() result <- MonadUtil.foldLeftM[Future, Option[AllocateExternalPartyResponse], String]( @@ -365,23 +391,23 @@ final class SingleParticipantTestContext private[participant] ( .map(_.partyId) .map(PartyId.tryFromProtoPrimitive) .map(_.identifier.unwrap) - services.partyManagement - .allocateExternalParty( - allocateExternalPartyRequest( - partyIdHint - // otherwise use the dynamically generated party id as the party id hint, to allocate - // the same party across all synchronizers - .orElse(previouslyAllocatedPartyIdHint), - synchronizer = synchronizerId, - ) - ) + allocateExternalPartyRequest( + keyPair, + partyIdHint + // otherwise use the dynamically generated party id as the party id hint, to allocate + // the same party across all synchronizers + .orElse(previouslyAllocatedPartyIdHint), + synchronizer = synchronizerId, + ).flatMap(services.partyManagement.allocateExternalParty) .map(Some(_)) } } yield Party.external( result.head.partyId, - NonEmpty.mk(Seq, UniqueIdentifier.tryFromProtoPrimitive(result.head.partyId).fingerprint), + UniqueIdentifier.tryFromProtoPrimitive(result.head.partyId).fingerprint, + keyPair, connectedSynchronizerIds.toList, ) + } override def allocateParty( partyIdHint: Option[String] = None, @@ -1189,31 +1215,24 @@ final class SingleParticipantTestContext private[participant] ( packageIdSelectionPreference = Seq.empty, verboseHashing = false, prefetchContractKeys = Seq.empty, + maxRecordTime = Option.empty, ) override def executeSubmissionRequest( party: ExternalParty, preparedTx: PrepareSubmissionResponse, ): ExecuteSubmissionRequest = { - import com.digitalasset.canton.crypto.LedgerApiCryptoConversions.* - import io.scalaland.chimney.dsl.* - val signature = signTxAs( - preparedTx.preparedTransactionHash, - CantonExternalParty( - partyId = PartyId.tryFromProtoPrimitive(party.getValue), - signingFingerprints = party.signingFingerprints, - ), - ) + val signature = party.signProto(preparedTx.preparedTransactionHash) ExecuteSubmissionRequest( preparedTransaction = preparedTx.preparedTransaction, partySignatures = Some( PartySignatures( - signature.toSeq.map { case (partyId, signatures) => + Seq( SinglePartySignatures( - partyId.toProtoPrimitive, - signatures.map(_.toProtoV30.transformInto[iss.Signature]), + party.underlying.getValue, + Seq(signature), ) - } + ) ) ), deduplicationPeriod = ExecuteSubmissionRequest.DeduplicationPeriod.Empty, @@ -1469,11 +1488,4 @@ final class SingleParticipantTestContext private[participant] ( NonNegativeFiniteDuration.tryCreate( features.offsetCheckpoint.getMaxOffsetCheckpointEmissionDelay.asJava ) - - override val externalPartyExecutionContext: ExecutionContext = ec - override implicit protected val traceContext: TraceContext = TraceContext.empty - override val loggerFactory: SuppressingLogger = SuppressingLogger(getClass) - override val futureSupervisor: FutureSupervisor = FutureSupervisor.Noop - override protected def timeouts: ProcessingTimeout = DefaultProcessingTimeouts.testing - override val wallClock: WallClock = new WallClock(timeouts, loggerFactory) } diff --git 
a/sdk/canton/community/ledger-test-tool/suites/lf-v2.1/src/main/scala/com/daml/ledger/api/testtool/infrastructure/participant/TimeoutParticipantTestContext.scala b/sdk/canton/community/ledger-test-tool/suites/lf-v2.1/src/main/scala/com/daml/ledger/api/testtool/infrastructure/participant/TimeoutParticipantTestContext.scala index af161750a4dc..f6f76b6e2ca1 100644 --- a/sdk/canton/community/ledger-test-tool/suites/lf-v2.1/src/main/scala/com/daml/ledger/api/testtool/infrastructure/participant/TimeoutParticipantTestContext.scala +++ b/sdk/canton/community/ledger-test-tool/suites/lf-v2.1/src/main/scala/com/daml/ledger/api/testtool/infrastructure/participant/TimeoutParticipantTestContext.scala @@ -67,6 +67,7 @@ import com.digitalasset.canton.time.NonNegativeFiniteDuration import com.google.protobuf.ByteString import io.grpc.health.v1.health.HealthCheckResponse +import java.security.KeyPair import java.time.Instant import java.util.List as JList import java.util.concurrent.TimeoutException @@ -229,10 +230,19 @@ class TimeoutParticipantTestContext(timeoutScaleFactor: Double, delegate: Partic ) override def allocateExternalPartyRequest( + keyPair: KeyPair, partyIdHint: Option[String] = None, - synchronizerId: String = "", - ): AllocateExternalPartyRequest = - delegate.allocateExternalPartyRequest(partyIdHint, synchronizerId) + synchronizer: String = "", + ): Future[AllocateExternalPartyRequest] = + delegate.allocateExternalPartyRequest(keyPair, partyIdHint, synchronizer) + + override def generateExternalPartyTopologyRequest( + namespacePublicKey: Array[Byte], + partyIdHint: Option[String] = None, + ): Future[GenerateExternalPartyTopologyResponse] = withTimeout( + s"Generate topology transactions to allocate external party $partyIdHint", + delegate.generateExternalPartyTopologyRequest(namespacePublicKey, partyIdHint), + ) override def allocateParty( partyIdHint: Option[String] = None, diff --git a/sdk/canton/community/ledger-test-tool/suites/lf-v2.1/src/main/scala/com/daml/ledger/api/testtool/suites/v2_1/PartyManagementServiceIT.scala b/sdk/canton/community/ledger-test-tool/suites/lf-v2.1/src/main/scala/com/daml/ledger/api/testtool/suites/v2_1/PartyManagementServiceIT.scala index 3d137e7b3566..445d70c48b47 100644 --- a/sdk/canton/community/ledger-test-tool/suites/lf-v2.1/src/main/scala/com/daml/ledger/api/testtool/suites/v2_1/PartyManagementServiceIT.scala +++ b/sdk/canton/community/ledger-test-tool/suites/lf-v2.1/src/main/scala/com/daml/ledger/api/testtool/suites/v2_1/PartyManagementServiceIT.scala @@ -21,7 +21,6 @@ import com.daml.ledger.api.v2.admin.party_management_service.{ UpdatePartyIdentityProviderIdRequest, } import com.daml.ledger.api.v2.crypto as lapicrypto -import com.daml.ledger.api.v2.interactive.interactive_submission_service import com.daml.ledger.javaapi.data.Party as ApiParty import com.daml.ledger.test.java.model.test.Dummy import com.digitalasset.canton.ledger.error.groups.{AdminServiceErrors, RequestValidationErrors} @@ -1101,12 +1100,11 @@ final class PartyManagementServiceIT extends PartyManagementITBase { .SignedTransaction(transaction = x, signatures = Seq.empty) ), multiHashSignatures = Seq( - interactive_submission_service.Signature( - format = interactive_submission_service.SignatureFormat.SIGNATURE_FORMAT_RAW, + lapicrypto.Signature( + format = lapicrypto.SignatureFormat.SIGNATURE_FORMAT_RAW, signature = ByteString.copyFrom(signing.sign()), signedBy = response.publicKeyFingerprint, - signingAlgorithmSpec = - 
interactive_submission_service.SigningAlgorithmSpec.SIGNING_ALGORITHM_SPEC_ED25519, + signingAlgorithmSpec = lapicrypto.SigningAlgorithmSpec.SIGNING_ALGORITHM_SPEC_ED25519, ) ), identityProviderId = "", diff --git a/sdk/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/ledger/api/services/InteractiveSubmissionService.scala b/sdk/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/ledger/api/services/InteractiveSubmissionService.scala index e0d0cdc44953..2523c5093e32 100644 --- a/sdk/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/ledger/api/services/InteractiveSubmissionService.scala +++ b/sdk/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/ledger/api/services/InteractiveSubmissionService.scala @@ -27,7 +27,11 @@ import com.digitalasset.canton.version.HashingSchemeVersion import com.digitalasset.daml.lf.data.Ref.{SubmissionId, UserId} object InteractiveSubmissionService { - final case class PrepareRequest(commands: Commands, verboseHashing: Boolean) + final case class PrepareRequest( + commands: Commands, + verboseHashing: Boolean, + maxRecordTime: Option[LfTimestamp], + ) final case class ExecuteRequest( userId: UserId, diff --git a/sdk/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/ledger/api/util.scala b/sdk/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/ledger/api/util.scala index b58cd0ddb14a..18a3e616a841 100644 --- a/sdk/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/ledger/api/util.scala +++ b/sdk/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/ledger/api/util.scala @@ -442,7 +442,7 @@ object UploadDarVettingChange { } } -sealed trait VettedPackagesRef { +sealed trait VettedPackagesRef extends PrettyPrinting { def toProtoLAPI: package_management_service.VettedPackagesRef def findMatchingPackages(metadata: PackageMetadata): Either[String, NonEmpty[Set[Ref.PackageId]]] } @@ -462,6 +462,9 @@ object VettedPackagesRef { } else { Right(NonEmpty(Set, id)) } + + override protected def pretty: Pretty[Id] = + prettyOfString(id => s"package-id: ${id.id.singleQuoted}") } final case class NameAndVersion( @@ -475,7 +478,6 @@ object VettedPackagesRef { version.toString, ) - // TODO(#27499): Stop relying on `(name, version) -> id` injection def findMatchingPackages( metadata: PackageMetadata ): Either[String, NonEmpty[Set[Ref.PackageId]]] = @@ -498,6 +500,11 @@ object VettedPackagesRef { case Some(ne) => Right(ne) } } + + override protected def pretty: Pretty[NameAndVersion] = prettyOfClass( + param("name", _.name), + param("version", _.version), + ) } final case class All( @@ -526,6 +533,13 @@ object VettedPackagesRef { ) } } + + override protected def pretty: Pretty[All] = + prettyOfClass( + param("id", _.id), + param("name", _.name), + param("version", _.version), + ) } final case class Name( @@ -541,6 +555,9 @@ object VettedPackagesRef { case None => Left(s"No packages with name $name") case Some(packageResolution) => Right(packageResolution.allPackageIdsForName) } + + override protected def pretty: Pretty[Name] = + prettyOfString(name => s"package-name: ${name.name.singleQuoted}") } private def parseWith[A]( diff --git a/sdk/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/ledger/api/validation/CommandsValidator.scala 
b/sdk/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/ledger/api/validation/CommandsValidator.scala
index e49ef9185334..5e096953995e 100644
--- a/sdk/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/ledger/api/validation/CommandsValidator.scala
+++ b/sdk/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/ledger/api/validation/CommandsValidator.scala
@@ -17,6 +17,7 @@ import com.daml.ledger.api.v2.interactive.interactive_submission_service.{
   PrepareSubmissionRequest,
 }
 import com.daml.ledger.api.v2.reassignment_commands.{ReassignmentCommand, ReassignmentCommands}
+import com.digitalasset.canton.LfTimestamp
 import com.digitalasset.canton.data.{DeduplicationPeriod, Offset}
 import com.digitalasset.canton.ledger.api.messages.command.submission
 import com.digitalasset.canton.ledger.api.util.{DurationConversion, TimestampConversion}
@@ -258,6 +259,18 @@ final class CommandsValidator(
     } yield ledgerEffectiveTimestamp
+  def validateLfTime(protoTimestamp: com.google.protobuf.timestamp.Timestamp)(implicit
+      errorLoggingContext: ErrorLoggingContext
+  ): Either[StatusRuntimeException, LfTimestamp] =
+    LfTimestamp
+      .fromInstant(TimestampConversion.toInstant(protoTimestamp))
+      .left
+      .map(_ =>
+        invalidArgument(
+          s"Cannot represent ledger time $protoTimestamp as a Daml timestamp"
+        )
+      )
+
   // Public because it is used by Canton.
   def validateInnerCommands(
       commands: Seq[Command]
diff --git a/sdk/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/ledger/api/validation/CryptoValidator.scala b/sdk/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/ledger/api/validation/CryptoValidator.scala
index 463b278af186..f7c01fca83db 100644
--- a/sdk/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/ledger/api/validation/CryptoValidator.scala
+++ b/sdk/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/ledger/api/validation/CryptoValidator.scala
@@ -4,10 +4,10 @@ package com.digitalasset.canton.ledger.api.validation
 import cats.syntax.either.*
-import com.daml.ledger.api.v2.interactive.interactive_submission_service as iss
-import com.daml.ledger.api.v2.interactive.interactive_submission_service.{
-  Signature as InteractiveSignature,
-  SignatureFormat as InteractiveSignatureFormat,
+import com.daml.ledger.api.v2.crypto
+import com.daml.ledger.api.v2.crypto.{
+  Signature as LAPISignature,
+  SignatureFormat as LAPISignatureFormat,
 }
 import com.digitalasset.canton.crypto.{
   Fingerprint,
@@ -24,13 +24,13 @@ import scala.annotation.nowarn
 object CryptoValidator {
   def validateSignature(
-      issSignatureP: iss.Signature,
+      cryptoSignatureP: crypto.Signature,
       fieldName: String,
   )(implicit
       errorLoggingContext: ErrorLoggingContext
   ): Either[StatusRuntimeException, Signature] = {
-    val InteractiveSignature(formatP, signatureP, signedByP, signingAlgorithmSpecP) =
-      issSignatureP
+    val LAPISignature(formatP, signatureP, signedByP, signingAlgorithmSpecP) =
+      cryptoSignatureP
     for {
       format <- validateSignatureFormat(formatP, "format")
       signature = signatureP
@@ -42,35 +42,35 @@ object CryptoValidator {
   private def validateSignatureFormat(
-      formatP: InteractiveSignatureFormat,
+      formatP: LAPISignatureFormat,
       fieldName: String,
   )(implicit
       errorLoggingContext: ErrorLoggingContext
   ): Either[StatusRuntimeException, SignatureFormat] =
     formatP match {
-      case InteractiveSignatureFormat.SIGNATURE_FORMAT_DER => Right(SignatureFormat.Der)
-
case InteractiveSignatureFormat.SIGNATURE_FORMAT_CONCAT => Right(SignatureFormat.Concat) - case InteractiveSignatureFormat.SIGNATURE_FORMAT_RAW => + case LAPISignatureFormat.SIGNATURE_FORMAT_DER => Right(SignatureFormat.Der) + case LAPISignatureFormat.SIGNATURE_FORMAT_CONCAT => Right(SignatureFormat.Concat) + case LAPISignatureFormat.SIGNATURE_FORMAT_RAW => Right(SignatureFormat.Raw: @nowarn("msg=Raw in object SignatureFormat is deprecated")) - case InteractiveSignatureFormat.SIGNATURE_FORMAT_SYMBOLIC => Right(SignatureFormat.Symbolic) - case InteractiveSignatureFormat.SIGNATURE_FORMAT_UNSPECIFIED => + case LAPISignatureFormat.SIGNATURE_FORMAT_SYMBOLIC => Right(SignatureFormat.Symbolic) + case LAPISignatureFormat.SIGNATURE_FORMAT_UNSPECIFIED => Left(invalidField(fieldName, message = "Signature format must be specified")) - case other: InteractiveSignatureFormat.Unrecognized => + case other: LAPISignatureFormat.Unrecognized => Left(invalidField(fieldName, message = s"Signing algorithm spec $other not supported")) } private def validateSigningAlgorithmSpec( - signingAlgorithmSpecP: iss.SigningAlgorithmSpec, + signingAlgorithmSpecP: crypto.SigningAlgorithmSpec, fieldName: String, )(implicit errorLoggingContext: ErrorLoggingContext ): Either[StatusRuntimeException, SigningAlgorithmSpec] = signingAlgorithmSpecP match { - case iss.SigningAlgorithmSpec.SIGNING_ALGORITHM_SPEC_ED25519 => + case crypto.SigningAlgorithmSpec.SIGNING_ALGORITHM_SPEC_ED25519 => Right(SigningAlgorithmSpec.Ed25519) - case iss.SigningAlgorithmSpec.SIGNING_ALGORITHM_SPEC_EC_DSA_SHA_256 => + case crypto.SigningAlgorithmSpec.SIGNING_ALGORITHM_SPEC_EC_DSA_SHA_256 => Right(SigningAlgorithmSpec.EcDsaSha256) - case iss.SigningAlgorithmSpec.SIGNING_ALGORITHM_SPEC_EC_DSA_SHA_384 => + case crypto.SigningAlgorithmSpec.SIGNING_ALGORITHM_SPEC_EC_DSA_SHA_384 => Right(SigningAlgorithmSpec.EcDsaSha384) case other => Left(invalidField(fieldName, message = s"Signing algorithm spec $other not supported")) diff --git a/sdk/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/ledger/api/validation/SubmitRequestValidator.scala b/sdk/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/ledger/api/validation/SubmitRequestValidator.scala index a1fd35e14ebb..284537b05dbc 100644 --- a/sdk/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/ledger/api/validation/SubmitRequestValidator.scala +++ b/sdk/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/ledger/api/validation/SubmitRequestValidator.scala @@ -15,6 +15,7 @@ import com.digitalasset.base.error.RpcError import com.digitalasset.canton.crypto.Signature import com.digitalasset.canton.ledger.api.SubmissionIdGenerator import com.digitalasset.canton.ledger.api.messages.command.submission +import com.digitalasset.canton.ledger.api.services.InteractiveSubmissionService import com.digitalasset.canton.ledger.api.services.InteractiveSubmissionService.ExecuteRequest import com.digitalasset.canton.ledger.api.validation.ValueValidator.* import com.digitalasset.canton.ledger.error.groups.RequestValidationErrors @@ -56,7 +57,7 @@ class SubmitRequestValidator( maxDeduplicationDuration: Duration, )(implicit errorLoggingContext: ErrorLoggingContext - ): Either[StatusRuntimeException, submission.SubmitRequest] = + ): Either[StatusRuntimeException, InteractiveSubmissionService.PrepareRequest] = for { validatedCommands <- commandsValidator.validatePrepareRequest( req, @@ -64,7 +65,12 @@ class 
SubmitRequestValidator( currentUtcTime, maxDeduplicationDuration, ) - } yield submission.SubmitRequest(validatedCommands) + maxRecordTime <- req.maxRecordTime.traverse(commandsValidator.validateLfTime) + } yield InteractiveSubmissionService.PrepareRequest( + validatedCommands, + req.verboseHashing, + maxRecordTime, + ) private def validatePartySignatures( proto: PartySignatures diff --git a/sdk/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/ledger/participant/state/SubmitterInfo.scala b/sdk/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/ledger/participant/state/SubmitterInfo.scala index 2bc6cf15dfed..5eb32fba1f4b 100644 --- a/sdk/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/ledger/participant/state/SubmitterInfo.scala +++ b/sdk/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/ledger/participant/state/SubmitterInfo.scala @@ -4,6 +4,7 @@ package com.digitalasset.canton.ledger.participant.state import com.daml.logging.entries.{LoggingValue, ToLoggingValue} +import com.digitalasset.canton.LfTimestamp import com.digitalasset.canton.crypto.Signature import com.digitalasset.canton.data.DeduplicationPeriod import com.digitalasset.canton.ledger.participant.state.SubmitterInfo.ExternallySignedSubmission @@ -73,6 +74,8 @@ final case class SubmitterInfo( } object SubmitterInfo { + import com.digitalasset.canton.ledger.api.Commands.`Timestamp to LoggingValue` + implicit val `ExternallySignedSubmission to LoggingValue` : ToLoggingValue[ExternallySignedSubmission] = { case ExternallySignedSubmission( @@ -80,12 +83,14 @@ object SubmitterInfo { signatures, transactionUUID, mediatorGroup, + maxRecordTimeO, ) => LoggingValue.Nested.fromEntries( "version" -> version.index, "signatures" -> signatures.keys.map(_.toProtoPrimitive), "transactionUUID" -> transactionUUID.toString, "mediatorGroup" -> mediatorGroup.toString, + "maxRecordTimeO" -> maxRecordTimeO, ) } implicit val `SubmitterInfo to LoggingValue`: ToLoggingValue[SubmitterInfo] = { @@ -114,6 +119,7 @@ object SubmitterInfo { signatures: Map[PartyId, Seq[Signature]], transactionUUID: UUID, mediatorGroup: MediatorGroupIndex, + maxRecordTimeO: Option[LfTimestamp], ) } diff --git a/sdk/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/apiserver/services/ApiInteractiveSubmissionService.scala b/sdk/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/apiserver/services/ApiInteractiveSubmissionService.scala index a28536625662..4d83ca544c2c 100644 --- a/sdk/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/apiserver/services/ApiInteractiveSubmissionService.scala +++ b/sdk/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/apiserver/services/ApiInteractiveSubmissionService.scala @@ -24,12 +24,8 @@ import com.daml.ledger.api.v2.package_reference.PackageReference import com.daml.metrics.Timed import com.daml.tracing.Telemetry import com.digitalasset.canton.ledger.api.grpc.GrpcApiService -import com.digitalasset.canton.ledger.api.messages.command.submission.SubmitRequest import com.digitalasset.canton.ledger.api.services.InteractiveSubmissionService -import com.digitalasset.canton.ledger.api.services.InteractiveSubmissionService.{ - ExecuteRequest, - PrepareRequest, -} +import com.digitalasset.canton.ledger.api.services.InteractiveSubmissionService.ExecuteRequest import 
com.digitalasset.canton.ledger.api.validation.{
  CommandsValidator,
  GetPreferredPackagesRequestValidator,
@@ -108,9 +104,6 @@ class ApiInteractiveSubmissionService(
           maxDeduplicationDuration = maxDeduplicationDuration,
         )(errorLogger),
       )
-      .map { case SubmitRequest(commands) =>
-        PrepareRequest(commands, request.value.verboseHashing)
-      }
       .fold(
         t =>
           FutureUnlessShutdown.failed(ValidationLogger.logFailureWithTrace(logger, request, t)),
diff --git a/sdk/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/apiserver/services/admin/ApiPartyManagementService.scala b/sdk/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/apiserver/services/admin/ApiPartyManagementService.scala
index 9b46642c871e..f2196681a611 100644
--- a/sdk/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/apiserver/services/admin/ApiPartyManagementService.scala
+++ b/sdk/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/apiserver/services/admin/ApiPartyManagementService.scala
@@ -243,7 +243,10 @@ private[apiserver] final class ApiPartyManagementService private (
       key: PartyAllocation.TrackerKey
   )(implicit errorLogger: ErrorLoggingContext): StatusRuntimeException =
     CommonErrors.RequestAlreadyInFlight
-      .Reject(requestId = key.submissionId)
+      .Reject(
+        requestId = key.submissionId,
+        details = s"Party ${key.party} is in the process of being allocated on this node.",
+      )
       .asGrpcError
 }
@@ -775,12 +778,16 @@ private[apiserver] final class ApiPartyManagementService private (
     for {
       transaction <- TopologyTransaction
         .fromByteString(
+          // TODO(i27619): We may be able to skip validating the protocol version here,
+          // depending on the trust we put in the input.
+          // Note that pinning to a protocol version makes it impossible to use transactions
+          // generated with an earlier protocol version (e.g. in between synchronizer updates)
          ProtocolVersionValidation(protocolVersion),
          signedTransaction.transaction,
        )
        .leftMap(error =>
          ValidationErrors.invalidField(
-            "onboarding_transaction.transaction",
+            "onboarding_transactions.transaction",
            s"Invalid transaction: ${error.message}",
          )
        )
@@ -788,7 +795,7 @@ private[apiserver] final class ApiPartyManagementService private (
        .selectOp[TopologyChangeOp.Replace]
        .toRight(
          ValidationErrors.invalidField(
-            "onboarding_transaction.transaction",
+            "onboarding_transactions.transaction",
            s"Onboarding topology transactions must be Replace operations",
          )
        )
@@ -796,7 +803,7 @@ private[apiserver] final class ApiPartyManagementService private (
        positiveTransaction.serial == PositiveInt.one,
        (),
        ValidationErrors.invalidField(
-          "onboarding_transaction.transaction.serial",
+          "onboarding_transactions.transaction.serial",
          "Onboarding transaction serial must be 1",
        ),
      )
@@ -809,7 +816,6 @@ private[apiserver] final class ApiPartyManagementService private (
      request: AllocateExternalPartyRequest
  ): Future[AllocateExternalPartyResponse] = {
    implicit val loggingContext = LoggingContextWithTrace(telemetry)(this.loggingContext)
-    logger.info("Starting external party allocation")
    implicit val errorLoggingContext: ErrorLoggingContext =
      ErrorLoggingContext(logger, loggingContext.toPropertiesMap, loggingContext.traceContext)
    import com.digitalasset.canton.config.NonNegativeFiniteDuration
@@ -823,18 +829,25 @@ private[apiserver] final class ApiPartyManagementService private (
      )
      protocolVersion <- syncService
        .protocolVersionForSynchronizerId(synchronizerId)
-
.toRight(ValidationErrors.invalidArgument("No valid synchronizer found.")) - signedTransactions <- request.onboardingTransactions.toList.traverse( + .toRight( + ValidationErrors.invalidArgument( + s"This node is not connected to the requested synchronizer $synchronizerId." + ) + ) + transactionsWithSignatures <- request.onboardingTransactions.toList.traverse( parseSignedTransaction(protocolVersion, _) ) signedTransactionsNE <- NonEmpty - .from(signedTransactions) + .from(transactionsWithSignatures) .toRight( ValidationErrors .invalidField("onboarding_transactions.transactions", "Transactions field is empty") ) parsedMultiSignatures <- request.multiHashSignatures.toList.traverse( - CryptoValidator.validateSignature(_, "multi_signature.signatures") + CryptoValidator.validateSignature(_, "multi_hash_signatures.signatures") + ) + _ = logger.debug( + s"External party allocation input transactions:\n ${signedTransactionsNE.map(_._1).mkString("\n")}" ) cantonParticipantId = this.syncService.participantId externalPartyDetails <- ExternalPartyOnboardingDetails @@ -848,10 +861,14 @@ private[apiserver] final class ApiPartyManagementService private ( s"$participantId -> $permission" } .mkString("[", ", ", "]") + val signingKeysString = externalPartyOnboardingDetails.signedPartyToKeyMappingTransaction + .map { p2k => + s" and ${p2k.mapping.signingKeys.length} signing keys with threshold ${p2k.mapping.threshold.value}" + } + .getOrElse("") logger.info( s"Allocating external party ${externalPartyOnboardingDetails.partyId.toProtoPrimitive} on" + - s" $hostingParticipantsString with confirmation threshold ${externalPartyOnboardingDetails.confirmationThreshold.value}" + - s" and ${externalPartyOnboardingDetails.numberOfSigningKeys} signing keys with threshold ${externalPartyOnboardingDetails.signingKeysThreshold.value}" + s" $hostingParticipantsString with confirmation threshold ${externalPartyOnboardingDetails.confirmationThreshold.value}" + signingKeysString ) val trackerKey = submissionIdGenerator( @@ -872,10 +889,10 @@ private[apiserver] final class ApiPartyManagementService private ( _ <- checkSubmissionResult(result) } yield () - // Only track the party if this participant is not multi hosted - // If it's not the party won't be fully onboarded here so this would time out + // Only track the party if we expect it to be fully authorized + // Otherwise the party won't be fully onboarded here so this would time out val partyIdF = - if (!externalPartyOnboardingDetails.isMultiHosted) { + if (externalPartyOnboardingDetails.fullyAllocatesParty) { partyAllocationTracker .track( trackerKey, @@ -907,15 +924,15 @@ private[apiserver] final class ApiPartyManagementService private ( partyHint, publicKeyO, localParticipantObservationOnly, - otherConfirmingParticipantIds, + otherConfirmingParticipantUids, confirmationThreshold, - observingParticipantIds, + observingParticipantUids, ) = request val participantId = syncService.participantId val availableConfirmers = - (if (localParticipantObservationOnly) 0 else 1) + otherConfirmingParticipantIds.size + (if (localParticipantObservationOnly) 0 else 1) + otherConfirmingParticipantUids.size val response = for { publicKeyP <- ProtoConverter.required("public_key", publicKeyO).leftMap(_.message) @@ -952,20 +969,34 @@ private[apiserver] final class ApiPartyManagementService private ( _ <- UniqueIdentifier.verifyValidString(partyHint).leftMap(x => "party_hint: " + x) uid <- UniqueIdentifier.create(partyHint, namespace) _ <- Either.cond(confirmationThreshold >= 0, (), "Negative 
confirmation threshold observed")
-      confirmingPids <- otherConfirmingParticipantIds.toList
        .traverse(UniqueIdentifier.fromProtoPrimitive_)
        .leftMap(_.message)
-      observingPids <- observingParticipantIds.toList
+      confirmingPids <- otherConfirmingParticipantUids.toList
        .traverse(UniqueIdentifier.fromProtoPrimitive_)
        .leftMap(_.message)
+      observingPids <- observingParticipantUids.toList
        .traverse(UniqueIdentifier.fromProtoPrimitive_)
        .leftMap(_.message)
-      allParticipantIds =
-        (confirmingPids ++ observingPids).map(ParticipantId(_)) :+ participantId
+      _ <- Either.cond(
+        !confirmingPids.contains(participantId.uid),
+        (),
+        s"This participant node ($participantId) is also listed in 'otherConfirmingParticipantUids'." +
+          s" Since the request is sent to this node, it is de facto a hosting node and must not be listed in 'otherConfirmingParticipantUids'.",
+      )
+      _ <- Either.cond(
+        !observingPids.contains(participantId.uid),
+        (),
+        s"This participant node ($participantId) is also listed in 'observingParticipantUids'." +
+          s" Since the request is sent to this node, it is de facto a hosting node and must not be listed in 'observingParticipantUids'.",
+      )
+      allParticipantIds = (confirmingPids ++ observingPids)
      _ <- Either.cond(
        allParticipantIds.distinct.sizeIs == allParticipantIds.size,
        (), {
-          val duplicate =
+          val duplicates =
            allParticipantIds.groupBy(identity).collect { case (x, ys) if ys.sizeIs > 1 => x }
-          s"Duplicate participant ids $duplicate. They need to be unique and only in one category."
+          s"The following participant IDs are referenced multiple times in the request: ${duplicates
            .mkString(", ")}." +
+            s" Please ensure all IDs are referenced only once" +
+            s" across the 'otherConfirmingParticipantUids' and 'observingParticipantUids' fields."
        },
      )
      _ <- Either.cond(
@@ -1161,20 +1192,11 @@ private[apiserver] object ApiPartyManagementService {
          partyIdHint: String,
          authorizationLevel: AuthorizationLevel,
      ): PartyAllocation.TrackerKey =
-        PartyAllocation.TrackerKey.of(
+        PartyAllocation.TrackerKey(
          partyIdHint,
          participantId,
          AuthorizationEvent.Added(authorizationLevel),
        )
    }
-
-    def fixedForTests(const: String) = new CreateSubmissionId() {
      override def apply(
          partyIdHint: String,
          authorizationLevel: AuthorizationLevel,
      ): PartyAllocation.TrackerKey =
        PartyAllocation.TrackerKey.forTests(Ref.SubmissionId.assertFromString(const))
    }
-
  }
 }
diff --git a/sdk/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/apiserver/services/admin/PartyAllocation.scala b/sdk/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/apiserver/services/admin/PartyAllocation.scala
index 66d400107121..120333786b87 100644
--- a/sdk/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/apiserver/services/admin/PartyAllocation.scala
+++ b/sdk/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/apiserver/services/admin/PartyAllocation.scala
@@ -3,6 +3,7 @@
 package com.digitalasset.canton.platform.apiserver.services.admin
+import com.digitalasset.canton.crypto.{Hash, HashAlgorithm, HashPurpose}
 import com.digitalasset.canton.ledger.participant.state.Update.TopologyTransactionEffective.AuthorizationEvent
 import com.digitalasset.canton.ledger.participant.state.index.IndexerPartyDetails
 import com.digitalasset.canton.platform.apiserver.services.tracking.StreamTracker
@@ -10,27 +11,29 @@ import com.digitalasset.daml.lf.data.Ref
 object PartyAllocation {
-  final case class TrackerKey private (val submissionId: Ref.SubmissionId)
-  object TrackerKey {
-    def of(
-        party: String,
-        participantId: Ref.ParticipantId,
-        authorizationEvent: AuthorizationEvent,
-    ): TrackerKey = {
-      import com.digitalasset.canton.crypto.{Hash, HashAlgorithm, HashPurpose}
-
+  final case class TrackerKey(
+      party: String,
+      participantId: Ref.ParticipantId,
+      authorizationEvent: AuthorizationEvent,
+  ) {
+    lazy val submissionId = {
      val builder = Hash.build(HashPurpose.PartyUpdateId, HashAlgorithm.Sha256)
      builder.add(party.split("::")(0))
      builder.add(participantId)
      builder.add(authorizationEvent.toString)
      val hash = builder.finish()
-      TrackerKey(Ref.SubmissionId.assertFromString(hash.toHexString))
+      Ref.SubmissionId.assertFromString(hash.toHexString)
    }
-    private[admin] def forTests(submissionId: Ref.SubmissionId) = TrackerKey(submissionId)
+    // Override hashCode and equals to only consider submissionId for equality and hashing
+    // Needed when the key is used in HashMaps etc.
+    override def hashCode(): Int = submissionId.hashCode
+    override def equals(obj: Any): Boolean = obj match {
+      case otherTrackerKey: TrackerKey => submissionId.equals(otherTrackerKey.submissionId)
+      case _ => false
+    }
  }
-
  final case class Completed(submissionId: TrackerKey, partyDetails: IndexerPartyDetails)
  type Tracker = StreamTracker[TrackerKey, Completed]
diff --git a/sdk/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/apiserver/services/command/interactive/InteractiveSubmissionServiceImpl.scala b/sdk/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/apiserver/services/command/interactive/InteractiveSubmissionServiceImpl.scala
index ef6a72404dda..3a3a3986f616 100644
--- a/sdk/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/apiserver/services/command/interactive/InteractiveSubmissionServiceImpl.scala
+++ b/sdk/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/apiserver/services/command/interactive/InteractiveSubmissionServiceImpl.scala
@@ -16,6 +16,7 @@ import com.daml.ledger.api.v2.update_service.GetUpdateResponse
 import com.daml.scalautil.future.FutureConversion.CompletionStageConversionOps
 import com.digitalasset.base.error.ErrorCode.LoggedApiException
 import com.digitalasset.base.error.RpcError
+import com.digitalasset.canton.LfTimestamp
 import com.digitalasset.canton.config.NonNegativeFiniteDuration
 import com.digitalasset.canton.data.CantonTimestamp
 import com.digitalasset.canton.interactive.InteractiveSubmissionEnricher
@@ -175,13 +176,19 @@ private[apiserver] final class InteractiveSubmissionServiceImpl private[services
        request.commands.submissionId.map(SubmissionId.unwrap),
      )
-      evaluateAndHash(seedService.nextSeed(), request.commands, request.verboseHashing)
+      evaluateAndHash(
        seedService.nextSeed(),
        request.commands,
        request.verboseHashing,
        request.maxRecordTime,
      )
    }
  private def evaluateAndHash(
      submissionSeed: crypto.Hash,
      commands: ApiCommands,
      verboseHashing: Boolean,
+      maxRecordTime: Option[LfTimestamp],
  )(implicit
      loggingContext: LoggingContextWithTrace,
      errorLoggingContext: ErrorLoggingContext,
@@ -212,6 +219,7 @@ private[apiserver] final class InteractiveSubmissionServiceImpl private[services
          commands,
          config.contractLookupParallelism,
          hashTracer,
+          maxRecordTime,
        )
        .leftWiden[RpcError]
      hashingDetails = hashTracer match {
diff --git a/sdk/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/apiserver/services/command/interactive/codec/EnrichedTransactionData.scala
b/sdk/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/apiserver/services/command/interactive/codec/EnrichedTransactionData.scala index 4649aa208c07..8b24ffa34e56 100644 --- a/sdk/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/apiserver/services/command/interactive/codec/EnrichedTransactionData.scala +++ b/sdk/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/apiserver/services/command/interactive/codec/EnrichedTransactionData.scala @@ -21,6 +21,7 @@ import com.digitalasset.canton.protocol.hash.HashTracer import com.digitalasset.canton.topology.SynchronizerId import com.digitalasset.canton.version.{HashingSchemeVersion, ProtocolVersion} import com.digitalasset.daml.lf.data.ImmArray +import com.digitalasset.daml.lf.data.Time.Timestamp import com.digitalasset.daml.lf.engine.Enricher import com.digitalasset.daml.lf.transaction.{ FatContractInstance, @@ -118,6 +119,7 @@ final case class PrepareTransactionData( private[codec] val synchronizerId: SynchronizerId, private[codec] val mediatorGroup: Int, private[codec] val transactionUUID: UUID, + private[codec] val maxRecordTime: Option[Timestamp], ) extends EnrichedTransactionData /** Transaction data for an enriched external submission during the execute phase. This is usually diff --git a/sdk/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/apiserver/services/command/interactive/codec/ExternalTransactionProcessor.scala b/sdk/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/apiserver/services/command/interactive/codec/ExternalTransactionProcessor.scala index 5e341c5ef200..a5a73b38b128 100644 --- a/sdk/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/apiserver/services/command/interactive/codec/ExternalTransactionProcessor.scala +++ b/sdk/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/apiserver/services/command/interactive/codec/ExternalTransactionProcessor.scala @@ -6,6 +6,7 @@ package com.digitalasset.canton.platform.apiserver.services.command.interactive. 
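// Sketch of the input-contract resolution order implemented below (a summary of
// this hunk, using the names it introduces):
//   1. the disclosedContracts map is consulted first;
//   2. on a miss, contractStore.lookupContractState(coid) resolves the contract;
//   3. ContractState.NotFound maps to ConsistencyErrors.ContractNotFound, and
//      ContractState.Archived maps to CommandExecutionErrors.Interpreter.ContractNotActive.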
import cats.data.EitherT
import cats.syntax.either.*
import com.daml.ledger.api.v2.interactive.interactive_submission_service.PreparedTransaction
+import com.digitalasset.canton.LfTimestamp
import com.digitalasset.canton.config.RequireTypes.PositiveInt
import com.digitalasset.canton.crypto.Hash
import com.digitalasset.canton.interactive.InteractiveSubmissionEnricher
@@ -25,8 +26,11 @@ import com.digitalasset.canton.logging.{LoggingContextWithTrace, NamedLoggerFact
import com.digitalasset.canton.platform.apiserver.execution.CommandExecutionResult
import com.digitalasset.canton.platform.apiserver.services.command.interactive.codec.EnrichedTransactionData.ExternalInputContract
import com.digitalasset.canton.platform.apiserver.services.command.interactive.codec.ExternalTransactionProcessor.PrepareResult
+import com.digitalasset.canton.platform.store.dao.events.InputContractPackages
+import com.digitalasset.canton.protocol.LfFatContractInst
import com.digitalasset.canton.protocol.hash.HashTracer
import com.digitalasset.canton.util.MonadUtil
+import com.digitalasset.canton.util.collection.MapsUtil
import com.digitalasset.canton.version.HashingSchemeVersion
import com.digitalasset.daml.lf.transaction.{SubmittedTransaction, Transaction}
import com.digitalasset.daml.lf.value.Value.ContractId
@@ -89,91 +93,76 @@ class ExternalTransactionProcessor(
  private def lookupAndEnrichInputContracts(
      transaction: Transaction,
-      disclosedContracts: Seq[DisclosedContract],
+      disclosedContracts: Map[ContractId, LfFatContractInst],
      contractLookupParallelism: PositiveInt,
  )(implicit
      loggingContextWithTrace: LoggingContextWithTrace,
      executionContext: ExecutionContext,
  ): EitherT[FutureUnlessShutdown, String, Map[ContractId, ExternalInputContract]] = {
-    val disclosedContractsByCoid =
-      disclosedContracts.groupMap(_.fatContractInstance.contractId)(_.fatContractInstance)
-    MonadUtil
-      .parTraverseWithLimit(contractLookupParallelism)(transaction.inputContracts.toList) {
-        inputCoid =>
-          // First check the disclosed contracts
-          disclosedContractsByCoid.get(inputCoid) match {
-            // We expect a single disclosed contract for a coid
-            case Some(Seq(originalFci)) =>
-              EitherT.liftF[FutureUnlessShutdown, String, (ContractId, ExternalInputContract)](
-                enricher.enrichContract(originalFci).map { enrichedFci =>
-                  val externalInputContract = ExternalInputContract(
-                    originalContract = originalFci,
-                    enrichedContract = enrichedFci,
+    def lookupContract(coid: ContractId): FutureUnlessShutdown[LfFatContractInst] =
+      disclosedContracts.get(coid) match {
+        case Some(inst) =>
+          FutureUnlessShutdown.pure(inst)
+        case None =>
+          FutureUnlessShutdown
+            .outcomeF(contractStore.lookupContractState(coid))
+            .flatMap[LfFatContractInst] {
+
+              case active: ContractState.Active =>
+                FutureUnlessShutdown.pure(active.contractInstance)
+
+              // Engine interpretation would likely have failed if that were the case.
+              // However, it's possible that the contract was archived or pruned in the meantime.
+              // That's not an issue, because in that case the transaction would have failed later
+              // anyway during conflict detection.
+              case ContractState.NotFound =>
+                FutureUnlessShutdown
+                  .failed(
+                    ConsistencyErrors.ContractNotFound
+                      .Reject(
+                        s"Contract was not found in the participant contract store. 
You must either explicitly disclose the contract, or prepare the transaction via a participant that has knowledge of it", + coid, + ) + .asGrpcError ) - externalInputContract.contractId -> externalInputContract - } - ) - case Some(_) => - EitherT.leftT[FutureUnlessShutdown, (ContractId, ExternalInputContract)]( - s"Contract ID $inputCoid is not unique" - ) - // If the contract is not disclosed, look it up from the store - case None => - EitherT { + case ContractState.Archived => FutureUnlessShutdown - .outcomeF( - contractStore - .lookupContractState(inputCoid) + .failed( + CommandExecutionErrors.Interpreter.ContractNotActive + .Reject( + "Input contract has seemingly already been archived immediately after interpretation of the transaction", + coid, + None, + ) + .asGrpcError ) - .flatMap { - case active: ContractState.Active => - val originalFci = active.contractInstance - enricher - .enrichContract(originalFci) - .map { enrichedFci => - val externalInputContract = ExternalInputContract( - originalContract = originalFci, - enrichedContract = enrichedFci, - ) - Right(externalInputContract.contractId -> externalInputContract) - } - // Engine interpretation likely would have failed if that was the case - // However it's possible that the contract was archived or pruned in the meantime - // That's not an issue however because if that was the case the transaction would have failed later - // anyway during conflict detection. - case ContractState.NotFound => - FutureUnlessShutdown - .failed[Either[String, (ContractId, ExternalInputContract)]]( - ConsistencyErrors.ContractNotFound - .Reject( - s"Contract was not found in the participant contract store. You must either explicitly disclose the contract, or prepare the transaction via a participant that has knowledge of it", - inputCoid, - ) - .asGrpcError - ) - case ContractState.Archived => - FutureUnlessShutdown - .failed[Either[String, (ContractId, ExternalInputContract)]]( - CommandExecutionErrors.Interpreter.ContractNotActive - .Reject( - "Input contract has seemingly already been archived immediately after interpretation of the transaction", - inputCoid, - None, - ) - .asGrpcError - ) - } - } - } + } + } + + MonadUtil + .parTraverseWithLimit(contractLookupParallelism)( + InputContractPackages.forTransaction(transaction).toList + ) { case (inputCoid, targetPackageIds) => + for { + original <- EitherT.right[String](lookupContract(inputCoid)) + enriched <- enricher.enrichContract(original, targetPackageIds) + } yield { + inputCoid -> ExternalInputContract( + originalContract = original, + enrichedContract = enriched, + ) + } } .map(_.toMap) + } private def enrich( commandExecutionResult: CommandExecutionResult, - commands: ApiCommands, + disclosedContracts: Seq[DisclosedContract], contractLookupParallelism: PositiveInt, + maxRecordTime: Option[LfTimestamp], )(implicit loggingContextWithTrace: LoggingContextWithTrace, executionContext: ExecutionContext, @@ -189,10 +178,21 @@ class ExternalTransactionProcessor( commandExecutionResult.commandInterpretationResult.transaction ) ) + disclosedContractMap <- EitherT.fromEither[FutureUnlessShutdown]( + MapsUtil + .toNonConflictingMap( + disclosedContracts.map(_.fatContractInstance).map(c => c.contractId -> c) + ) + .leftMap(err => + CommandExecutionErrors.InteractiveSubmissionPreparationError.Reject( + s"Disclosed contracts contain non-unique contract IDs: $err" + ) + ) + ) // Compute input contracts by looking them up either from disclosed contracts or the local store inputContracts <- 
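        // Input contracts are resolved per (contract id, target package ids) pair as
        // produced by InputContractPackages.forTransaction, so each contract is
        // enriched against the package ids the transaction targets for it.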
lookupAndEnrichInputContracts( enrichedTransaction.transaction, - commands.disclosedContracts.toList, + disclosedContractMap, contractLookupParallelism, ) .leftMap(CommandExecutionErrors.InteractiveSubmissionPreparationError.Reject(_)) @@ -207,6 +207,7 @@ class ExternalTransactionProcessor( synchronizerId = synchronizerId.logical, mediatorGroup = 0, transactionUUID = UUID.randomUUID(), + maxRecordTime = maxRecordTime, ) } yield transactionData @@ -217,6 +218,7 @@ class ExternalTransactionProcessor( commands: ApiCommands, contractLookupParallelism: PositiveInt, hashTracer: HashTracer, + maxRecordTime: Option[LfTimestamp], )(implicit loggingContextWithTrace: LoggingContextWithTrace, executionContext: ExecutionContext, @@ -227,7 +229,12 @@ class ExternalTransactionProcessor( ] = for { // Enrich first - enriched <- enrich(commandExecutionResult, commands, contractLookupParallelism) + enriched <- enrich( + commandExecutionResult, + commands.disclosedContracts.toList, + contractLookupParallelism, + maxRecordTime, + ) // Then encode encoded <- EitherT .liftF[ diff --git a/sdk/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/apiserver/services/command/interactive/codec/PreparedTransactionDecoder.scala b/sdk/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/apiserver/services/command/interactive/codec/PreparedTransactionDecoder.scala index 74c8c74e3427..6c00b0171f29 100644 --- a/sdk/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/apiserver/services/command/interactive/codec/PreparedTransactionDecoder.scala +++ b/sdk/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/apiserver/services/command/interactive/codec/PreparedTransactionDecoder.scala @@ -422,11 +422,15 @@ final class PreparedTransactionDecoder(override val loggerFactory: NamedLoggerFa mediatorGroup <- ProtoConverter .parseNonNegativeInt("mediator_group", metadataProto.mediatorGroup) .toFutureWithLoggedFailuresDecode("Failed to deserialize mediator group", logger) + maxLedgerTimeO <- metadataProto.maxRecordTime + .transformIntoPartial[Option[lf.data.Time.Timestamp]] + .toFutureWithLoggedFailuresDecode("Failed to deserialize max record time", logger) } yield ExternallySignedSubmission( executeRequest.serializationVersion, executeRequest.signatures, transactionUUID = transactionUUID, mediatorGroup = mediatorGroup, + maxRecordTimeO = maxLedgerTimeO, ) submitterInfo <- submitterInfoProto .intoPartial[SubmitterInfo] diff --git a/sdk/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/apiserver/services/command/interactive/codec/PreparedTransactionEncoder.scala b/sdk/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/apiserver/services/command/interactive/codec/PreparedTransactionEncoder.scala index 4d2b66e695e4..3c470bc3f649 100644 --- a/sdk/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/apiserver/services/command/interactive/codec/PreparedTransactionEncoder.scala +++ b/sdk/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/apiserver/services/command/interactive/codec/PreparedTransactionEncoder.scala @@ -310,6 +310,7 @@ final class PreparedTransactionEncoder( transactionUUID: UUID, mediatorGroup: Int, inputContracts: Seq[ExternalInputContract], + maxRecordTime: Option[lf.data.Time.Timestamp], ): PartialTransformer[PrepareTransactionData, 
iss.Metadata] = Transformer .definePartial[PrepareTransactionData, iss.Metadata] @@ -342,6 +343,10 @@ final class PreparedTransactionEncoder( _.maxLedgerEffectiveTime, _.transactionMeta.timeBoundaries.maxConstraint.map(_.transformInto[Long]), ) + .withFieldConst( + _.maxRecordTime, + maxRecordTime.map(_.transformInto[Long]), + ) .buildTransformer @VisibleForTesting @@ -378,6 +383,7 @@ final class PreparedTransactionEncoder( transactionUUID, mediatorGroup, prepareTransactionData.inputContracts.values.toSeq, + prepareTransactionData.maxRecordTime, ) val versionedTransaction = lf.transaction.VersionedTransaction( prepareTransactionData.transaction.version, diff --git a/sdk/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/index/InMemoryStateUpdater.scala b/sdk/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/index/InMemoryStateUpdater.scala index 7f9d172b2312..446e2126a116 100644 --- a/sdk/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/index/InMemoryStateUpdater.scala +++ b/sdk/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/index/InMemoryStateUpdater.scala @@ -330,7 +330,7 @@ private[platform] object InMemoryStateUpdater { .collect { case TransactionLogUpdate.TopologyTransactionEffective(_, _, _, _, events) => events.collect { case u: TransactionLogUpdate.PartyToParticipantAuthorization => PartyAllocation.Completed( - PartyAllocation.TrackerKey.of(u.party, u.participant, u.authorizationEvent), + PartyAllocation.TrackerKey(u.party, u.participant, u.authorizationEvent), IndexerPartyDetails(party = u.party, isLocal = u.participant == participantId), ) } diff --git a/sdk/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/index/IndexServiceOwner.scala b/sdk/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/index/IndexServiceOwner.scala index e52f58e3b2ed..67751db9898c 100644 --- a/sdk/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/index/IndexServiceOwner.scala +++ b/sdk/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/index/IndexServiceOwner.scala @@ -25,7 +25,6 @@ import com.digitalasset.canton.platform.InMemoryState import com.digitalasset.canton.platform.apiserver.TimedIndexService import com.digitalasset.canton.platform.config.IndexServiceConfig import com.digitalasset.canton.platform.index.IndexServiceOwner.GetPackagePreferenceForViewsUpgrading -import com.digitalasset.canton.platform.store.DbSupport import com.digitalasset.canton.platform.store.backend.common.MismatchException import com.digitalasset.canton.platform.store.cache.* import com.digitalasset.canton.platform.store.dao.events.{ @@ -39,6 +38,7 @@ import com.digitalasset.canton.platform.store.dao.{ LedgerReadDao, } import com.digitalasset.canton.platform.store.interning.StringInterning +import com.digitalasset.canton.platform.store.{DbSupport, PruningOffsetService} import com.digitalasset.canton.store.packagemeta.PackageMetadata import com.digitalasset.canton.tracing.TraceContext import com.digitalasset.daml.lf.data.Ref @@ -67,7 +67,8 @@ final class IndexServiceOwner( lfValueTranslation: LfValueTranslation, queryExecutionContext: ExecutionContextExecutorService, commandExecutionContext: ExecutionContextExecutorService, - cantonContractStore: ContractStore, + participantContractStore: ContractStore, + pruningOffsetService: 
PruningOffsetService, ) extends ResourceOwner[IndexService] with NamedLogging { private val initializationRetryDelay = 100.millis @@ -79,6 +80,7 @@ final class IndexServiceOwner( stringInterning = inMemoryState.stringInterningView, contractLoader = contractLoader, lfValueTranslation = lfValueTranslation, + pruningOffsetService = pruningOffsetService, queryExecutionContext = queryExecutionContext, commandExecutionContext = commandExecutionContext, ) @@ -90,7 +92,7 @@ final class IndexServiceOwner( ledgerDao.contractsReader, contractStateCaches = inMemoryState.contractStateCaches, loggerFactory = loggerFactory, - contractStore = cantonContractStore, + contractStore = participantContractStore, )(commandExecutionContext) bufferedTransactionsReader = BufferedUpdateReader( @@ -194,6 +196,7 @@ final class IndexServiceOwner( ledgerEndCache: LedgerEndCache, stringInterning: StringInterning, contractLoader: ContractLoader, + pruningOffsetService: PruningOffsetService, lfValueTranslation: LfValueTranslation, queryExecutionContext: ExecutionContextExecutorService, commandExecutionContext: ExecutionContextExecutorService, @@ -216,7 +219,9 @@ final class IndexServiceOwner( incompleteOffsets = incompleteOffsets, contractLoader = contractLoader, lfValueTranslation = lfValueTranslation, - ) + pruningOffsetService = pruningOffsetService, + contractStore = participantContractStore, + )(queryExecutionContext) private object InMemoryStateNotInitialized extends NoStackTrace } diff --git a/sdk/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/indexer/IndexerConfig.scala b/sdk/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/indexer/IndexerConfig.scala index 1a0a58637d0a..81db9af6aed8 100644 --- a/sdk/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/indexer/IndexerConfig.scala +++ b/sdk/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/indexer/IndexerConfig.scala @@ -12,10 +12,19 @@ import com.digitalasset.canton.platform.store.backend.postgresql.PostgresDataSou import scala.concurrent.duration.{DurationInt, FiniteDuration} /** See com.digitalasset.canton.platform.indexer.JdbcIndexer for semantics on these configurations. + * + * - enableCompression: switches on compression for both consuming and non-consuming exercises, + * equivalent to setting both enableCompressionConsumingExercise and + * enableCompressionNonConsumingExercise to true. This is to maintain backward compatibility + * with existing config files. 
+ * - enableCompressionConsumingExercise: switches on compression for consuming exercises + * - enableCompressionNonConsumingExercise: switches on compression for non-consuming exercises */ final case class IndexerConfig( batchingParallelism: NonNegativeInt = NonNegativeInt.tryCreate(DefaultBatchingParallelism), enableCompression: Boolean = DefaultEnableCompression, + enableCompressionConsumingExercise: Boolean = DefaultEnableCompression, + enableCompressionNonConsumingExercise: Boolean = DefaultEnableCompression, ingestionParallelism: NonNegativeInt = NonNegativeInt.tryCreate(DefaultIngestionParallelism), inputMappingParallelism: NonNegativeInt = NonNegativeInt.tryCreate(DefaultInputMappingParallelism), diff --git a/sdk/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/indexer/JdbcIndexer.scala b/sdk/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/indexer/JdbcIndexer.scala index 522c59f5b7e2..2abaac8c10c8 100644 --- a/sdk/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/indexer/JdbcIndexer.scala +++ b/sdk/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/indexer/JdbcIndexer.scala @@ -110,7 +110,11 @@ object JdbcIndexer { ), compressionStrategy = if (config.enableCompression) CompressionStrategy.allGZIP(metrics) - else CompressionStrategy.none(metrics), + else + CompressionStrategy.buildFromConfig(metrics)( + config.enableCompressionConsumingExercise, + config.enableCompressionNonConsumingExercise, + ), maxInputBufferSize = config.maxInputBufferSize.unwrap, inputMappingParallelism = config.inputMappingParallelism.unwrap, dbPrepareParallelism = config.dbPrepareParallelism.unwrap, diff --git a/sdk/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/PruningOffsetService.scala b/sdk/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/PruningOffsetService.scala new file mode 100644 index 000000000000..f2f4bdd4d6fd --- /dev/null +++ b/sdk/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/PruningOffsetService.scala @@ -0,0 +1,15 @@ +// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
+// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.platform.store + +import com.digitalasset.canton.data.Offset +import com.digitalasset.canton.tracing.TraceContext + +import scala.concurrent.Future + +trait PruningOffsetService { + def pruningOffset(implicit + traceContext: TraceContext + ): Future[Option[Offset]] +} diff --git a/sdk/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/backend/UpdateToDbDto.scala b/sdk/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/backend/UpdateToDbDto.scala index 6f8756b36737..0a939f350803 100644 --- a/sdk/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/backend/UpdateToDbDto.scala +++ b/sdk/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/backend/UpdateToDbDto.scala @@ -216,7 +216,7 @@ object UpdateToDbDto { ledger_offset = offset.unwrap, recorded_at = topologyTransaction.recordTime.toMicros, submission_id = Some( - PartyAllocation.TrackerKey.of(party, participant, authorizationEvent).submissionId + PartyAllocation.TrackerKey(party, participant, authorizationEvent).submissionId ), party = Some(party), typ = JdbcLedgerDao.acceptType, @@ -433,13 +433,13 @@ object UpdateToDbDto { exercise_choice = exercise.qualifiedChoiceName.choiceName, exercise_choice_interface_id = exercise.qualifiedChoiceName.interfaceId.map(_.toString), exercise_argument = - compressionStrategy.exerciseArgumentCompression.compress(exerciseArgument), + compressionStrategy.consumingExerciseArgumentCompression.compress(exerciseArgument), exercise_result = - exerciseResult.map(compressionStrategy.exerciseResultCompression.compress), + exerciseResult.map(compressionStrategy.consumingExerciseResultCompression.compress), exercise_actors = exercise.actingParties.map(_.toString), exercise_last_descendant_node_id = lastDescendantNodeId.index, - exercise_argument_compression = compressionStrategy.exerciseArgumentCompression.id, - exercise_result_compression = compressionStrategy.exerciseResultCompression.id, + exercise_argument_compression = compressionStrategy.consumingExerciseArgumentCompression.id, + exercise_result_compression = compressionStrategy.consumingExerciseResultCompression.id, contract_id = exercise.targetCoid, internal_contract_id = None, // this will be filled later template_id = templateId, @@ -451,6 +451,17 @@ object UpdateToDbDto { val internal_contract_id = if (exercise.consuming) transactionAccepted.internalContractIds.get(exercise.targetCoid) else None + val (argumentCompression, resultCompression) = + if (exercise.consuming) + ( + compressionStrategy.consumingExerciseArgumentCompression, + compressionStrategy.consumingExerciseResultCompression, + ) + else + ( + compressionStrategy.nonConsumingExerciseArgumentCompression, + compressionStrategy.nonConsumingExerciseResultCompression, + ) DbDto.witnessedExercisedDbDtos( event_offset = offset.unwrap, update_id = transactionAccepted.updateId.toProtoPrimitive.toByteArray, @@ -468,14 +479,12 @@ object UpdateToDbDto { consuming = exercise.consuming, exercise_choice = exercise.qualifiedChoiceName.choiceName, exercise_choice_interface_id = exercise.qualifiedChoiceName.interfaceId.map(_.toString), - exercise_argument = - compressionStrategy.exerciseArgumentCompression.compress(exerciseArgument), - exercise_result = - exerciseResult.map(compressionStrategy.exerciseResultCompression.compress), + exercise_argument = 
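          // argumentCompression and resultCompression were selected above based on
          // exercise.consuming, so witnessed consuming and non-consuming exercises
          // are compressed independently of each other.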
argumentCompression.compress(exerciseArgument), + exercise_result = exerciseResult.map(resultCompression.compress), exercise_actors = exercise.actingParties.map(_.toString), exercise_last_descendant_node_id = lastDescendantNodeId.index, - exercise_argument_compression = compressionStrategy.exerciseArgumentCompression.id, - exercise_result_compression = compressionStrategy.exerciseResultCompression.id, + exercise_argument_compression = argumentCompression.id, + exercise_result_compression = resultCompression.id, contract_id = exercise.targetCoid, internal_contract_id = internal_contract_id, template_id = templateId, diff --git a/sdk/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/backend/UpdateToDbDtoLegacy.scala b/sdk/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/backend/UpdateToDbDtoLegacy.scala index ae8a2ef37303..7e68dd3b0fe5 100644 --- a/sdk/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/backend/UpdateToDbDtoLegacy.scala +++ b/sdk/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/backend/UpdateToDbDtoLegacy.scala @@ -218,7 +218,7 @@ object UpdateToDbDtoLegacy { ledger_offset = offset.unwrap, recorded_at = topologyTransaction.recordTime.toMicros, submission_id = Some( - PartyAllocation.TrackerKey.of(party, participant, authorizationEvent).submissionId + PartyAllocation.TrackerKey(party, participant, authorizationEvent).submissionId ), party = Some(party), typ = JdbcLedgerDao.acceptType, @@ -373,16 +373,19 @@ object UpdateToDbDtoLegacy { representative_package_id = representativePackageId.toString, flat_event_witnesses = flatWitnesses, tree_event_witnesses = treeWitnesses, - create_argument = compressionStrategy.createArgumentCompression.compress(createArgument), + create_argument = + compressionStrategy.createArgumentCompressionLegacy.compress(createArgument), create_signatories = create.signatories.map(_.toString), create_observers = create.stakeholders.diff(create.signatories).map(_.toString), create_key_value = createKeyValue - .map(compressionStrategy.createKeyValueCompression.compress), + .map(compressionStrategy.createKeyValueCompressionLegacy.compress), create_key_maintainers = create.keyOpt.map(_.maintainers.map(_.toString)), create_key_hash = create.keyOpt.map(_.globalKey.hash.bytes.toHexString), - create_argument_compression = compressionStrategy.createArgumentCompression.id, + create_argument_compression = compressionStrategy.createArgumentCompressionLegacy.id, create_key_value_compression = - compressionStrategy.createKeyValueCompression.id.filter(_ => createKeyValue.isDefined), + compressionStrategy.createKeyValueCompressionLegacy.id.filter(_ => + createKeyValue.isDefined + ), event_sequential_id = 0, // this is filled later authentication_data = transactionAccepted.contractAuthenticationData .get(create.coid) @@ -466,13 +469,13 @@ object UpdateToDbDtoLegacy { exercise_choice = exercise.qualifiedChoiceName.choiceName, exercise_choice_interface_id = exercise.qualifiedChoiceName.interfaceId.map(_.toString), exercise_argument = - compressionStrategy.exerciseArgumentCompression.compress(exerciseArgument), + compressionStrategy.consumingExerciseArgumentCompression.compress(exerciseArgument), exercise_result = exerciseResult - .map(compressionStrategy.exerciseResultCompression.compress), + .map(compressionStrategy.consumingExerciseResultCompression.compress), exercise_actors = 
exercise.actingParties.map(_.toString), exercise_last_descendant_node_id = lastDescendantNodeId.index, - exercise_argument_compression = compressionStrategy.exerciseArgumentCompression.id, - exercise_result_compression = compressionStrategy.exerciseResultCompression.id, + exercise_argument_compression = compressionStrategy.consumingExerciseArgumentCompression.id, + exercise_result_compression = compressionStrategy.consumingExerciseResultCompression.id, event_sequential_id = 0, // this is filled later synchronizer_id = transactionAccepted.synchronizerId, trace_context = serializedTraceContext, @@ -658,12 +661,14 @@ object UpdateToDbDtoLegacy { .diff(assign.createNode.signatories) .map(_.toString), create_key_value = createKeyValue - .map(compressionStrategy.createKeyValueCompression.compress), + .map(compressionStrategy.createKeyValueCompressionLegacy.compress), create_key_maintainers = assign.createNode.keyOpt.map(_.maintainers.map(_.toString)), create_key_hash = assign.createNode.keyOpt.map(_.globalKey.hash.bytes.toHexString), - create_argument_compression = compressionStrategy.createArgumentCompression.id, + create_argument_compression = compressionStrategy.createArgumentCompressionLegacy.id, create_key_value_compression = - compressionStrategy.createKeyValueCompression.id.filter(_ => createKeyValue.isDefined), + compressionStrategy.createKeyValueCompressionLegacy.id.filter(_ => + createKeyValue.isDefined + ), event_sequential_id = 0L, // this is filled later ledger_effective_time = assign.ledgerEffectiveTime.micros, authentication_data = assign.contractAuthenticationData.toByteArray, diff --git a/sdk/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/dao/CommandCompletionsReader.scala b/sdk/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/dao/CommandCompletionsReader.scala index 00ec1296313c..935917641919 100644 --- a/sdk/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/dao/CommandCompletionsReader.scala +++ b/sdk/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/dao/CommandCompletionsReader.scala @@ -13,8 +13,6 @@ import com.digitalasset.canton.platform.{Party, UserId} import org.apache.pekko.NotUsed import org.apache.pekko.stream.scaladsl.Source -import java.sql.Connection - /** @param pageSize * a single DB fetch query is guaranteed to fetch no more than this many results. 
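 * For example, with pageSize = 1000, serving a completion stream of 2500 rows takes at least
 * three successive fetches of at most 1000 rows each rather than one unbounded query.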
*/ @@ -44,7 +42,7 @@ private[dao] final class CommandCompletionsReader( loggingContext: LoggingContextWithTrace ): Source[(Offset, CompletionStreamResponse), NotUsed] = { val pruneSafeQuery = - (range: QueryRange[Offset]) => { implicit connection: Connection => + (range: QueryRange[Offset]) => queryValidRange.withRangeNotPruned[Vector[CompletionStreamResponse]]( minOffsetInclusive = startInclusive, maxOffsetInclusive = endInclusive, @@ -54,15 +52,16 @@ private[dao] final class CommandCompletionsReader( s"Command completions request from ${startInclusive.unwrap} to ${endInclusive.unwrap} is beyond ledger end offset ${ledgerEndOffset .fold(0L)(_.unwrap)}", ) { - storageBackend.commandCompletions( - startInclusive = range.startInclusive, - endInclusive = range.endInclusive, - userId = userId, - parties = parties, - limit = pageSize, - )(connection) + dispatcher.executeSql(metrics.index.db.getCompletions)( + storageBackend.commandCompletions( + startInclusive = range.startInclusive, + endInclusive = range.endInclusive, + userId = userId, + parties = parties, + limit = pageSize, + ) + ) } - } val initialRange = new QueryRange[Offset]( startInclusive = startInclusive, @@ -76,7 +75,7 @@ private[dao] final class CommandCompletionsReader( initialRange.copy(startInclusive = lastOffset.increment) }, ) { (subRange: QueryRange[Offset]) => - dispatcher.executeSql(metrics.index.db.getCompletions)(pruneSafeQuery(subRange)) + pruneSafeQuery(subRange) } source.map(response => offsetFor(response) -> response) } diff --git a/sdk/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/dao/JdbcLedgerDao.scala b/sdk/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/dao/JdbcLedgerDao.scala index ce616680461d..c6d20a18915b 100644 --- a/sdk/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/dao/JdbcLedgerDao.scala +++ b/sdk/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/dao/JdbcLedgerDao.scala @@ -14,6 +14,7 @@ import com.digitalasset.canton.lifecycle.FutureUnlessShutdown import com.digitalasset.canton.logging.LoggingContextWithTrace.implicitExtractTraceContext import com.digitalasset.canton.logging.{LoggingContextWithTrace, NamedLoggerFactory, NamedLogging} import com.digitalasset.canton.metrics.LedgerApiServerMetrics +import com.digitalasset.canton.participant.store.ContractStore import com.digitalasset.canton.platform.* import com.digitalasset.canton.platform.config.{ ActiveContractsServiceStreamsConfig, @@ -26,13 +27,15 @@ import com.digitalasset.canton.platform.store.cache.LedgerEndCache import com.digitalasset.canton.platform.store.dao.events.* import com.digitalasset.canton.platform.store.interning.StringInterning import com.digitalasset.canton.platform.store.utils.QueueBasedConcurrencyLimiter -import com.digitalasset.canton.protocol.UpdateId +import com.digitalasset.canton.protocol.{ContractInstance, ContractMetadata, UpdateId} import com.digitalasset.canton.topology.SynchronizerId import com.digitalasset.canton.tracing.TraceContext import com.digitalasset.canton.util.Thereafter.syntax.* import com.digitalasset.daml.lf.data.Time.Timestamp import com.digitalasset.daml.lf.data.{Bytes, Ref} +import com.digitalasset.daml.lf.transaction.CreationTime.CreatedAt import com.digitalasset.daml.lf.transaction.{CommittedTransaction, Node} +import com.google.protobuf.ByteString import io.opentelemetry.api.trace.Tracer import 
scala.concurrent.{ExecutionContext, Future} @@ -62,7 +65,10 @@ private class JdbcLedgerDao( ) => FutureUnlessShutdown[Vector[Offset]], contractLoader: ContractLoader, translation: LfValueTranslation, -) extends LedgerReadDao + contractStore: ContractStore, + pruningOffsetService: PruningOffsetService, +)(implicit ec: ExecutionContext) + extends LedgerReadDao with LedgerWriteDaoForTests with NamedLogging { @@ -120,7 +126,7 @@ private class JdbcLedgerDao( party = partyDetails.party, // HACK: the `PartyAddedToParticipant` transmits `participantId`s, while here we only have the information // whether the party is locally hosted or not. We use the `nonLocalParticipantId` to get the desired effect of - // the `isLocal = False` information to be transmitted via a `PartyAddedToParticpant` `Update`. + // the `isLocal = False` information to be transmitted via a `PartyAddedToParticipant` `Update`. // // This will be properly resolved once we move away from the `sandbox-classic` codebase. participantId = if (partyDetails.isLocal) participantId else NonLocalParticipantId, @@ -224,11 +230,13 @@ private class JdbcLedgerDao( override def pruningOffset(implicit loggingContext: LoggingContextWithTrace ): Future[Option[Offset]] = - dbDispatcher.executeSql(metrics.index.db.fetchPruningOffsetsMetrics)( - parameterStorageBackend.prunedUpToInclusive - ) + pruningOffsetService.pruningOffset - private val queryValidRange = QueryValidRangeImpl(parameterStorageBackend, loggerFactory) + private val queryValidRange = QueryValidRangeImpl( + ledgerEndCache = ledgerEndCache, + pruningOffsetService = pruningOffsetService, + loggerFactory = loggerFactory, + ) private val globalIdQueriesLimiter = new QueueBasedConcurrencyLimiter( parallelism = globalMaxEventIdQueries, @@ -248,6 +256,7 @@ private class JdbcLedgerDao( queryValidRange = queryValidRange, eventStorageBackend = readStorageBackend.eventStorageBackend, lfValueTranslation = translation, + contractStore = contractStore, incompleteOffsets = incompleteOffsets, metrics = metrics, tracer = tracer, @@ -271,6 +280,7 @@ private class JdbcLedgerDao( queryValidRange = queryValidRange, eventStorageBackend = readStorageBackend.eventStorageBackend, lfValueTranslation = translation, + contractStore = contractStore, metrics = metrics, loggerFactory = loggerFactory, )(queryExecutionContext) @@ -283,6 +293,7 @@ private class JdbcLedgerDao( queryValidRange = queryValidRange, eventStorageBackend = readStorageBackend.eventStorageBackend, lfValueTranslation = translation, + contractStore = contractStore, metrics = metrics, tracer = tracer, topologyTransactionsStreamReader = topologyTransactionsStreamReader, @@ -295,6 +306,8 @@ private class JdbcLedgerDao( eventStorageBackend = readStorageBackend.eventStorageBackend, metrics = metrics, lfValueTranslation = translation, + queryValidRange = queryValidRange, + contractStore = contractStore, loggerFactory = loggerFactory, )(queryExecutionContext) @@ -303,6 +316,7 @@ private class JdbcLedgerDao( eventStorageBackend = readStorageBackend.eventStorageBackend, metrics = metrics, lfValueTranslation = translation, + queryValidRange = queryValidRange, loggerFactory = loggerFactory, )(queryExecutionContext) @@ -311,6 +325,8 @@ private class JdbcLedgerDao( eventStorageBackend = readStorageBackend.eventStorageBackend, metrics = metrics, lfValueTranslation = translation, + queryValidRange = queryValidRange, + contractStore = contractStore, loggerFactory = loggerFactory, )(queryExecutionContext) @@ -347,13 +363,14 @@ private class JdbcLedgerDao( 
override def eventsReader: LedgerDaoEventsReader = new EventsReader( - dbDispatcher, - readStorageBackend.eventStorageBackend, - parameterStorageBackend, - metrics, - translation, - ledgerEndCache, - loggerFactory, + dbDispatcher = dbDispatcher, + eventStorageBackend = readStorageBackend.eventStorageBackend, + parameterStorageBackend = parameterStorageBackend, + metrics = metrics, + lfValueTranslation = translation, + contractStore = contractStore, + ledgerEndCache = ledgerEndCache, + loggerFactory = loggerFactory, )(queryExecutionContext) override val completions: CommandCompletionsReader = @@ -380,15 +397,37 @@ private class JdbcLedgerDao( contractActivenessChanged: Boolean, )(implicit loggingContext: LoggingContextWithTrace - ): Future[PersistenceResponse] = { - logger.info("Storing transaction") - val internalContractIds: Map[ContractId, Long] = + ): Future[PersistenceResponse] = for { + _ <- Future.successful(logger.info("Storing contracts into participant contract store")) + _ <- contractStore + .storeContracts( + transaction.nodes.values + .collect { case create: Node.Create => create } + .map(FatContract.fromCreateNode(_, CreatedAt(ledgerEffectiveTime), Bytes.Empty)) + .map( + ContractInstance + .ContractInstanceImpl( + _, + ContractMetadata.empty, + ByteString.EMPTY, + ) + ) + .toSeq + ) + .failOnShutdownTo(new IllegalStateException("Storing contracts was interrupted")) + + contractIds = transaction.nodes.values .collect { case create: Node.Create => create.coid } - .zipWithIndex - .map { case (cid, idx) => cid -> idx.toLong } - .toMap - dbDispatcher + + internalContractIds <- contractStore + .lookupBatchedNonCachedInternalIds(contractIds) + .failOnShutdownTo( + new IllegalStateException("Looking up internal contract ids was interrupted") + ) + + _ <- Future.successful(logger.info("Storing transaction")) + _ <- dbDispatcher .executeSql(metrics.index.db.storeTransactionDbMetrics) { implicit conn => sequentialIndexer.store( conn, @@ -429,8 +468,9 @@ private class JdbcLedgerDao( ) ), ) - PersistenceResponse.Ok } + } yield { + PersistenceResponse.Ok } } @@ -464,7 +504,9 @@ private[platform] object JdbcLedgerDao { ) => FutureUnlessShutdown[Vector[Offset]], contractLoader: ContractLoader = ContractLoader.dummyLoader, lfValueTranslation: LfValueTranslation, - ): LedgerReadDao = + pruningOffsetService: PruningOffsetService, + contractStore: ContractStore, + )(implicit ec: ExecutionContext): LedgerReadDao = new JdbcLedgerDao( dbDispatcher = dbSupport.dbDispatcher, queryExecutionContext = queryExecutionContext, @@ -487,6 +529,8 @@ private[platform] object JdbcLedgerDao { incompleteOffsets = incompleteOffsets, contractLoader = contractLoader, translation = lfValueTranslation, + pruningOffsetService = pruningOffsetService, + contractStore = contractStore, ) def writeForTests( @@ -506,7 +550,9 @@ private[platform] object JdbcLedgerDao { loggerFactory: NamedLoggerFactory, contractLoader: ContractLoader = ContractLoader.dummyLoader, lfValueTranslation: LfValueTranslation, - ): LedgerReadDao with LedgerWriteDaoForTests = + pruningOffsetService: PruningOffsetService, + contractStore: ContractStore, + )(implicit ec: ExecutionContext): LedgerReadDao with LedgerWriteDaoForTests = new JdbcLedgerDao( dbDispatcher = dbSupport.dbDispatcher, queryExecutionContext = servicesExecutionContext, @@ -529,6 +575,8 @@ private[platform] object JdbcLedgerDao { incompleteOffsets = (_, _, _) => FutureUnlessShutdown.pure(Vector.empty), contractLoader = contractLoader, translation = lfValueTranslation, + 
pruningOffsetService = pruningOffsetService, + contractStore = contractStore, ) val acceptType = "accept" diff --git a/sdk/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/dao/events/ACSReader.scala b/sdk/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/dao/events/ACSReader.scala index ff2664a3dbdc..bb841d16cd5e 100644 --- a/sdk/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/dao/events/ACSReader.scala +++ b/sdk/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/dao/events/ACSReader.scala @@ -19,7 +19,7 @@ import com.digitalasset.canton.logging.LoggingContextWithTrace.implicitExtractTr import com.digitalasset.canton.logging.{LoggingContextWithTrace, NamedLoggerFactory, NamedLogging} import com.digitalasset.canton.metrics.LedgerApiServerMetrics import com.digitalasset.canton.networking.grpc.CantonGrpcUtil.GrpcErrors.AbortedDueToShutdown -import com.digitalasset.canton.platform.TemplatePartiesFilter +import com.digitalasset.canton.participant.store.ContractStore import com.digitalasset.canton.platform.config.ActiveContractsServiceStreamsConfig import com.digitalasset.canton.platform.store.backend.EventStorageBackend import com.digitalasset.canton.platform.store.backend.EventStorageBackend.SequentialIdBatch.Ids @@ -43,8 +43,10 @@ import com.digitalasset.canton.platform.store.utils.{ QueueBasedConcurrencyLimiter, Telemetry, } +import com.digitalasset.canton.platform.{FatContract, TemplatePartiesFilter} import com.digitalasset.canton.tracing.TraceContext import com.digitalasset.canton.util.PekkoUtil.syntax.* +import com.digitalasset.canton.util.Thereafter.syntax.ThereafterAsyncOps import com.digitalasset.daml.lf.data.Ref import com.digitalasset.daml.lf.data.Ref.FullIdentifier import com.digitalasset.daml.lf.value.Value.ContractId @@ -53,8 +55,8 @@ import org.apache.pekko.NotUsed import org.apache.pekko.stream.Attributes import org.apache.pekko.stream.scaladsl.Source -import java.sql.Connection import scala.concurrent.{ExecutionContext, Future} +import scala.util.Success import scala.util.chaining.* /** Streams ACS events (active contracts) in a two step process consisting of: @@ -75,6 +77,7 @@ class ACSReader( queryValidRange: QueryValidRange, eventStorageBackend: EventStorageBackend, lfValueTranslation: LfValueTranslation, + contractStore: ContractStore, incompleteOffsets: ( Offset, Option[Set[Ref.Party]], @@ -122,7 +125,7 @@ class ACSReader( loggingContext: LoggingContextWithTrace ): Source[GetActiveContractsResponse, NotUsed] = { val (activeAtOffset, activeAtEventSeqId) = activeAt - def withValidatedActiveAt[T](query: => T)(implicit connection: Connection) = + def withValidatedActiveAt[T](query: => Future[T]) = queryValidRange.withOffsetNotBeforePruning( activeAtOffset, pruned => @@ -217,50 +220,68 @@ class ACSReader( ), ) + def withFatContracts[T]( + internalContractId: T => Long + )(payloads: Vector[T]): Future[Vector[(T, Option[FatContract])]] = + for { + contractsM <- contractStore + .lookupBatchedNonCached( + payloads.map(internalContractId) + ) + .failOnShutdownTo(AbortedDueToShutdown.Error().asGrpcError) + } yield payloads + .map { payload => + val fatContractO = contractsM.get(internalContractId(payload)).map(_.inst) + (payload, fatContractO) + } + def fetchActiveCreatePayloads( ids: Iterable[Long] - ): Future[Vector[RawActiveContractLegacy]] = + ): Future[Vector[(RawActiveContractLegacy, 
Option[FatContract])]] = localPayloadQueriesLimiter.execute( globalPayloadQueriesLimiter.execute( - dispatcher.executeSql(metrics.index.db.getActiveContractBatchForCreatedLegacy) { - implicit connection => - val result = withValidatedActiveAt( + withValidatedActiveAt( + dispatcher + .executeSql(metrics.index.db.getActiveContractBatchForCreatedLegacy) { eventStorageBackend.activeContractCreateEventBatchLegacy( eventSequentialIds = ids, allFilterParties = allFilterParties, endInclusive = activeAtEventSeqId, - )(connection) - ) - logger.debug( - s"getActiveContractBatch returned ${ids.size}/${result.size} ${ids.lastOption - .map(last => s"until $last") - .getOrElse("")}" - ) - result + ) + } + .flatMap(withFatContracts(_.rawCreatedEvent.internalContractId)) + ).thereafterP { case Success(result) => + logger.debug( + s"getActiveContractBatch returned ${result.size}/${ids.size} ${ids.lastOption + .map(last => s"until $last") + .getOrElse("")}" + ) } ) ) def fetchActiveAssignPayloads( ids: Iterable[Long] - ): Future[Vector[RawActiveContractLegacy]] = + ): Future[Vector[(RawActiveContractLegacy, Option[FatContract])]] = localPayloadQueriesLimiter.execute( globalPayloadQueriesLimiter.execute( - dispatcher.executeSql(metrics.index.db.getActiveContractBatchForAssignedLegacy) { - implicit connection => - val result = withValidatedActiveAt( + withValidatedActiveAt( + dispatcher + .executeSql(metrics.index.db.getActiveContractBatchForAssignedLegacy)( eventStorageBackend.activeContractAssignEventBatchLegacy( eventSequentialIds = ids, allFilterParties = allFilterParties, endInclusive = activeAtEventSeqId, - )(connection) - ) - logger.debug( - s"getActiveContractBatch returned ${ids.size}/${result.size} ${ids.lastOption - .map(last => s"until $last") - .getOrElse("")}" + ) ) - result + .flatMap(withFatContracts(_.rawCreatedEvent.internalContractId)) + ).thereafterP { case Success(result) => + logger.debug( + s"getActiveContractBatch returned ${result.size}/${ids.size} ${ids.lastOption + .map(last => s"until $last") + .getOrElse("")}" + ) + } ) ) @@ -301,27 +322,30 @@ class ACSReader( def fetchAssignPayloads( ids: Iterable[Long] - ): Future[Vector[Entry[RawAssignEventLegacy]]] = + ): Future[Vector[(Entry[RawAssignEventLegacy], Option[FatContract])]] = if (ids.isEmpty) Future.successful(Vector.empty) else localPayloadQueriesLimiter.execute( globalPayloadQueriesLimiter.execute( - dispatcher.executeSql( - metrics.index.db.reassignmentStream.fetchEventAssignPayloadsLegacy - ) { implicit connection => - val result = withValidatedActiveAt( - eventStorageBackend.assignEventBatchLegacy( - eventSequentialIds = Ids(ids), - allFilterParties = allFilterParties, - )(connection) - ) - logger.debug( - s"assignEventBatch returned ${ids.size}/${result.size} ${ids.lastOption - .map(last => s"until $last") - .getOrElse("")}" - ) - result - } + withValidatedActiveAt( + dispatcher + .executeSql( + metrics.index.db.reassignmentStream.fetchEventAssignPayloadsLegacy + )( + eventStorageBackend.assignEventBatchLegacy( + eventSequentialIds = Ids(ids), + allFilterParties = allFilterParties, + ) + ) + .flatMap(withFatContracts(_.event.rawCreatedEvent.internalContractId)) + ) + .thereafterP { case Success(result) => + logger.debug( + s"assignEventBatch returned ${result.size}/${ids.size} ${ids.lastOption + .map(last => s"until $last") + .getOrElse("")}" + ) + } ) ) @@ -330,21 +354,21 @@ class ACSReader( ): Future[Vector[Entry[RawUnassignEventLegacy]]] = localPayloadQueriesLimiter.execute( globalPayloadQueriesLimiter.execute( - 
dispatcher.executeSql( - metrics.index.db.reassignmentStream.fetchEventUnassignPayloadsLegacy - ) { implicit connection => - val result = withValidatedActiveAt( + withValidatedActiveAt( + dispatcher.executeSql( + metrics.index.db.reassignmentStream.fetchEventUnassignPayloadsLegacy + )( eventStorageBackend.unassignEventBatchLegacy( eventSequentialIds = Ids(ids), allFilterParties = allFilterParties, - )(connection) + ) ) + ).thereafterP { case Success(result) => logger.debug( - s"unassignEventBatch returned ${ids.size}/${result.size} ${ids.lastOption + s"unassignEventBatch returned ${result.size}/${ids.size} ${ids.lastOption .map(last => s"until $last") .getOrElse("")}" ) - result } ) ) @@ -387,37 +411,43 @@ class ACSReader( def fetchCreatePayloads( ids: Iterable[Long] - ): Future[Vector[Entry[RawCreatedEventLegacy]]] = + ): Future[Vector[(Entry[RawCreatedEventLegacy], Option[FatContract])]] = if (ids.isEmpty) Future.successful(Vector.empty) else globalPayloadQueriesLimiter.execute( - dispatcher - .executeSql(metrics.index.db.updatesAcsDeltaStream.fetchEventCreatePayloadsLegacy) { - implicit connection => - val result = withValidatedActiveAt( - eventStorageBackend.fetchEventPayloadsAcsDeltaLegacy( - EventPayloadSourceForUpdatesAcsDeltaLegacy.Create - )( - eventSequentialIds = Ids(ids), - requestingParties = allFilterParties, - )(connection) - ) - logger.debug( - s"fetchEventPayloads for Create returned ${ids.size}/${result.size} ${ids.lastOption - .map(last => s"until $last") - .getOrElse("")}" + withValidatedActiveAt( + dispatcher + .executeSql( + metrics.index.db.updatesAcsDeltaStream.fetchEventCreatePayloadsLegacy + )( + eventStorageBackend.fetchEventPayloadsAcsDeltaLegacy( + EventPayloadSourceForUpdatesAcsDeltaLegacy.Create + )( + eventSequentialIds = Ids(ids), + requestingParties = allFilterParties, ) + ) + .map(result => result.view.collect { entry => entry.event match { case created: RawCreatedEventLegacy => entry.copy(event = created) } }.toVector - } + ) + .flatMap(withFatContracts(_.event.internalContractId)) + .thereafterP { case Success(result) => + logger.debug( + s"fetchEventPayloads for Create returned ${result.size}/${ids.size} ${ids.lastOption + .map(last => s"until $last") + .getOrElse("")}" + ) + } + ) ) def fetchCreatedEventsForUnassignedBatch(batch: Seq[Entry[RawUnassignEventLegacy]]): Future[ - Seq[(Entry[RawUnassignEventLegacy], Entry[RawCreatedEventLegacy])] + Seq[(Entry[RawUnassignEventLegacy], (Entry[RawCreatedEventLegacy], Option[FatContract]))] ] = { def extractUnassignProperties( @@ -440,18 +470,25 @@ class ACSReader( unassignPropertiesToAssignedIds: Map[UnassignProperties, Long] <- fetchAssignIdsFor( unassignPropertiesSeq ) - assignedPayloads: Seq[Entry[RawAssignEventLegacy]] <- fetchAssignPayloads( - unassignPropertiesToAssignedIds.values - ) - assignedIdsToPayloads: Map[Long, Entry[RawAssignEventLegacy]] = assignedPayloads - .map(payload => payload.eventSequentialId -> payload) - .toMap + assignedPayloads: Seq[(Entry[RawAssignEventLegacy], Option[FatContract])] <- + fetchAssignPayloads( + unassignPropertiesToAssignedIds.values + ) + assignedIdsToPayloads: Map[Long, (Entry[RawAssignEventLegacy], Option[FatContract])] = + assignedPayloads + .map(payload => payload._1.eventSequentialId -> payload) + .toMap // map the requested unassign event properties to the returned raw created events using the assign sequential id - rawCreatedFromAssignedResults: Map[UnassignProperties, Entry[RawCreatedEventLegacy]] = + rawCreatedFromAssignedResults: Map[ + UnassignProperties, 
+ (Entry[RawCreatedEventLegacy], Option[FatContract]), + ] = unassignPropertiesToAssignedIds.flatMap { case (params, assignedId) => assignedIdsToPayloads .get(assignedId) - .map(assignEntry => (params, assignEntry.map(_.rawCreatedEvent))) + .map { case (assignEntry, fatContract) => + (params, (assignEntry.map(_.rawCreatedEvent), fatContract)) + } } // if not found in the assigned events, search the created events @@ -463,8 +500,10 @@ class ACSReader( .distinct createdIds <- fetchCreateIdsForContractIds(missingContractIds) createdPayloads <- fetchCreatePayloads(createdIds) - rawCreatedFromCreatedResults: Map[ContractId, Vector[Entry[RawCreatedEventLegacy]]] = - createdPayloads.groupBy(_.event.contractId) + rawCreatedFromCreatedResults: Map[ContractId, Vector[ + (Entry[RawCreatedEventLegacy], Option[FatContract]) + ]] = + createdPayloads.groupBy(_._1.event.contractId) } yield batch.flatMap { rawUnassignEntry => val unassignProperties = extractUnassignProperties(rawUnassignEntry) rawCreatedFromAssignedResults @@ -479,7 +518,7 @@ class ACSReader( candidateCreateEntries .find(createdEntry => // the created event should match the synchronizer id of the unassign entry and have a lower sequential id than it - createdEntry.synchronizerId == unassignProperties.synchronizerId && createdEntry.eventSequentialId < unassignProperties.sequentialId + createdEntry._1.synchronizerId == unassignProperties.synchronizerId && createdEntry._1.eventSequentialId < unassignProperties.sequentialId ) } } @@ -547,10 +586,10 @@ class ACSReader( .mapConcat(identity) activeFromCreatePipe - .mergeSorted(activeFromAssignPipe)(Ordering.by(_.eventSequentialId)) - .mapAsync(config.contractProcessingParallelism)( - toApiResponseActiveContract(_, eventProjectionProperties) - ) + .mergeSorted(activeFromAssignPipe)(Ordering.by(_._1.eventSequentialId)) + .mapAsync(config.contractProcessingParallelism) { case (rawActiveContract, fatContractO) => + toApiResponseActiveContract(rawActiveContract, fatContractO, eventProjectionProperties) + } .concatLazy( // compute incomplete reassignments Source.lazyFutureSource(() => @@ -573,7 +612,7 @@ class ACSReader( .mapAsync(config.maxParallelPayloadCreateQueries)( fetchAssignPayloads ) - .mapConcat(_.filter(assignMeetsConstraints)) + .mapConcat(_.filter(entryPair => assignMeetsConstraints(entryPair._1))) .mapAsync(config.contractProcessingParallelism)( toApiResponseIncompleteAssigned(eventProjectionProperties) ) @@ -613,16 +652,24 @@ class ACSReader( private def toApiResponseActiveContract( rawActiveContract: RawActiveContractLegacy, + fatContract: Option[FatContract], eventProjectionProperties: EventProjectionProperties, )(implicit lc: LoggingContextWithTrace): Future[GetActiveContractsResponse] = Timed.future( future = Future.delegate( lfValueTranslation - .deserializeRawCreated( + .toApiCreatedEvent( eventProjectionProperties = eventProjectionProperties, - rawCreatedEvent = rawActiveContract.rawCreatedEvent, + fatContractInstance = fatContract.getOrElse( + throw new IllegalStateException( + s"Contract for internal contract id ${rawActiveContract.rawCreatedEvent.internalContractId} was not found in the contract store." 
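+                // Every active create event is expected to have its payload persisted in the
+                // participant contract store, so a miss here is treated as a data inconsistency
+                // rather than a recoverable lookup failure.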
+ ) + ), offset = rawActiveContract.offset, nodeId = rawActiveContract.nodeId, + representativePackageId = rawActiveContract.rawCreatedEvent.representativePackageId, + witnesses = rawActiveContract.rawCreatedEvent.witnessParties, + acsDelta = true, ) .map(createdEvent => GetActiveContractsResponse( @@ -641,44 +688,65 @@ class ACSReader( ) private def toApiResponseIncompleteAssigned(eventProjectionProperties: EventProjectionProperties)( - rawAssignEntry: Entry[RawAssignEventLegacy] + rawAssignEntryFatContract: (Entry[RawAssignEventLegacy], Option[FatContract]) )(implicit lc: LoggingContextWithTrace): Future[(Long, GetActiveContractsResponse)] = - Timed.future( - future = Future.delegate( - lfValueTranslation - .deserializeRawCreated( - eventProjectionProperties = eventProjectionProperties, - rawCreatedEvent = rawAssignEntry.event.rawCreatedEvent, - offset = rawAssignEntry.offset, - nodeId = rawAssignEntry.nodeId, - ) - .map(createdEvent => - rawAssignEntry.offset -> GetActiveContractsResponse( - workflowId = rawAssignEntry.workflowId.getOrElse(""), - contractEntry = GetActiveContractsResponse.ContractEntry.IncompleteAssigned( - IncompleteAssigned( - Some(UpdateReader.toAssignedEvent(rawAssignEntry.event, createdEvent)) + rawAssignEntryFatContract match { + case (rawAssignEntry, fatContract) => + Timed.future( + future = Future.delegate( + lfValueTranslation + .toApiCreatedEvent( + eventProjectionProperties = eventProjectionProperties, + fatContractInstance = fatContract.getOrElse( + throw new IllegalStateException( + s"Contract for internal contract id ${rawAssignEntry.event.rawCreatedEvent.internalContractId} was not found in the contract store." + ) + ), + offset = rawAssignEntry.offset, + nodeId = rawAssignEntry.nodeId, + representativePackageId = + rawAssignEntry.event.rawCreatedEvent.representativePackageId, + witnesses = rawAssignEntry.event.rawCreatedEvent.witnessParties, + acsDelta = true, + ) + .map(createdEvent => + rawAssignEntry.offset -> GetActiveContractsResponse( + workflowId = rawAssignEntry.workflowId.getOrElse(""), + contractEntry = GetActiveContractsResponse.ContractEntry.IncompleteAssigned( + IncompleteAssigned( + Some(UpdateReader.toAssignedEvent(rawAssignEntry.event, createdEvent)) + ) + ), ) - ), - ) - ) - ), - timer = dbMetrics.getActiveContracts.translationTimer, - ) + ) + ), + timer = dbMetrics.getActiveContracts.translationTimer, + ) + } private def toApiResponseIncompleteUnassigned( eventProjectionProperties: EventProjectionProperties )( - rawUnassignEntryWithCreate: (Entry[RawUnassignEventLegacy], Entry[RawCreatedEventLegacy]) + rawUnassignEntryWithCreate: ( + Entry[RawUnassignEventLegacy], + (Entry[RawCreatedEventLegacy], Option[FatContract]), + ) )(implicit lc: LoggingContextWithTrace): Future[(Long, GetActiveContractsResponse)] = { - val (rawUnassignEntry, rawCreate) = rawUnassignEntryWithCreate + val (rawUnassignEntry, (rawCreate, fatContract)) = rawUnassignEntryWithCreate Timed.future( future = lfValueTranslation - .deserializeRawCreated( + .toApiCreatedEvent( eventProjectionProperties = eventProjectionProperties, - rawCreatedEvent = rawCreate.event, + fatContractInstance = fatContract.getOrElse( + throw new IllegalStateException( + s"Contract for internal contract id ${rawCreate.event.internalContractId} was not found in the contract store." 
+ ) + ), offset = rawCreate.offset, nodeId = rawCreate.nodeId, + representativePackageId = rawCreate.event.representativePackageId, + witnesses = rawCreate.event.witnessParties, + acsDelta = true, ) .map(createdEvent => rawUnassignEntry.offset -> GetActiveContractsResponse( diff --git a/sdk/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/dao/events/CompressionStrategy.scala b/sdk/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/dao/events/CompressionStrategy.scala index ae3868a2d833..55f488612d6e 100644 --- a/sdk/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/dao/events/CompressionStrategy.scala +++ b/sdk/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/dao/events/CompressionStrategy.scala @@ -10,10 +10,12 @@ import com.digitalasset.canton.platform.store.serialization.Compression import java.io.ByteArrayOutputStream final case class CompressionStrategy( - createArgumentCompression: FieldCompressionStrategy, - createKeyValueCompression: FieldCompressionStrategy, - exerciseArgumentCompression: FieldCompressionStrategy, - exerciseResultCompression: FieldCompressionStrategy, + createArgumentCompressionLegacy: FieldCompressionStrategy, // TODO(i25857) not needed with new schema anymore + createKeyValueCompressionLegacy: FieldCompressionStrategy, // TODO(i25857) not needed with new schema anymore + consumingExerciseArgumentCompression: FieldCompressionStrategy, + consumingExerciseResultCompression: FieldCompressionStrategy, + nonConsumingExerciseArgumentCompression: FieldCompressionStrategy, + nonConsumingExerciseResultCompression: FieldCompressionStrategy, ) object CompressionStrategy { @@ -24,29 +26,59 @@ object CompressionStrategy { def allGZIP(metrics: LedgerApiServerMetrics): CompressionStrategy = buildUniform(Compression.Algorithm.GZIP, metrics) + def buildFromConfig( + metrics: LedgerApiServerMetrics + )(consumingExercise: Boolean, nonConsumingExercise: Boolean): CompressionStrategy = { + val consumingAlgorithm: Compression.Algorithm = + if (consumingExercise) Compression.Algorithm.GZIP else Compression.Algorithm.None + val nonConsumingAlgorithm: Compression.Algorithm = + if (nonConsumingExercise) Compression.Algorithm.GZIP else Compression.Algorithm.None + build( + Compression.Algorithm.None, + Compression.Algorithm.None, + consumingAlgorithm, + consumingAlgorithm, + nonConsumingAlgorithm, + nonConsumingAlgorithm, + metrics, + ) + } + def buildUniform( algorithm: Compression.Algorithm, metrics: LedgerApiServerMetrics, ): CompressionStrategy = - build(algorithm, algorithm, algorithm, algorithm, metrics) + build(algorithm, algorithm, algorithm, algorithm, algorithm, algorithm, metrics) def build( createArgumentAlgorithm: Compression.Algorithm, createKeyValueAlgorithm: Compression.Algorithm, - exerciseArgumentAlgorithm: Compression.Algorithm, - exerciseResultAlgorithm: Compression.Algorithm, + consumingExerciseArgumentAlgorithm: Compression.Algorithm, + consumingExerciseResultAlgorithm: Compression.Algorithm, + nonConsumingExerciseArgumentAlgorithm: Compression.Algorithm, + nonConsumingExerciseResultAlgorithm: Compression.Algorithm, metrics: LedgerApiServerMetrics, ): CompressionStrategy = CompressionStrategy( - createArgumentCompression = + createArgumentCompressionLegacy = FieldCompressionStrategy(createArgumentAlgorithm, CompressionMetrics.createArgument(metrics)), - createKeyValueCompression = + 
createKeyValueCompressionLegacy = FieldCompressionStrategy(createKeyValueAlgorithm, CompressionMetrics.createKeyValue(metrics)), - exerciseArgumentCompression = FieldCompressionStrategy( - exerciseArgumentAlgorithm, + consumingExerciseArgumentCompression = FieldCompressionStrategy( + consumingExerciseArgumentAlgorithm, + CompressionMetrics.exerciseArgument(metrics), + ), + consumingExerciseResultCompression = FieldCompressionStrategy( + consumingExerciseResultAlgorithm, + CompressionMetrics.exerciseResult(metrics), + ), + nonConsumingExerciseArgumentCompression = FieldCompressionStrategy( + nonConsumingExerciseArgumentAlgorithm, CompressionMetrics.exerciseArgument(metrics), ), - exerciseResultCompression = - FieldCompressionStrategy(exerciseResultAlgorithm, CompressionMetrics.exerciseResult(metrics)), + nonConsumingExerciseResultCompression = FieldCompressionStrategy( + nonConsumingExerciseResultAlgorithm, + CompressionMetrics.exerciseResult(metrics), + ), ) } diff --git a/sdk/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/dao/events/EventsReader.scala b/sdk/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/dao/events/EventsReader.scala index 02c5078d453d..87ecf254ad7b 100644 --- a/sdk/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/dao/events/EventsReader.scala +++ b/sdk/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/dao/events/EventsReader.scala @@ -6,6 +6,7 @@ package com.digitalasset.canton.platform.store.dao.events import com.daml.ledger.api.v2.event_query_service.{Archived, Created, GetEventsByContractIdResponse} import com.digitalasset.canton.concurrent.DirectExecutionContext import com.digitalasset.canton.ledger.error.groups.RequestValidationErrors +import com.digitalasset.canton.logging.LoggingContextWithTrace.implicitExtractTraceContext import com.digitalasset.canton.logging.{ ErrorLoggingContext, LoggingContextWithTrace, @@ -13,6 +14,8 @@ import com.digitalasset.canton.logging.{ NamedLogging, } import com.digitalasset.canton.metrics.LedgerApiServerMetrics +import com.digitalasset.canton.networking.grpc.CantonGrpcUtil.GrpcErrors.AbortedDueToShutdown +import com.digitalasset.canton.participant.store.ContractStore import com.digitalasset.canton.platform.InternalEventFormat import com.digitalasset.canton.platform.store.backend.EventStorageBackend.{ Entry, @@ -35,6 +38,7 @@ private[dao] sealed class EventsReader( val parameterStorageBackend: ParameterStorageBackend, val metrics: LedgerApiServerMetrics, val lfValueTranslation: LfValueTranslation, + val contractStore: ContractStore, val ledgerEndCache: LedgerEndCache, override val loggerFactory: NamedLoggerFactory, )(implicit ec: ExecutionContext) @@ -84,15 +88,27 @@ private[dao] sealed class EventsReader( created } + contractsM <- contractStore + .lookupBatchedNonCached( + // we only need the internal contract id for the created event, if it exists + rawCreatedEventRestoredWitnesses.map(_.internalContractId).toList + ) + .failOnShutdownTo(AbortedDueToShutdown.Error().asGrpcError) + deserialized <- Future.delegate { implicit val ec: ExecutionContext = directEC // Scala 2 implicit scope override: shadow the outer scope's implicit by name MonadUtil.sequentialTraverse(rawEventsRestoredWitnesses) { event => + val fatContractO = event.event match { + case created: RawCreatedEventLegacy => + contractsM.get(created.internalContractId).map(_.inst) + case _ => 
None + } UpdateReader .deserializeRawAcsDeltaEvent( internalEventFormat.eventProjectionProperties, lfValueTranslation, - )(event) + )(event -> fatContractO) .map(_ -> event.synchronizerId) } } diff --git a/sdk/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/dao/events/InputContractPackages.scala b/sdk/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/dao/events/InputContractPackages.scala new file mode 100644 index 000000000000..0b50dd5f7554 --- /dev/null +++ b/sdk/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/dao/events/InputContractPackages.scala @@ -0,0 +1,57 @@ +// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. +// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.platform.store.dao.events + +import cats.implicits.toFunctorOps +import com.digitalasset.canton.LfPackageId +import com.digitalasset.canton.protocol.GenContractInstance +import com.digitalasset.daml.lf.data +import com.digitalasset.daml.lf.data.Relation +import com.digitalasset.daml.lf.transaction.{FatContractInstance, Node, Transaction} +import com.digitalasset.daml.lf.value.Value.ContractId + +object InputContractPackages { + + /** Returns a mapping from all contract ids referenced in the transaction to their package ids, + * excluding those that are created within the transaction. + */ + def forTransaction(tx: Transaction): data.Relation[ContractId, LfPackageId] = + tx.fold(data.Relation.empty[ContractId, LfPackageId]) { + case ( + acc, + (_, Node.Exercise(coid, _, templateId, _, _, _, _, _, _, _, _, _, _, _, _, _, _)), + ) => + Relation.update(acc, coid, templateId.packageId) + case (acc, (_, Node.Fetch(coid, _, templateId, _, _, _, _, _, _, _))) => + Relation.update(acc, coid, templateId.packageId) + case (acc, (_, Node.LookupByKey(_, templateId, _, Some(coid), _))) => + Relation.update(acc, coid, templateId.packageId) + case (acc, _) => acc + } -- tx.localContracts.keySet + + /** Merges two maps, returning an error if their key sets differ. */ + private[events] def strictZipByKey[K, V1, V2]( + m1: Map[K, V1], + m2: Map[K, V2], + ): Either[Set[K], Map[K, (V1, V2)]] = { + val keys1 = m1.keySet + val keys2 = m2.keySet + Either.cond( + keys1 == keys2, + keys1.view.map(k => k -> (m1(k), m2(k))).toMap, + (keys1 union keys2) -- (keys1 intersect keys2), + ) + } + + /** Returns a mapping from all contract ids referenced in the transaction to their (contract + * instance, package id), excluding those that are created within the transaction. Fails if the + * set of contract ids in the transaction and in the provided contracts differ. 
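+    * Since a contract may be referenced under more than one template package id, the package ids
+    * are collected into a set. A sketch of the intended use (assuming a transaction `tx` and its
+    * pre-fetched input `contracts` are in scope):
+    * {{{
+    * forTransactionWithContracts(tx, contracts) match {
+    *   case Right(inputs)     => // ContractId -> (FatContractInstance, Set[LfPackageId])
+    *   case Left(mismatched)  => // symmetric difference of the two contract id sets
+    * }
+    * }}}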
+ */ + def forTransactionWithContracts( + tx: Transaction, + contracts: Map[ContractId, GenContractInstance], + ): Either[Set[ContractId], Map[ContractId, (FatContractInstance, Set[LfPackageId])]] = + strictZipByKey(contracts.fmap(_.inst), forTransaction(tx)) + +} diff --git a/sdk/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/dao/events/LfValueTranslation.scala b/sdk/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/dao/events/LfValueTranslation.scala index 6aa12d9efea2..fdd6c6ddfe2b 100644 --- a/sdk/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/dao/events/LfValueTranslation.scala +++ b/sdk/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/dao/events/LfValueTranslation.scala @@ -3,7 +3,6 @@ package com.digitalasset.canton.platform.store.dao.events -import cats.implicits.toTraverseOps import com.daml.ledger.api.v2.event.{ArchivedEvent, CreatedEvent, ExercisedEvent, InterfaceView} import com.daml.ledger.api.v2.value import com.daml.ledger.api.v2.value.{Record as ApiRecord, Value as ApiValue} @@ -21,7 +20,6 @@ import com.digitalasset.canton.platform.packages.DeduplicatingPackageLoader import com.digitalasset.canton.platform.store.backend.EventStorageBackend.{ Entry, RawArchivedEventLegacy, - RawCreatedEventLegacy, RawExercisedEventLegacy, } import com.digitalasset.canton.platform.store.dao.EventProjectionProperties @@ -36,8 +34,8 @@ import com.digitalasset.canton.platform.{ Value as LfValue, } import com.digitalasset.canton.util.MonadUtil -import com.digitalasset.daml.lf.data.Ref.{FullIdentifier, Identifier, Party} -import com.digitalasset.daml.lf.data.{Bytes, Ref} +import com.digitalasset.daml.lf.data.Ref +import com.digitalasset.daml.lf.data.Ref.{FullIdentifier, Identifier} import com.digitalasset.daml.lf.engine as LfEngine import com.digitalasset.daml.lf.engine.Engine import com.digitalasset.daml.lf.transaction.* @@ -401,87 +399,6 @@ final class LfValueTranslation( ) } - def deserializeRawCreated( - eventProjectionProperties: EventProjectionProperties, - rawCreatedEvent: RawCreatedEventLegacy, - offset: Long, - nodeId: Int, - )(implicit - ec: ExecutionContext, - loggingContext: LoggingContextWithTrace, - ): Future[CreatedEvent] = { - def getFatContractInstance( - createArgument: VersionedValue, - createKey: Option[VersionedValue], - ): Either[String, FatContractInstance] = - for { - signatories <- rawCreatedEvent.signatories.toList.traverse(Party.fromString).map(_.toSet) - observers <- rawCreatedEvent.observers.toList.traverse(Party.fromString).map(_.toSet) - maintainers <- rawCreatedEvent.createKeyMaintainers.toList - .traverse(Party.fromString) - .map(_.toSet) - globalKey <- createKey - .traverse(key => - GlobalKey - .build( - rawCreatedEvent.templateId.toIdentifier, - key.unversioned, - rawCreatedEvent.templateId.pkgName, - ) - .left - .map(_.msg) - ) - } yield FatContractInstance.fromCreateNode( - Node.Create( - coid = rawCreatedEvent.contractId, - templateId = rawCreatedEvent.templateId.toIdentifier, - packageName = rawCreatedEvent.templateId.pkgName, - arg = createArgument.unversioned, - signatories = signatories, - stakeholders = signatories ++ observers, - keyOpt = globalKey.map(GlobalKeyWithMaintainers(_, maintainers)), - version = createArgument.version, - ), - createTime = CreationTime.CreatedAt(rawCreatedEvent.ledgerEffectiveTime), - authenticationData = 
Bytes.fromByteArray(rawCreatedEvent.authenticationData), - ) - - for { - createKey <- Future( - rawCreatedEvent.createKeyValue - .map( - decompressAndDeserialize( - Compression.Algorithm - .assertLookup(rawCreatedEvent.createKeyValueCompression), - _, - ) - ) - ) - createArgument <- Future( - decompressAndDeserialize( - Compression.Algorithm - .assertLookup(rawCreatedEvent.createArgumentCompression), - rawCreatedEvent.createArgument, - ) - ) - - fatContractInstance <- getFatContractInstance(createArgument, createKey).fold( - err => Future.failed(new RuntimeException(s"Cannot serialize createdEventBlob: $err")), - Future.successful, - ) - - createdEvent <- toApiCreatedEvent( - eventProjectionProperties = eventProjectionProperties, - fatContractInstance = fatContractInstance, - offset = offset, - nodeId = nodeId, - representativePackageId = rawCreatedEvent.representativePackageId, - witnesses = rawCreatedEvent.witnessParties, - acsDelta = rawCreatedEvent.flatEventWitnesses.nonEmpty, - ) - } yield createdEvent - } - private def toApiContractData( value: Value, keyO: Option[Value], diff --git a/sdk/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/dao/events/QueryValidRange.scala b/sdk/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/dao/events/QueryValidRange.scala index bfa615a42107..f732593bf3dd 100644 --- a/sdk/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/dao/events/QueryValidRange.scala +++ b/sdk/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/dao/events/QueryValidRange.scala @@ -5,15 +5,19 @@ package com.digitalasset.canton.platform.store.dao.events import com.digitalasset.canton.data.Offset import com.digitalasset.canton.ledger.error.groups.RequestValidationErrors +import com.digitalasset.canton.logging.LoggingContextWithTrace.implicitExtractTraceContext import com.digitalasset.canton.logging.{ ErrorLoggingContext, LoggingContextWithTrace, NamedLoggerFactory, NamedLogging, } -import com.digitalasset.canton.platform.store.backend.ParameterStorageBackend +import com.digitalasset.canton.platform.store.PruningOffsetService +import com.digitalasset.canton.platform.store.cache.LedgerEndCache +import com.digitalasset.canton.tracing.TraceContext +import com.digitalasset.canton.util.Thereafter.syntax.* -import java.sql.Connection +import scala.concurrent.{ExecutionContext, Future} trait QueryValidRange { def withRangeNotPruned[T]( @@ -21,25 +25,33 @@ trait QueryValidRange { maxOffsetInclusive: Offset, errorPruning: Offset => String, errorLedgerEnd: Option[Offset] => String, - )(query: => T)(implicit - conn: Connection, - loggingContext: LoggingContextWithTrace, - ): T + )(query: => Future[T])(implicit + loggingContext: LoggingContextWithTrace + ): Future[T] def withOffsetNotBeforePruning[T]( offset: Offset, errorPruning: Offset => String, errorLedgerEnd: Option[Offset] => String, - )(query: => T)(implicit - conn: Connection, - loggingContext: LoggingContextWithTrace, - ): T + )(query: => Future[T])(implicit + loggingContext: LoggingContextWithTrace + ): Future[T] + + def filterPrunedEvents[T](offset: T => Offset)( + events: Seq[T] + )(implicit + errorLoggingContext: ErrorLoggingContext, + traceContext: TraceContext, + ): Future[Seq[T]] } final case class QueryValidRangeImpl( - storageBackend: ParameterStorageBackend, - val loggerFactory: NamedLoggerFactory, + ledgerEndCache: LedgerEndCache, + pruningOffsetService: 
PruningOffsetService, + loggerFactory: NamedLoggerFactory, +)(implicit + ec: ExecutionContext ) extends QueryValidRange with NamedLogging { @@ -72,49 +84,49 @@ final case class QueryValidRangeImpl( maxOffsetInclusive: Offset, errorPruning: Offset => String, errorLedgerEnd: Option[Offset] => String, - )(query: => T)(implicit - conn: Connection, - loggingContext: LoggingContextWithTrace, - ): T = { + )(query: => Future[T])(implicit + loggingContext: LoggingContextWithTrace + ): Future[T] = { assert(Option(maxOffsetInclusive) >= minOffsetInclusive.decrement) - val result = query - val params = storageBackend.prunedUpToInclusiveAndLedgerEnd(conn) - - params.pruneUptoInclusive - .filter(_ >= minOffsetInclusive) - .foreach(pruningOffsetUpToInclusive => - throw RequestValidationErrors.ParticipantPrunedDataAccessed + val ledgerEnd = ledgerEndCache().map(_.lastOffset) + if (Option(maxOffsetInclusive) > ledgerEnd) { + Future.failed( + RequestValidationErrors.ParticipantDataAccessedAfterLedgerEnd .Reject( - cause = errorPruning(pruningOffsetUpToInclusive), - earliestOffset = pruningOffsetUpToInclusive.unwrap, + cause = errorLedgerEnd(ledgerEnd), + latestOffset = ledgerEnd.fold(0L)(_.unwrap), )( ErrorLoggingContext(logger, loggingContext) ) .asGrpcError ) - - if (Option(maxOffsetInclusive) > params.ledgerEnd) { - throw RequestValidationErrors.ParticipantDataAccessedAfterLedgerEnd - .Reject( - cause = errorLedgerEnd(params.ledgerEnd), - latestOffset = params.ledgerEnd.fold(0L)(_.unwrap), - )( - ErrorLoggingContext(logger, loggingContext) - ) - .asGrpcError - } - - result + } else + query.thereafterF(_ => + pruningOffsetService.pruningOffset + .map(pruningOffsetO => + pruningOffsetO + .filter(_ >= minOffsetInclusive) + .foreach(pruningOffsetUpToInclusive => + throw RequestValidationErrors.ParticipantPrunedDataAccessed + .Reject( + cause = errorPruning(pruningOffsetUpToInclusive), + earliestOffset = pruningOffsetUpToInclusive.unwrap, + )( + ErrorLoggingContext(logger, loggingContext) + ) + .asGrpcError + ) + ) + ) } override def withOffsetNotBeforePruning[T]( offset: Offset, errorPruning: Offset => String, errorLedgerEnd: Option[Offset] => String, - )(query: => T)(implicit - conn: Connection, - loggingContext: LoggingContextWithTrace, - ): T = + )(query: => Future[T])(implicit + loggingContext: LoggingContextWithTrace + ): Future[T] = withRangeNotPruned( // as the range not pruned forms a condition that the minOffsetInclusive is greater than the pruning offset, // by setting this to the offset + 1 we ensure that the offset is greater than or equal to the pruning offset. @@ -123,4 +135,43 @@ final case class QueryValidRangeImpl( errorPruning = errorPruning, errorLedgerEnd = errorLedgerEnd, )(query) + + /** Filters out events that are at or below the participant's pruning offset. 
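+    * For example, with a pruning offset of 10, events at offsets 8 and 10 are filtered out while
+    * an event at offset 12 is kept; an event beyond the ledger end instead fails the returned
+    * future with ParticipantDataAccessedAfterLedgerEnd.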
+    *
+    * @param offset
+    *   function to extract the offset from an event
+    * @param events
+    *   the events to filter
+    * @tparam T
+    *   the type of the events
+    * @return
+    *   a future of the filtered events
+    */
+  def filterPrunedEvents[T](offset: T => Offset)(
+      events: Seq[T]
+  )(implicit
+      errorLoggingContext: ErrorLoggingContext,
+      traceContext: TraceContext,
+  ): Future[Seq[T]] = {
+    val ledgerEnd = ledgerEndCache().map(_.lastOffset)
+    val beyondLedgerEndO = events.find(event => Option(offset(event)) > ledgerEnd)
+    beyondLedgerEndO match {
+      case Some(event) =>
+        Future.failed(
+          RequestValidationErrors.ParticipantDataAccessedAfterLedgerEnd
+            .Reject(
+              cause =
+                s"Offset of event to be filtered ${offset(event)} is beyond ledger end $ledgerEnd",
+              latestOffset = ledgerEnd.fold(0L)(_.unwrap),
+            )(errorLoggingContext)
+            .asGrpcError
+        )
+      case None =>
+        pruningOffsetService.pruningOffset
+          .map(participantPrunedUpTo =>
+            events.filter(event => Option(offset(event)) > participantPrunedUpTo)
+          )
+    }
+  }
+
 }
diff --git a/sdk/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/dao/events/ReassignmentPointwiseReader.scala b/sdk/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/dao/events/ReassignmentPointwiseReader.scala
index c3829accf1a6..59e0802fb2a9 100644
--- a/sdk/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/dao/events/ReassignmentPointwiseReader.scala
+++ b/sdk/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/dao/events/ReassignmentPointwiseReader.scala
@@ -6,19 +6,28 @@ package com.digitalasset.canton.platform.store.dao.events
 import com.daml.ledger.api.v2.reassignment.Reassignment
 import com.daml.metrics.Timed
 import com.digitalasset.canton.concurrent.DirectExecutionContext
+import com.digitalasset.canton.data.Offset
+import com.digitalasset.canton.logging.LoggingContextWithTrace.implicitExtractTraceContext
 import com.digitalasset.canton.logging.{LoggingContextWithTrace, NamedLoggerFactory, NamedLogging}
 import com.digitalasset.canton.metrics.LedgerApiServerMetrics
+import com.digitalasset.canton.networking.grpc.CantonGrpcUtil.GrpcErrors.AbortedDueToShutdown
+import com.digitalasset.canton.participant.store.ContractStore
 import com.digitalasset.canton.platform.store.backend.EventStorageBackend
 import com.digitalasset.canton.platform.store.backend.EventStorageBackend.SequentialIdBatch.IdRange
 import com.digitalasset.canton.platform.store.backend.EventStorageBackend.{
   Entry,
   RawAssignEventLegacy,
-  RawEventLegacy,
   RawReassignmentEventLegacy,
   RawUnassignEventLegacy,
 }
 import com.digitalasset.canton.platform.store.dao.{DbDispatcher, EventProjectionProperties}
-import com.digitalasset.canton.platform.{InternalEventFormat, Party, TemplatePartiesFilter}
+import com.digitalasset.canton.platform.{
+  FatContract,
+  InternalEventFormat,
+  Party,
+  TemplatePartiesFilter,
+}
+import com.digitalasset.canton.tracing.TraceContext
 
 import scala.concurrent.{ExecutionContext, Future}
 
@@ -27,6 +36,8 @@ final class ReassignmentPointwiseReader(
     val eventStorageBackend: EventStorageBackend,
     val metrics: LedgerApiServerMetrics,
     val lfValueTranslation: LfValueTranslation,
+    val queryValidRange: QueryValidRange,
+    val contractStore: ContractStore,
     val loggerFactory: NamedLoggerFactory,
 )(implicit val ec: ExecutionContext)
     extends NamedLogging {
@@ -66,7 +77,7 @@ }
   private def 
toApiAssigned(eventProjectionProperties: EventProjectionProperties)( - rawAssignEntries: Seq[Entry[RawAssignEventLegacy]] + rawAssignEntries: Seq[(Entry[RawAssignEventLegacy], Option[FatContract])] )(implicit lc: LoggingContextWithTrace): Future[Option[Reassignment]] = Timed.future( future = Future.delegate { @@ -79,8 +90,8 @@ final class ReassignmentPointwiseReader( def entriesToReassignment( eventProjectionProperties: EventProjectionProperties - )( - rawReassignmentEntries: Seq[Entry[RawReassignmentEventLegacy]] + )(rawReassignmentEntries: Seq[Entry[RawReassignmentEventLegacy]])( + contractsM: Map[Long, FatContract] )(implicit loggingContext: LoggingContextWithTrace, ec: ExecutionContext, @@ -88,7 +99,9 @@ final class ReassignmentPointwiseReader( assignO <- toApiAssigned(eventProjectionProperties)( rawReassignmentEntries.collect(entry => entry.event match { - case rawAssign: RawAssignEventLegacy => entry.copy(event = rawAssign) + case rawAssign: RawAssignEventLegacy => + val fatContractO = contractsM.get(rawAssign.rawCreatedEvent.internalContractId) + entry.copy(event = rawAssign) -> fatContractO } ) ) @@ -102,21 +115,36 @@ final class ReassignmentPointwiseReader( } yield assignO.orElse(unassignO) - private def fetchAndFilterEvents[T <: RawEventLegacy]( + private def fetchAndFilterEvents[T <: RawReassignmentEventLegacy]( fetchRawEvents: Future[Vector[Entry[T]]], templatePartiesFilter: TemplatePartiesFilter, - toResponse: Seq[Entry[T]] => Future[Option[Reassignment]], - ): Future[Option[Reassignment]] = - for { - // Fetching all events from the event sequential id range - rawEvents <- fetchRawEvents + toResponse: Seq[Entry[T]] => Map[Long, FatContract] => Future[Option[Reassignment]], + )(implicit traceContext: TraceContext): Future[Option[Reassignment]] = + // Fetching all events from the event sequential id range + fetchRawEvents // Filtering by template filters - filteredRawEvents = UpdateReader.filterRawEvents(templatePartiesFilter)(rawEvents) - // Deserialization of lf values - deserialized <- toResponse(filteredRawEvents) - } yield { - deserialized - } + .map(UpdateReader.filterRawEvents(templatePartiesFilter)) + // Checking if events are not pruned + .flatMap( + queryValidRange.filterPrunedEvents[Entry[T]](entry => Offset.tryFromLong(entry.offset)) + ) + .flatMap(rawPrunedEvents => + for { + // Fetching all contracts for the filtered assigned events + fatInstancesM <- contractStore + .lookupBatchedNonCached( + rawPrunedEvents.collect(_.event match { + case assign: RawAssignEventLegacy => assign.rawCreatedEvent.internalContractId + }) + ) + .map(_.view.mapValues(_.inst).toMap) + .failOnShutdownTo(AbortedDueToShutdown.Error().asGrpcError) + // Deserialization of lf values + deserialized <- toResponse(rawPrunedEvents)(fatInstancesM) + } yield { + deserialized + } + ) def lookupReassignmentBy( eventSeqIdRange: (Long, Long), diff --git a/sdk/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/dao/events/ReassignmentStreamReader.scala b/sdk/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/dao/events/ReassignmentStreamReader.scala index 4426f373dc45..02bc01c6b6f1 100644 --- a/sdk/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/dao/events/ReassignmentStreamReader.scala +++ b/sdk/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/dao/events/ReassignmentStreamReader.scala @@ -10,12 +10,14 @@ import 
com.digitalasset.canton.data.Offset import com.digitalasset.canton.logging.LoggingContextWithTrace.implicitExtractTraceContext import com.digitalasset.canton.logging.{LoggingContextWithTrace, NamedLoggerFactory, NamedLogging} import com.digitalasset.canton.metrics.LedgerApiServerMetrics -import com.digitalasset.canton.platform.TemplatePartiesFilter +import com.digitalasset.canton.networking.grpc.CantonGrpcUtil.GrpcErrors.AbortedDueToShutdown +import com.digitalasset.canton.participant.store.ContractStore import com.digitalasset.canton.platform.store.backend.EventStorageBackend import com.digitalasset.canton.platform.store.backend.EventStorageBackend.SequentialIdBatch.Ids import com.digitalasset.canton.platform.store.backend.EventStorageBackend.{ Entry, RawAssignEventLegacy, + RawReassignmentEventLegacy, RawUnassignEventLegacy, SequentialIdBatch, } @@ -34,6 +36,7 @@ import com.digitalasset.canton.platform.store.utils.{ ConcurrencyLimiter, QueueBasedConcurrencyLimiter, } +import com.digitalasset.canton.platform.{FatContract, TemplatePartiesFilter} import com.digitalasset.canton.util.PekkoUtil.syntax.* import com.digitalasset.daml.lf.data.Ref import com.digitalasset.daml.lf.data.Ref.{NameTypeConRef, Party} @@ -52,6 +55,7 @@ class ReassignmentStreamReader( queryValidRange: QueryValidRange, eventStorageBackend: EventStorageBackend, lfValueTranslation: LfValueTranslation, + contractStore: ContractStore, metrics: LedgerApiServerMetrics, val loggerFactory: NamedLoggerFactory, )(implicit executionContext: ExecutionContext) @@ -111,12 +115,12 @@ class ReassignmentStreamReader( maxBatchCount = maxOutputBatchCount, ) - def fetchPayloads[T]( + def fetchPayloads[T <: RawReassignmentEventLegacy]( ids: Source[Iterable[Long], NotUsed], maxParallelPayloadQueries: Int, dbMetric: DatabaseMetrics, payloadDbQuery: PayloadDbQuery[Entry[T]], - deserialize: Seq[Entry[T]] => Future[Option[Reassignment]], + deserialize: Seq[(Entry[T], Option[FatContract])] => Future[Option[Reassignment]], ): Source[Reassignment, NotUsed] = { // Pekko requires for this buffer's size to be a power of two. 
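// Editor note (illustrative sketch, not part of the change): the Utils helper used
// just below rounds down to a power of two, as Pekko requires for stream buffers.
// A minimal standalone equivalent using only the JDK:
//   def largestSmallerOrEqualPowerOfTwo(n: Int): Int =
//     Integer.highestOneBit(math.max(n, 1))
//   largestSmallerOrEqualPowerOfTwo(6) // == 4
//   largestSmallerOrEqualPowerOfTwo(8) // == 8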
val inputBufferSize = Utils.largestSmallerOrEqualPowerOfTwo(maxParallelPayloadQueries) @@ -125,28 +129,49 @@ class ReassignmentStreamReader( .mapAsync(maxParallelPayloadQueries)(ids => payloadQueriesLimiter.execute { globalPayloadQueriesLimiter.execute { - dbDispatcher.executeSql(dbMetric) { implicit connection => - queryValidRange.withRangeNotPruned( - minOffsetInclusive = queryRange.startInclusiveOffset, - maxOffsetInclusive = queryRange.endInclusiveOffset, - errorPruning = (prunedOffset: Offset) => - s"Reassignment request from ${queryRange.startInclusiveOffset.unwrap} to ${queryRange.endInclusiveOffset.unwrap} precedes pruned offset ${prunedOffset.unwrap}", - errorLedgerEnd = (ledgerEndOffset: Option[Offset]) => - s"Reassignment request from ${queryRange.startInclusiveOffset.unwrap} to ${queryRange.endInclusiveOffset.unwrap} is beyond ledger end offset ${ledgerEndOffset - .fold(0L)(_.unwrap)}", - ) { - payloadDbQuery.fetchPayloads( - eventSequentialIds = Ids(ids), - allFilterParties = filteringConstraints.allFilterParties, - )(connection) - } + queryValidRange.withRangeNotPruned( + minOffsetInclusive = queryRange.startInclusiveOffset, + maxOffsetInclusive = queryRange.endInclusiveOffset, + errorPruning = (prunedOffset: Offset) => + s"Reassignment request from ${queryRange.startInclusiveOffset.unwrap} to ${queryRange.endInclusiveOffset.unwrap} precedes pruned offset ${prunedOffset.unwrap}", + errorLedgerEnd = (ledgerEndOffset: Option[Offset]) => + s"Reassignment request from ${queryRange.startInclusiveOffset.unwrap} to ${queryRange.endInclusiveOffset.unwrap} is beyond ledger end offset ${ledgerEndOffset + .fold(0L)(_.unwrap)}", + ) { + dbDispatcher + .executeSql(dbMetric)( + payloadDbQuery.fetchPayloads( + eventSequentialIds = Ids(ids), + allFilterParties = filteringConstraints.allFilterParties, + ) + ) + .flatMap { payloads => + val internalContractIds = + payloads.map(_.event).collect { case assign: RawAssignEventLegacy => + assign.rawCreatedEvent.internalContractId + } + for { + contractsM <- contractStore + .lookupBatchedNonCached(internalContractIds) + .failOnShutdownTo(AbortedDueToShutdown.Error().asGrpcError) + } yield payloads.map { payload => + payload.event match { + case assign: RawAssignEventLegacy => + payload -> contractsM + .get(assign.rawCreatedEvent.internalContractId) + .map(_.inst) + case _: RawUnassignEventLegacy => + payload -> None + } + } + } } } } ) .mapConcat(identity) UpdateReader - .groupContiguous(serializedPayloads)(by = _.updateId) + .groupContiguous(serializedPayloads)(by = _._1.updateId) .mapAsync(deserializationProcessingParallelism)(t => deserializationQueriesLimiter.execute( deserialize(t) @@ -194,15 +219,15 @@ class ReassignmentStreamReader( } private def toApiUnassigned( - rawUnassignEntries: Seq[Entry[RawUnassignEventLegacy]] + rawUnassignEntries: Seq[(Entry[RawUnassignEventLegacy], Option[FatContract])] ): Future[Option[Reassignment]] = Timed.future( - future = Future.successful(UpdateReader.toApiUnassigned(rawUnassignEntries)), + future = Future.successful(UpdateReader.toApiUnassigned(rawUnassignEntries.map(_._1))), timer = dbMetrics.reassignmentStream.translationTimer, ) private def toApiAssigned(eventProjectionProperties: EventProjectionProperties)( - rawAssignEntries: Seq[Entry[RawAssignEventLegacy]] + rawAssignEntries: Seq[(Entry[RawAssignEventLegacy], Option[FatContract])] )(implicit lc: LoggingContextWithTrace): Future[Option[Reassignment]] = Timed.future( future = Future.delegate { diff --git 
a/sdk/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/dao/events/TopologyTransactionPointwiseReader.scala b/sdk/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/dao/events/TopologyTransactionPointwiseReader.scala index 82fbb610472b..63359f468c19 100644 --- a/sdk/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/dao/events/TopologyTransactionPointwiseReader.scala +++ b/sdk/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/dao/events/TopologyTransactionPointwiseReader.scala @@ -5,6 +5,7 @@ package com.digitalasset.canton.platform.store.dao.events import com.daml.ledger.api.v2.topology_transaction.TopologyTransaction import com.digitalasset.canton.ledger.api.TopologyFormat +import com.digitalasset.canton.logging.LoggingContextWithTrace.implicitExtractTraceContext import com.digitalasset.canton.logging.{LoggingContextWithTrace, NamedLoggerFactory, NamedLogging} import com.digitalasset.canton.metrics.LedgerApiServerMetrics import com.digitalasset.canton.platform.Party @@ -13,6 +14,7 @@ import com.digitalasset.canton.platform.store.backend.EventStorageBackend.RawPar import com.digitalasset.canton.platform.store.backend.EventStorageBackend.SequentialIdBatch.IdRange import com.digitalasset.canton.platform.store.dao.DbDispatcher import com.digitalasset.canton.platform.store.dao.events.EventsTable.TransactionConversions +import com.digitalasset.canton.tracing.TraceContext import scala.concurrent.{ExecutionContext, Future} @@ -21,6 +23,7 @@ final class TopologyTransactionPointwiseReader( val eventStorageBackend: EventStorageBackend, val metrics: LedgerApiServerMetrics, val lfValueTranslation: LfValueTranslation, + val queryValidRange: QueryValidRange, val loggerFactory: NamedLoggerFactory, )(implicit val ec: ExecutionContext) extends NamedLogging { @@ -45,19 +48,19 @@ final class TopologyTransactionPointwiseReader( fetchRawEvents: Future[Vector[RawParticipantAuthorization]], requestingParties: Option[Set[Party]], // None is a party-wildcard toResponse: Vector[RawParticipantAuthorization] => Future[Option[TopologyTransaction]], - ): Future[Option[TopologyTransaction]] = - for { - // Fetching all events from the event sequential id range - rawEvents <- fetchRawEvents + )(implicit traceContext: TraceContext): Future[Option[TopologyTransaction]] = + // Fetching all events from the event sequential id range + fetchRawEvents // Filter out events that do not include the parties - filteredEvents = rawEvents.filter(event => - requestingParties.fold(true)(parties => parties.map(_.toString).contains(event.partyId)) + .map( + _.filter(event => + requestingParties.fold(true)(parties => parties.map(_.toString).contains(event.partyId)) + ) ) + // Checking if events are not pruned + .flatMap(queryValidRange.filterPrunedEvents[RawParticipantAuthorization](_.offset)) // Convert to api response - response <- toResponse(filteredEvents) - } yield { - response - } + .flatMap(filteredEventsPruned => toResponse(filteredEventsPruned.toVector)) def lookupTopologyTransaction( eventSeqIdRange: (Long, Long), diff --git a/sdk/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/dao/events/TopologyTransactionsStreamReader.scala b/sdk/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/dao/events/TopologyTransactionsStreamReader.scala index ccf513b80c60..7e0255ca7a62 100644 
--- a/sdk/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/dao/events/TopologyTransactionsStreamReader.scala +++ b/sdk/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/dao/events/TopologyTransactionsStreamReader.scala @@ -117,18 +117,19 @@ class TopologyTransactionsStreamReader( .mapAsync(maxParallelPayloadQueries)(ids => payloadQueriesLimiter.execute { globalPayloadQueriesLimiter.execute { - dbDispatcher.executeSql(dbMetric) { implicit connection => - queryValidRange.withRangeNotPruned( - minOffsetInclusive = queryRange.startInclusiveOffset, - maxOffsetInclusive = queryRange.endInclusiveOffset, - errorPruning = (prunedOffset: Offset) => - s"Topology events request from ${queryRange.startInclusiveOffset.unwrap} to ${queryRange.endInclusiveOffset.unwrap} precedes pruned offset ${prunedOffset.unwrap}", - errorLedgerEnd = (ledgerEndOffset: Option[Offset]) => - s"Topology events request from ${queryRange.startInclusiveOffset.unwrap} to ${queryRange.endInclusiveOffset.unwrap} is beyond ledger end offset ${ledgerEndOffset - .fold(0L)(_.unwrap)}", - ) { - payloadDbQuery.fetchPayloads(eventSequentialIds = Ids(ids))(connection) - } + queryValidRange.withRangeNotPruned( + minOffsetInclusive = queryRange.startInclusiveOffset, + maxOffsetInclusive = queryRange.endInclusiveOffset, + errorPruning = (prunedOffset: Offset) => + s"Topology events request from ${queryRange.startInclusiveOffset.unwrap} to ${queryRange.endInclusiveOffset.unwrap} precedes pruned offset ${prunedOffset.unwrap}", + errorLedgerEnd = (ledgerEndOffset: Option[Offset]) => + s"Topology events request from ${queryRange.startInclusiveOffset.unwrap} to ${queryRange.endInclusiveOffset.unwrap} is beyond ledger end offset ${ledgerEndOffset + .fold(0L)(_.unwrap)}", + ) { + dbDispatcher.executeSql(dbMetric)( + payloadDbQuery.fetchPayloads(eventSequentialIds = Ids(ids)) + ) + } } } diff --git a/sdk/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/dao/events/TransactionPointwiseReader.scala b/sdk/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/dao/events/TransactionPointwiseReader.scala index 5d753b38147e..2864f8baf3f6 100644 --- a/sdk/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/dao/events/TransactionPointwiseReader.scala +++ b/sdk/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/dao/events/TransactionPointwiseReader.scala @@ -8,14 +8,19 @@ import com.daml.ledger.api.v2.transaction.Transaction import com.daml.metrics.Timed import com.daml.metrics.api.MetricHandle import com.digitalasset.canton.concurrent.DirectExecutionContext +import com.digitalasset.canton.data.Offset import com.digitalasset.canton.ledger.api.TransactionShape +import com.digitalasset.canton.logging.LoggingContextWithTrace.implicitExtractTraceContext import com.digitalasset.canton.logging.{LoggingContextWithTrace, NamedLoggerFactory, NamedLogging} import com.digitalasset.canton.metrics.LedgerApiServerMetrics +import com.digitalasset.canton.networking.grpc.CantonGrpcUtil.GrpcErrors.AbortedDueToShutdown +import com.digitalasset.canton.participant.store.ContractStore import com.digitalasset.canton.platform.store.backend.EventStorageBackend import com.digitalasset.canton.platform.store.backend.EventStorageBackend.SequentialIdBatch.IdRange import 
com.digitalasset.canton.platform.store.backend.EventStorageBackend.{ Entry, RawAcsDeltaEventLegacy, + RawCreatedEventLegacy, RawEventLegacy, RawLedgerEffectsEventLegacy, } @@ -25,7 +30,13 @@ import com.digitalasset.canton.platform.store.backend.common.{ } import com.digitalasset.canton.platform.store.dao.events.EventsTable.TransactionConversions.toTransaction import com.digitalasset.canton.platform.store.dao.{DbDispatcher, EventProjectionProperties} -import com.digitalasset.canton.platform.{InternalTransactionFormat, Party, TemplatePartiesFilter} +import com.digitalasset.canton.platform.{ + FatContract, + InternalTransactionFormat, + Party, + TemplatePartiesFilter, +} +import com.digitalasset.canton.tracing.TraceContext import com.digitalasset.canton.util.MonadUtil import scala.concurrent.{ExecutionContext, Future} @@ -35,6 +46,8 @@ final class TransactionPointwiseReader( val eventStorageBackend: EventStorageBackend, val metrics: LedgerApiServerMetrics, val lfValueTranslation: LfValueTranslation, + val queryValidRange: QueryValidRange, + val contractStore: ContractStore, val loggerFactory: NamedLoggerFactory, )(implicit val ec: ExecutionContext) extends NamedLogging { @@ -127,7 +140,7 @@ private def deserializeEntryAcsDelta( eventProjectionProperties: EventProjectionProperties, lfValueTranslation: LfValueTranslation, - )(entry: Entry[RawAcsDeltaEventLegacy])(implicit + )(entry: (Entry[RawAcsDeltaEventLegacy], Option[FatContract]))(implicit loggingContext: LoggingContextWithTrace, ec: ExecutionContext, ): Future[Entry[Event]] = @@ -136,7 +149,7 @@ private def deserializeEntryLedgerEffects( eventProjectionProperties: EventProjectionProperties, lfValueTranslation: LfValueTranslation, - )(entry: Entry[RawLedgerEffectsEventLegacy])(implicit + )(entry: (Entry[RawLedgerEffectsEventLegacy], Option[FatContract]))(implicit loggingContext: LoggingContextWithTrace, ec: ExecutionContext, ): Future[Entry[Event]] = @@ -147,26 +160,50 @@ private def fetchAndFilterEvents[T <: RawEventLegacy]( fetchRawEvents: Future[Vector[Entry[T]]], templatePartiesFilter: TemplatePartiesFilter, - deserializeEntry: Entry[T] => Future[Entry[Event]], + deserializeEntry: ((Entry[T], Option[FatContract])) => Future[Entry[Event]], timer: MetricHandle.Timer, - ): Future[Seq[Entry[Event]]] = - for { - // Fetching all events from the event sequential id range - rawEvents <- fetchRawEvents + )(implicit traceContext: TraceContext): Future[Seq[Entry[Event]]] = + // Fetching all events from the event sequential id range + fetchRawEvents // Filtering by template filters - filteredRawEvents = UpdateReader.filterRawEvents(templatePartiesFilter)(rawEvents) - // Deserialization of lf values - deserialized <- Timed.future( - timer = timer, - future = Future.delegate { - implicit val ec: ExecutionContext = - directEC // Scala 2 implicit scope override: shadow the outer scope's implicit by name - MonadUtil.sequentialTraverse(filteredRawEvents)(deserializeEntry) - }, + .map(UpdateReader.filterRawEvents(templatePartiesFilter)) + // Checking if events are not pruned + .flatMap( + queryValidRange.filterPrunedEvents[Entry[T]](entry => Offset.tryFromLong(entry.offset)) + ) + .flatMap(rawEventsPruned => + for { + // Fetching all contracts for the filtered created events + contractsM <- contractStore + .lookupBatchedNonCached( + rawEventsPruned.collect(_.event match { + case created: RawCreatedEventLegacy => 
created.internalContractId + }) + ) + .map(_.view.mapValues(_.inst).toMap) + .failOnShutdownTo(AbortedDueToShutdown.Error().asGrpcError) + // Deserialization of lf values + deserialized <- + Timed.future( + timer = timer, + future = Future.delegate { + implicit val ec: ExecutionContext = + directEC // Scala 2 implicit scope override: shadow the outer scope's implicit by name + MonadUtil.sequentialTraverse(rawEventsPruned)(entry => + entry.event match { + case created: RawCreatedEventLegacy => + val fatContractO = contractsM.get(created.internalContractId) + deserializeEntry(entry -> fatContractO) + case _ => + deserializeEntry(entry -> None) + } + ) + }, + ) + } yield { + deserialized + } ) - } yield { - deserialized - } def lookupTransactionBy( eventSeqIdRange: (Long, Long), diff --git a/sdk/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/dao/events/UpdatePointwiseReader.scala b/sdk/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/dao/events/UpdatePointwiseReader.scala index 5b9f8cfab0d0..82472163f787 100644 --- a/sdk/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/dao/events/UpdatePointwiseReader.scala +++ b/sdk/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/dao/events/UpdatePointwiseReader.scala @@ -12,7 +12,6 @@ import com.digitalasset.canton.platform.InternalUpdateFormat import com.digitalasset.canton.platform.store.backend.common.UpdatePointwiseQueries.LookupKey import com.digitalasset.canton.platform.store.backend.{EventStorageBackend, ParameterStorageBackend} import com.digitalasset.canton.platform.store.dao.DbDispatcher -import com.digitalasset.canton.platform.store.dao.events.UpdatePointwiseReader.getOffset import scala.concurrent.{ExecutionContext, Future} @@ -88,24 +87,9 @@ final class UpdatePointwiseReader( ) .map(_.flatten) - prunedUpToInclusive <- dbDispatcher.executeSql(metrics.index.db.fetchPruningOffsetsMetrics)( - parameterStorageBackend.prunedUpToInclusive - ) - - notPruned = agg.filter(update => getOffset(update) > prunedUpToInclusive.fold(0L)(_.unwrap)) - } yield { // only a single update should exist for a specific offset or update id - notPruned.headOption.map(GetUpdateResponse.apply) + agg.headOption.map(GetUpdateResponse.apply) } } - -object UpdatePointwiseReader { - private def getOffset(update: Update): Long = update match { - case Update.Empty => throw new RuntimeException("The update was unexpectedly empty.") - case Update.Transaction(tx) => tx.offset - case Update.Reassignment(reassignment) => reassignment.offset - case Update.TopologyTransaction(topologyTx) => topologyTx.offset - } -} diff --git a/sdk/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/dao/events/UpdateReader.scala b/sdk/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/dao/events/UpdateReader.scala index 10058c4e3f36..d354cc6f24b7 100644 --- a/sdk/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/dao/events/UpdateReader.scala +++ b/sdk/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/dao/events/UpdateReader.scala @@ -35,7 +35,7 @@ import com.digitalasset.canton.platform.store.dao.{ EventProjectionProperties, LedgerDaoUpdateReader, } -import com.digitalasset.canton.platform.{InternalUpdateFormat, TemplatePartiesFilter} +import 
com.digitalasset.canton.platform.{FatContract, InternalUpdateFormat, TemplatePartiesFilter} import com.digitalasset.canton.util.MonadUtil import io.opentelemetry.api.trace.Span import org.apache.pekko.stream.scaladsl.Source @@ -133,50 +133,46 @@ private[dao] final class UpdateReader( private def getMaxAcsEventSeqId(activeAt: Offset)(implicit loggingContext: LoggingContextWithTrace ): Future[Long] = - dispatcher - .executeSql(dbMetrics.getAcsEventSeqIdRange)(implicit connection => - queryValidRange.withOffsetNotBeforePruning( - offset = activeAt, - errorPruning = pruned => - ACSReader.acsBeforePruningErrorReason( - acsOffset = activeAt, - prunedUpToOffset = pruned, - ), - errorLedgerEnd = ledgerEnd => - ACSReader.acsAfterLedgerEndErrorReason( - acsOffset = activeAt, - ledgerEndOffset = ledgerEnd, - ), - )( - eventStorageBackend.maxEventSequentialId(Some(activeAt))(connection) - ) + queryValidRange.withOffsetNotBeforePruning( + offset = activeAt, + errorPruning = pruned => + ACSReader.acsBeforePruningErrorReason( + acsOffset = activeAt, + prunedUpToOffset = pruned, + ), + errorLedgerEnd = ledgerEnd => + ACSReader.acsAfterLedgerEndErrorReason( + acsOffset = activeAt, + ledgerEndOffset = ledgerEnd, + ), + )( + dispatcher.executeSql(dbMetrics.getAcsEventSeqIdRange)( + eventStorageBackend.maxEventSequentialId(Some(activeAt)) ) + ) private def getEventSeqIdRange( startInclusive: Offset, endInclusive: Offset, )(implicit loggingContext: LoggingContextWithTrace): Future[EventsRange] = - dispatcher - .executeSql(dbMetrics.getEventSeqIdRange)(implicit connection => - queryValidRange.withRangeNotPruned( - minOffsetInclusive = startInclusive, - maxOffsetInclusive = endInclusive, - errorPruning = (prunedOffset: Offset) => - s"Transactions request from ${startInclusive.unwrap} to ${endInclusive.unwrap} precedes pruned offset ${prunedOffset.unwrap}", - errorLedgerEnd = (ledgerEndOffset: Option[Offset]) => - s"Transactions request from ${startInclusive.unwrap} to ${endInclusive.unwrap} is beyond ledger end offset ${ledgerEndOffset - .fold(0L)(_.unwrap)}", - ) { - EventsRange( - startInclusiveOffset = startInclusive, - startInclusiveEventSeqId = - eventStorageBackend.maxEventSequentialId(startInclusive.decrement)(connection), - endInclusiveOffset = endInclusive, - endInclusiveEventSeqId = - eventStorageBackend.maxEventSequentialId(Some(endInclusive))(connection), - ) - } + queryValidRange.withRangeNotPruned( + minOffsetInclusive = startInclusive, + maxOffsetInclusive = endInclusive, + errorPruning = (prunedOffset: Offset) => + s"Transactions request from ${startInclusive.unwrap} to ${endInclusive.unwrap} precedes pruned offset ${prunedOffset.unwrap}", + errorLedgerEnd = (ledgerEndOffset: Option[Offset]) => + s"Transactions request from ${startInclusive.unwrap} to ${endInclusive.unwrap} is beyond ledger end offset ${ledgerEndOffset + .fold(0L)(_.unwrap)}", + )(dispatcher.executeSql(dbMetrics.getEventSeqIdRange) { connection => + EventsRange( + startInclusiveOffset = startInclusive, + startInclusiveEventSeqId = + eventStorageBackend.maxEventSequentialId(startInclusive.decrement)(connection), + endInclusiveOffset = endInclusive, + endInclusiveEventSeqId = + eventStorageBackend.maxEventSequentialId(Some(endInclusive))(connection), ) + }) } @@ -284,26 +280,34 @@ private[dao] object UpdateReader { eventProjectionProperties: EventProjectionProperties, lfValueTranslation: LfValueTranslation, )( - rawAssignEntries: Seq[Entry[RawAssignEventLegacy]] + rawAssignEntries: Seq[(Entry[RawAssignEventLegacy], 
Option[FatContract])] )(implicit lc: LoggingContextWithTrace, ec: ExecutionContext): Future[Option[Reassignment]] = MonadUtil - .sequentialTraverse(rawAssignEntries) { rawAssignEntry => + .sequentialTraverse(rawAssignEntries) { case (rawAssignEntry, fatContractO) => lfValueTranslation - .deserializeRawCreated( + .toApiCreatedEvent( eventProjectionProperties = eventProjectionProperties, - rawCreatedEvent = rawAssignEntry.event.rawCreatedEvent, + fatContractInstance = fatContractO.getOrElse( + throw new IllegalStateException( + s"Contract for internal contract id ${rawAssignEntry.event.rawCreatedEvent.internalContractId} was not found in the contract store." + ) + ), offset = rawAssignEntry.offset, nodeId = rawAssignEntry.nodeId, + representativePackageId = rawAssignEntry.event.rawCreatedEvent.representativePackageId, + witnesses = rawAssignEntry.event.rawCreatedEvent.witnessParties, + acsDelta = rawAssignEntry.event.rawCreatedEvent.flatEventWitnesses.nonEmpty, ) + } .map(createdEvents => - rawAssignEntries.headOption.map(first => + rawAssignEntries.headOption.map { case (first, _) => Reassignment( updateId = first.updateId, commandId = first.commandId.getOrElse(""), workflowId = first.workflowId.getOrElse(""), offset = first.offset, - events = rawAssignEntries.zip(createdEvents).map { case (entry, created) => + events = rawAssignEntries.zip(createdEvents).map { case ((entry, _), created) => ReassignmentEvent( ReassignmentEvent.Event.Assigned( UpdateReader.toAssignedEvent(entry.event, created) @@ -314,76 +318,100 @@ private[dao] object UpdateReader { traceContext = first.traceContext.map(DamlTraceContext.parseFrom), synchronizerId = first.synchronizerId, ) - ) + } ) def deserializeRawAcsDeltaEvent( eventProjectionProperties: EventProjectionProperties, lfValueTranslation: LfValueTranslation, )( - rawFlatEntry: Entry[RawAcsDeltaEventLegacy] + rawFlatEntryFatContract: (Entry[RawAcsDeltaEventLegacy], Option[FatContract]) )(implicit loggingContext: LoggingContextWithTrace, ec: ExecutionContext, - ): Future[Entry[Event]] = rawFlatEntry.event match { - case rawCreated: RawCreatedEventLegacy => - lfValueTranslation - .deserializeRawCreated( - eventProjectionProperties = eventProjectionProperties, - rawCreatedEvent = rawCreated, - offset = rawFlatEntry.offset, - nodeId = rawFlatEntry.nodeId, - ) - .map(createdEvent => rawFlatEntry.withEvent(Event(Event.Event.Created(createdEvent)))) + ): Future[Entry[Event]] = + rawFlatEntryFatContract match { + case (rawFlatEntry, fatContractO) => + rawFlatEntry.event match { + case rawCreated: RawCreatedEventLegacy => + lfValueTranslation + .toApiCreatedEvent( + eventProjectionProperties = eventProjectionProperties, + fatContractInstance = fatContractO.getOrElse( + throw new IllegalStateException( + s"Contract for internal contract id ${rawCreated.internalContractId} was not found in the contract store." 
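// Editor note (sketch under assumed helper names isCreated/internalId): the
// Option[FatContract] consumed here was produced upstream by one batched
// contract-store lookup keyed by internal contract id, roughly:
//   val ids = entries.collect { case e if isCreated(e.event) => internalId(e.event) }
//   contractStore
//     .lookupBatchedNonCached(ids)
//     .map(byId => entries.map(e => e -> byId.get(internalId(e.event)).map(_.inst)))
// A miss for a created event therefore means the index database and the contract
// store disagree, which is why it surfaces as an IllegalStateException rather than
// a client-facing error.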
+ ) + ), + offset = rawFlatEntry.offset, + nodeId = rawFlatEntry.nodeId, + representativePackageId = rawCreated.representativePackageId, + witnesses = rawCreated.witnessParties, + acsDelta = rawCreated.flatEventWitnesses.nonEmpty, + ) + .map(createdEvent => rawFlatEntry.withEvent(Event(Event.Event.Created(createdEvent)))) - case rawArchived: RawArchivedEventLegacy => - Future.successful( - rawFlatEntry.withEvent( - Event( - Event.Event.Archived( - lfValueTranslation.deserializeRawArchived( - eventProjectionProperties, - rawFlatEntry.withEvent(rawArchived), + case rawArchived: RawArchivedEventLegacy => + Future.successful( + rawFlatEntry.withEvent( + Event( + Event.Event.Archived( + lfValueTranslation.deserializeRawArchived( + eventProjectionProperties, + rawFlatEntry.withEvent(rawArchived), + ) + ) + ) ) ) - ) - ) - ) - } + } + } def deserializeRawLedgerEffectsEvent( eventProjectionProperties: EventProjectionProperties, lfValueTranslation: LfValueTranslation, )( - rawTreeEntry: Entry[RawLedgerEffectsEventLegacy] + rawTreeEntryFatContract: (Entry[RawLedgerEffectsEventLegacy], Option[FatContract]) )(implicit loggingContext: LoggingContextWithTrace, ec: ExecutionContext, - ): Future[Entry[Event]] = rawTreeEntry.event match { - case rawCreated: RawCreatedEventLegacy => - lfValueTranslation - .deserializeRawCreated( - eventProjectionProperties = eventProjectionProperties, - rawCreatedEvent = rawCreated, - offset = rawTreeEntry.offset, - nodeId = rawTreeEntry.nodeId, - ) - .map(createdEvent => - rawTreeEntry.withEvent( - Event(Event.Event.Created(createdEvent)) - ) - ) - - case rawExercised: RawExercisedEventLegacy => - lfValueTranslation - .deserializeRawExercised(eventProjectionProperties, rawTreeEntry.withEvent(rawExercised)) - .map(exercisedEvent => - rawTreeEntry.copy( - event = Event(Event.Event.Exercised(exercisedEvent)) - ) - ) - } + ): Future[Entry[Event]] = + rawTreeEntryFatContract match { + case (rawTreeEntry, fatContractO) => + rawTreeEntry.event match { + case rawCreated: RawCreatedEventLegacy => + lfValueTranslation + .toApiCreatedEvent( + eventProjectionProperties = eventProjectionProperties, + fatContractInstance = fatContractO.getOrElse( + throw new IllegalStateException( + s"Contract for internal contract id ${rawCreated.internalContractId} was not found in the contract store." 
+ ) + ), + offset = rawTreeEntry.offset, + nodeId = rawTreeEntry.nodeId, + representativePackageId = rawCreated.representativePackageId, + witnesses = rawCreated.witnessParties, + acsDelta = rawCreated.flatEventWitnesses.nonEmpty, + ) + .map(createdEvent => + rawTreeEntry.withEvent( + Event(Event.Event.Created(createdEvent)) + ) + ) + case rawExercised: RawExercisedEventLegacy => + lfValueTranslation + .deserializeRawExercised( + eventProjectionProperties, + rawTreeEntry.withEvent(rawExercised), + ) + .map(exercisedEvent => + rawTreeEntry.copy( + event = Event(Event.Event.Exercised(exercisedEvent)) + ) + ) + } + } def filterRawEvents[T <: RawEventLegacy](templatePartiesFilter: TemplatePartiesFilter)( rawEvents: Seq[Entry[T]] ): Seq[Entry[T]] = { diff --git a/sdk/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/dao/events/UpdatesStreamReader.scala b/sdk/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/dao/events/UpdatesStreamReader.scala index 61c3d686ad99..96f78f2b1ea4 100644 --- a/sdk/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/dao/events/UpdatesStreamReader.scala +++ b/sdk/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/dao/events/UpdatesStreamReader.scala @@ -16,12 +16,17 @@ import com.digitalasset.canton.ledger.api.{TraceIdentifiers, TransactionShape} import com.digitalasset.canton.logging.LoggingContextWithTrace.implicitExtractTraceContext import com.digitalasset.canton.logging.{LoggingContextWithTrace, NamedLoggerFactory, NamedLogging} import com.digitalasset.canton.metrics.LedgerApiServerMetrics +import com.digitalasset.canton.networking.grpc.CantonGrpcUtil.GrpcErrors.AbortedDueToShutdown +import com.digitalasset.canton.participant.store.ContractStore import com.digitalasset.canton.platform.config.UpdatesStreamsConfig import com.digitalasset.canton.platform.store.backend.EventStorageBackend import com.digitalasset.canton.platform.store.backend.EventStorageBackend.SequentialIdBatch.Ids import com.digitalasset.canton.platform.store.backend.EventStorageBackend.{ Entry, RawAcsDeltaEventLegacy, + RawArchivedEventLegacy, + RawCreatedEventLegacy, + RawExercisedEventLegacy, RawLedgerEffectsEventLegacy, } import com.digitalasset.canton.platform.store.backend.common.{ @@ -43,6 +48,7 @@ import com.digitalasset.canton.platform.store.utils.{ Telemetry, } import com.digitalasset.canton.platform.{ + FatContract, InternalEventFormat, InternalTransactionFormat, InternalUpdateFormat, @@ -67,6 +73,7 @@ class UpdatesStreamReader( queryValidRange: QueryValidRange, eventStorageBackend: EventStorageBackend, lfValueTranslation: LfValueTranslation, + contractStore: ContractStore, metrics: LedgerApiServerMetrics, tracer: Tracer, topologyTransactionsStreamReader: TopologyTransactionsStreamReader, @@ -80,10 +87,12 @@ class UpdatesStreamReader( private val dbMetrics = metrics.index.db private val orderBySequentialEventIdFlat = - Ordering.by[Entry[RawAcsDeltaEventLegacy], Long](_.eventSequentialId) + Ordering.by[(Entry[RawAcsDeltaEventLegacy], Option[FatContract]), Long](_._1.eventSequentialId) private val orderBySequentialEventIdTree = - Ordering.by[Entry[RawLedgerEffectsEventLegacy], Long](_.eventSequentialId) + Ordering.by[(Entry[RawLedgerEffectsEventLegacy], Option[FatContract]), Long]( + _._1.eventSequentialId + ) private val paginatingAsyncStream = new PaginatingAsyncStream(loggerFactory) @@ -320,6 +329,15 @@ class 
UpdatesStreamReader( maxOutputBatchCount = maxParallelPayloadConsumingQueries + 1, metric = dbMetrics.updatesAcsDeltaStream.fetchEventConsumingIdsStakeholder, ) + + def getInternalContractIdFromCreated(event: RawAcsDeltaEventLegacy): Long = event match { + case created: RawCreatedEventLegacy => created.internalContractId + case _: RawArchivedEventLegacy => + throw new IllegalStateException( + s"archived event should not be used to look up a contract" + ) + } + + val payloadsCreate = fetchPayloads( queryRange = queryRange, @@ -334,6 +352,8 @@ maxParallelPayloadQueries = maxParallelPayloadCreateQueries, dbMetric = dbMetrics.updatesAcsDeltaStream.fetchEventCreatePayloadsLegacy, payloadQueriesLimiter = payloadQueriesLimiter, + contractStore = contractStore, + getInternalContractIdO = Some(getInternalContractIdFromCreated), ) val payloadsConsuming = fetchPayloads( @@ -350,11 +370,13 @@ maxParallelPayloadQueries = maxParallelPayloadConsumingQueries, dbMetric = dbMetrics.updatesAcsDeltaStream.fetchEventConsumingPayloadsLegacy, payloadQueriesLimiter = payloadQueriesLimiter, + contractStore = contractStore, + getInternalContractIdO = None, ) val allSortedPayloads = payloadsConsuming.mergeSorted(payloadsCreate)(orderBySequentialEventIdFlat) UpdateReader - .groupContiguous(allSortedPayloads)(by = _.updateId) + .groupContiguous(allSortedPayloads)(by = _._1.updateId) .mapAsync(transactionsProcessingParallelism)(rawEvents => deserializationQueriesLimiter.execute( deserializeLfValues(rawEvents, internalEventFormat.eventProjectionProperties) @@ -465,6 +487,14 @@ requestingParties = txFilteringConstraints.allFilterParties, )(connection) + def getInternalContractIdFromCreated(event: RawLedgerEffectsEventLegacy): Long = event match { + case created: RawCreatedEventLegacy => created.internalContractId + case _: RawExercisedEventLegacy => + throw new IllegalStateException( + s"exercised event should not be used to look up a contract" + ) + } + val payloadsCreate = fetchPayloads( queryRange = queryRange, ids = idsCreate, @@ -473,6 +503,8 @@ maxParallelPayloadQueries = maxParallelPayloadCreateQueries, dbMetric = dbMetrics.updatesLedgerEffectsStream.fetchEventCreatePayloads, payloadQueriesLimiter = payloadQueriesLimiter, + contractStore = contractStore, + getInternalContractIdO = Some(getInternalContractIdFromCreated), ) val payloadsConsuming = fetchPayloads( queryRange = queryRange, @@ -482,6 +514,8 @@ maxParallelPayloadQueries = maxParallelPayloadConsumingQueries, dbMetric = dbMetrics.updatesLedgerEffectsStream.fetchEventConsumingPayloads, payloadQueriesLimiter = payloadQueriesLimiter, + contractStore = contractStore, + getInternalContractIdO = None, ) val payloadsNonConsuming = fetchPayloads( queryRange = queryRange, @@ -492,12 +526,14 @@ maxParallelPayloadQueries = maxParallelPayloadNonConsumingQueries, dbMetric = dbMetrics.updatesLedgerEffectsStream.fetchEventNonConsumingPayloads, payloadQueriesLimiter = payloadQueriesLimiter, + contractStore = contractStore, + getInternalContractIdO = None, ) val allSortedPayloads = payloadsConsuming .mergeSorted(payloadsCreate)(orderBySequentialEventIdTree) .mergeSorted(payloadsNonConsuming)(orderBySequentialEventIdTree) UpdateReader - .groupContiguous(allSortedPayloads)(by = _.updateId) + .groupContiguous(allSortedPayloads)(by = _._1.updateId) .mapAsync(transactionsProcessingParallelism)(rawEvents => 
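// Editor note (illustrative, summarising this diff): only the create payload
// streams pass getInternalContractIdO = Some(getInternalContractIdFromCreated);
// consuming and non-consuming streams pass None, because only created events need
// a FatContract from the participant contract store to render a CreatedEvent.
// Inside fetchPayloads the pairing then reduces to:
//   getInternalContractIdO match {
//     case Some(getId) => // one batched lookup, then entry -> contractsM.get(getId(entry.event)).map(_.inst)
//     case None        => Future.successful(events.map(_ -> None))
//   }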
deserializationQueriesLimiter.execute( deserializeLfValuesTree(rawEvents, internalEventFormat.eventProjectionProperties) @@ -563,9 +599,11 @@ class UpdatesStreamReader( maxParallelPayloadQueries: Int, dbMetric: DatabaseMetrics, payloadQueriesLimiter: ConcurrencyLimiter, + contractStore: ContractStore, + getInternalContractIdO: Option[T => Long], )(implicit loggingContext: LoggingContextWithTrace - ): Source[Entry[T], NotUsed] = { + ): Source[(Entry[T], Option[FatContract]), NotUsed] = { // Pekko requires for this buffer's size to be a power of two. val inputBufferSize = Utils.largestSmallerOrEqualPowerOfTwo(maxParallelPayloadQueries) ids @@ -573,18 +611,37 @@ class UpdatesStreamReader( .mapAsync(maxParallelPayloadQueries)(ids => payloadQueriesLimiter.execute { globalPayloadQueriesLimiter.execute { - dbDispatcher.executeSql(dbMetric) { implicit connection => - queryValidRange.withRangeNotPruned( - minOffsetInclusive = queryRange.startInclusiveOffset, - maxOffsetInclusive = queryRange.endInclusiveOffset, - errorPruning = (prunedOffset: Offset) => - s"Updates request from ${queryRange.startInclusiveOffset.unwrap} to ${queryRange.endInclusiveOffset.unwrap} precedes pruned offset ${prunedOffset.unwrap}", - errorLedgerEnd = (ledgerEndOffset: Option[Offset]) => - s"Updates request from ${queryRange.startInclusiveOffset.unwrap} to ${queryRange.endInclusiveOffset.unwrap} is beyond ledger end offset ${ledgerEndOffset - .fold(0L)(_.unwrap)}", - ) { - fetchEvents(ids, connection) - } + queryValidRange.withRangeNotPruned( + minOffsetInclusive = queryRange.startInclusiveOffset, + maxOffsetInclusive = queryRange.endInclusiveOffset, + errorPruning = (prunedOffset: Offset) => + s"Updates request from ${queryRange.startInclusiveOffset.unwrap} to ${queryRange.endInclusiveOffset.unwrap} precedes pruned offset ${prunedOffset.unwrap}", + errorLedgerEnd = (ledgerEndOffset: Option[Offset]) => + s"Updates request from ${queryRange.startInclusiveOffset.unwrap} to ${queryRange.endInclusiveOffset.unwrap} is beyond ledger end offset ${ledgerEndOffset + .fold(0L)(_.unwrap)}", + ) { + dbDispatcher + .executeSql(dbMetric) { connection => + fetchEvents(ids, connection) + } + .flatMap(events => + getInternalContractIdO match { + case Some(getInternalContractId) => + val internalContractIds = + events.map(entry => getInternalContractId(entry.event)) + for { + contractsM <- contractStore + .lookupBatchedNonCached(internalContractIds) + .failOnShutdownTo(AbortedDueToShutdown.Error().asGrpcError) + } yield events.map { entry => + entry -> contractsM + .get(getInternalContractId(entry.event)) + .map(_.inst) + } + case None => + Future.successful(events.map(_ -> None)) + } + ) } } } @@ -593,7 +650,7 @@ class UpdatesStreamReader( } private def deserializeLfValuesTree( - rawEvents: Vector[Entry[RawLedgerEffectsEventLegacy]], + rawEvents: Vector[(Entry[RawLedgerEffectsEventLegacy], Option[FatContract])], eventProjectionProperties: EventProjectionProperties, )(implicit lc: LoggingContextWithTrace): Future[Seq[Entry[Event]]] = Timed.future( @@ -609,7 +666,7 @@ class UpdatesStreamReader( ) private def deserializeLfValues( - rawEvents: Vector[Entry[RawAcsDeltaEventLegacy]], + rawEvents: Vector[(Entry[RawAcsDeltaEventLegacy], Option[FatContract])], eventProjectionProperties: EventProjectionProperties, )(implicit lc: LoggingContextWithTrace): Future[Seq[Entry[Event]]] = Timed.future( diff --git a/sdk/canton/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/ledger/api/auth/services/ApiServicesRequiredClaimSpec.scala 
b/sdk/canton/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/ledger/api/auth/services/ApiServicesRequiredClaimSpec.scala index 5db08a184028..e7a490994299 100644 --- a/sdk/canton/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/ledger/api/auth/services/ApiServicesRequiredClaimSpec.scala +++ b/sdk/canton/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/ledger/api/auth/services/ApiServicesRequiredClaimSpec.scala @@ -683,6 +683,7 @@ object ApiServicesRequiredClaimSpec { packageIdSelectionPreference = Seq.empty, verboseHashing = true, prefetchContractKeys = Seq.empty, + maxRecordTime = Option.empty, ) val preparedTransaction = PreparedTransaction( @@ -703,6 +704,7 @@ object ApiServicesRequiredClaimSpec { minLedgerEffectiveTime = None, maxLedgerEffectiveTime = None, globalKeyMapping = Seq.empty, + maxRecordTime = Option.empty, ) ), ) diff --git a/sdk/canton/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/IndexComponentTest.scala b/sdk/canton/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/IndexComponentTest.scala index c8e9d6617f1d..2e2974c571b7 100644 --- a/sdk/canton/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/IndexComponentTest.scala +++ b/sdk/canton/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/IndexComponentTest.scala @@ -25,7 +25,7 @@ import com.digitalasset.canton.platform.store.DbSupport.{ConnectionPoolConfig, D import com.digitalasset.canton.platform.store.cache.MutableLedgerEndCache import com.digitalasset.canton.platform.store.dao.events.{ContractLoader, LfValueTranslation} import com.digitalasset.canton.platform.store.interning.StringInterningView -import com.digitalasset.canton.platform.store.{DbSupport, FlywayMigrations} +import com.digitalasset.canton.platform.store.{DbSupport, FlywayMigrations, PruningOffsetService} import com.digitalasset.canton.store.packagemeta.PackageMetadata import com.digitalasset.canton.time.WallClock import com.digitalasset.canton.tracing.NoReportingTracerProvider @@ -83,7 +83,8 @@ trait IndexComponentTest extends PekkoBeforeAndAfterAll with BaseTest with HasEx protected def index: IndexService = testServices.index - protected def cantonContractStore: InMemoryContractStore = testServices.cantonContractStore + protected def participantContractStore: InMemoryContractStore = + testServices.participantContractStore protected def sequentialPostProcessor: Update => Unit = _ => () @@ -98,7 +99,8 @@ trait IndexComponentTest extends PekkoBeforeAndAfterAll with BaseTest with HasEx val mutableLedgerEndCache = MutableLedgerEndCache() val stringInterningView = new StringInterningView(loggerFactory) val participantId = Ref.ParticipantId.assertFromString("index-component-test-participant-id") - val cantonContractStore = new InMemoryContractStore(timeouts, loggerFactory) + val participantContractStore = new InMemoryContractStore(timeouts, loggerFactory) + val pruningOffsetService = mock[PruningOffsetService] val indexResourceOwner = for { @@ -189,7 +191,8 @@ trait IndexComponentTest extends PekkoBeforeAndAfterAll with BaseTest with HasEx _: String, _: LoggingContextWithTrace, ) => FutureUnlessShutdown.pure(Left("not used")), - cantonContractStore = cantonContractStore, + participantContractStore = participantContractStore, + pruningOffsetService = pruningOffsetService, ) } yield indexService -> indexer @@ -201,7 +204,7 @@ trait IndexComponentTest extends 
PekkoBeforeAndAfterAll with BaseTest with HasEx indexResource = indexResource, index = index, indexer = indexer, - cantonContractStore = cantonContractStore, + participantContractStore = participantContractStore, ) ) } @@ -225,6 +228,6 @@ object IndexComponentTest { indexResource: Resource[Any], index: IndexService, indexer: FutureQueue[Update], - cantonContractStore: InMemoryContractStore, + participantContractStore: InMemoryContractStore, ) } diff --git a/sdk/canton/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/apiserver/services/admin/ApiPartyManagementServiceSpec.scala b/sdk/canton/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/apiserver/services/admin/ApiPartyManagementServiceSpec.scala index 7d87f1cba643..00afe0dd4011 100644 --- a/sdk/canton/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/apiserver/services/admin/ApiPartyManagementServiceSpec.scala +++ b/sdk/canton/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/apiserver/services/admin/ApiPartyManagementServiceSpec.scala @@ -5,21 +5,31 @@ package com.digitalasset.canton.platform.apiserver.services.admin import cats.syntax.traverse.* import com.daml.ledger.api.testing.utils.PekkoBeforeAndAfterAll +import com.daml.ledger.api.v2.admin.party_management_service.AllocateExternalPartyRequest.SignedTransaction import com.daml.ledger.api.v2.admin.party_management_service.{ + AllocateExternalPartyRequest, AllocatePartyRequest, GenerateExternalPartyTopologyRequest, GenerateExternalPartyTopologyResponse, PartyDetails as ProtoPartyDetails, } -import com.daml.ledger.api.v2.crypto as lapicrypto +import com.daml.ledger.api.v2.crypto.SignatureFormat.SIGNATURE_FORMAT_RAW +import com.daml.ledger.api.v2.{crypto, crypto as lapicrypto} +import com.daml.nonempty.NonEmpty import com.daml.tracing.TelemetrySpecBase.* import com.daml.tracing.{DefaultOpenTelemetry, NoOpTelemetry} import com.digitalasset.base.error.ErrorsAssertions import com.digitalasset.base.error.utils.ErrorDetails import com.digitalasset.base.error.utils.ErrorDetails.RetryInfoDetail -import com.digitalasset.canton.BaseTest import com.digitalasset.canton.config.RequireTypes.{NonNegativeInt, PositiveInt} -import com.digitalasset.canton.crypto.{HashOps, TestHash} +import com.digitalasset.canton.crypto.v30.SigningKeyScheme.SIGNING_KEY_SCHEME_UNSPECIFIED +import com.digitalasset.canton.crypto.{ + Fingerprint, + HashOps, + SigningKeyUsage, + SigningPublicKey, + TestHash, +} import com.digitalasset.canton.ledger.api.{IdentityProviderId, ObjectMeta} import com.digitalasset.canton.ledger.localstore.api.{ PartyRecord, @@ -27,6 +37,9 @@ import com.digitalasset.canton.ledger.localstore.api.{ UserManagementStore, } import com.digitalasset.canton.ledger.participant.state +import com.digitalasset.canton.ledger.participant.state.Update.TopologyTransactionEffective +import com.digitalasset.canton.ledger.participant.state.Update.TopologyTransactionEffective.AuthorizationEvent.Added +import com.digitalasset.canton.ledger.participant.state.Update.TopologyTransactionEffective.AuthorizationLevel.Submission import com.digitalasset.canton.ledger.participant.state.index.{ IndexPartyManagementService, IndexerPartyDetails, @@ -37,41 +50,54 @@ import com.digitalasset.canton.logging.{ NamedLoggerFactory, SuppressionRule, } -import com.digitalasset.canton.platform.apiserver.services.admin.ApiPartyManagementService.blindAndConvertToProto +import 
com.digitalasset.canton.platform.apiserver.services.admin.ApiPartyManagementService.{ + CreateSubmissionId, + blindAndConvertToProto, +} import com.digitalasset.canton.platform.apiserver.services.admin.ApiPartyManagementServiceSpec.* import com.digitalasset.canton.platform.apiserver.services.admin.PartyAllocation import com.digitalasset.canton.platform.apiserver.services.tracking.{InFlight, StreamTracker} +import com.digitalasset.canton.topology.transaction.TopologyChangeOp.Replace import com.digitalasset.canton.topology.transaction.{ + DecentralizedNamespaceDefinition, + DelegationRestriction, HostingParticipant, NamespaceDelegation, ParticipantPermission, + PartyHostingLimits, PartyToKeyMapping, PartyToParticipant, + TopologyChangeOp, TopologyTransaction, } import com.digitalasset.canton.topology.{ DefaultTestIdentities, ExternalPartyOnboardingDetails, + Namespace, ParticipantId, + PartyId, SynchronizerId, } import com.digitalasset.canton.tracing.{TestTelemetrySetup, TraceContext} import com.digitalasset.canton.util.Thereafter.syntax.* import com.digitalasset.canton.version.ProtocolVersion +import com.digitalasset.canton.{BaseTest, HasExecutorService} import com.digitalasset.daml.lf.data.Ref import com.google.protobuf.ByteString import io.grpc.Status.Code import io.grpc.StatusRuntimeException import io.opentelemetry.api.trace.Tracer import io.opentelemetry.sdk.OpenTelemetrySdk +import io.scalaland.chimney.dsl.* import org.mockito.{ArgumentMatchers, ArgumentMatchersSugar, MockitoSugar} import org.scalatest.BeforeAndAfterEach import org.scalatest.concurrent.ScalaFutures import org.scalatest.matchers.should.Matchers import org.scalatest.wordspec.AsyncWordSpec import org.slf4j.event.Level +import scalapb.lenses.{Lens, Mutation} -import java.security.KeyPairGenerator +import java.security.{KeyPair, KeyPairGenerator, Signature} import scala.concurrent.duration.{DurationInt, FiniteDuration} import scala.concurrent.{ExecutionContext, Future} import scala.util.{Failure, Success} @@ -85,12 +111,43 @@ class ApiPartyManagementServiceSpec with PekkoBeforeAndAfterAll with ErrorsAssertions with BaseTest - with BeforeAndAfterEach { + with BeforeAndAfterEach + with HasExecutorService { var testTelemetrySetup: TestTelemetrySetup = _ val partiesPageSize = PositiveInt.tryCreate(100) - val aSubmissionId = Ref.SubmissionId.assertFromString("aSubmissionId") + val aPartyAllocationTracker = + PartyAllocation.TrackerKey("aParty", DefaultTestIdentities.participant1.toLf, Added(Submission)) + val createSubmissionId = new CreateSubmissionId { + override def apply( + partyIdHint: String, + authorizationLevel: TopologyTransactionEffective.AuthorizationLevel, + ): PartyAllocation.TrackerKey = aPartyAllocationTracker + } + + lazy val ( + _mockIndexTransactionsService, + mockIdentityProviderExists, + mockIndexPartyManagementService, + mockPartyRecordStore, + ) = mockedServices() + val partyAllocationTracker = makePartyAllocationTracker(loggerFactory) + + lazy val apiService = ApiPartyManagementService.createApiService( + mock[IndexPartyManagementService], + mock[UserManagementStore], + mock[IdentityProviderExists], + partiesPageSize, + NonNegativeInt.tryCreate(0), + mock[PartyRecordStore], + TestPartySyncService(testTelemetrySetup.tracer), + oneHour, + createSubmissionId, + NoOpTelemetry, + mock[PartyAllocation.Tracker], + loggerFactory = loggerFactory, + ) override def beforeEach(): Unit = testTelemetrySetup = new TestTelemetrySetup() @@ -138,6 +195,430 @@ class ApiPartyManagementServiceSpec .copy(isLocal = false) } + 
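// Editor note (self-contained JDK sketch mirroring the createSigningKey/sign
// helpers below; the payload value is illustrative): Ed25519 key generation and
// raw signing via java.security, available since JDK 15:
//   import java.security.{KeyPairGenerator, Signature}
//   val keyPair = KeyPairGenerator.getInstance("Ed25519").generateKeyPair()
//   val signer = Signature.getInstance("Ed25519")
//   signer.initSign(keyPair.getPrivate)
//   signer.update("payload".getBytes)
//   val sig = signer.sign()
//   val verifier = Signature.getInstance("Ed25519")
//   verifier.initVerify(keyPair.getPublic)
//   verifier.update("payload".getBytes)
//   assert(verifier.verify(sig))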
def createSigningKey: (Option[crypto.SigningPublicKey], KeyPair) = { + val keyGen = KeyPairGenerator.getInstance("Ed25519") + val keyPair = keyGen.generateKeyPair() + val protoKey = Some( + lapicrypto.SigningPublicKey( + format = lapicrypto.CryptoKeyFormat.CRYPTO_KEY_FORMAT_DER_X509_SUBJECT_PUBLIC_KEY_INFO, + keyData = ByteString.copyFrom(keyPair.getPublic.getEncoded), + keySpec = lapicrypto.SigningKeySpec.SIGNING_KEY_SPEC_EC_CURVE25519, + ) + ) + (protoKey, keyPair) + } + + def cantonSigningPublicKey(publicKey: crypto.SigningPublicKey) = + SigningPublicKey + .fromProtoV30( + com.digitalasset.canton.crypto.v30.SigningPublicKey( + format = + publicKey.format.transformInto[com.digitalasset.canton.crypto.v30.CryptoKeyFormat], + publicKey = publicKey.keyData, + // Deprecated field + scheme = SIGNING_KEY_SCHEME_UNSPECIFIED, + usage = Seq(SigningKeyUsage.Namespace.toProtoEnum), + keySpec = + publicKey.keySpec.transformInto[com.digitalasset.canton.crypto.v30.SigningKeySpec], + ) + ) + .value + + def sign(keyPair: KeyPair, data: ByteString, signedBy: Fingerprint) = { + val signatureInstance = Signature.getInstance("Ed25519") + signatureInstance.initSign(keyPair.getPrivate) + signatureInstance.update(data.toByteArray) + lapicrypto.Signature( + format = SIGNATURE_FORMAT_RAW, + signature = ByteString.copyFrom(signatureInstance.sign()), + signedBy = signedBy.toProtoPrimitive, + signingAlgorithmSpec = lapicrypto.SigningAlgorithmSpec.SIGNING_ALGORITHM_SPEC_ED25519, + ) + } + + "validate allocateExternalParty request" when { + def testAllocateExternalPartyValidation( + requestTransform: Lens[ + AllocateExternalPartyRequest, + AllocateExternalPartyRequest, + ] => Mutation[AllocateExternalPartyRequest], + expectedFailure: PartyId => Option[String], + ) = { + val (publicKey, keyPair) = createSigningKey + val cantonPublicKey = cantonSigningPublicKey(publicKey.value) + val partyId = PartyId.tryCreate("alice", cantonPublicKey.fingerprint) + for { + generatedTransactions <- apiService.generateExternalPartyTopology( + GenerateExternalPartyTopologyRequest( + synchronizer = DefaultTestIdentities.synchronizerId.toProtoPrimitive, + partyHint = "alice", + publicKey = publicKey, + localParticipantObservationOnly = false, + otherConfirmingParticipantUids = + Seq(DefaultTestIdentities.participant2.uid.toProtoPrimitive), + confirmationThreshold = 1, + observingParticipantUids = + Seq(DefaultTestIdentities.participant3.uid.toProtoPrimitive), + ) + ) + signature = sign(keyPair, generatedTransactions.multiHash, partyId.fingerprint) + request = AllocateExternalPartyRequest( + synchronizer = DefaultTestIdentities.synchronizerId.toProtoPrimitive, + onboardingTransactions = generatedTransactions.topologyTransactions.map(tx => + AllocateExternalPartyRequest.SignedTransaction(tx, Seq.empty) + ), + multiHashSignatures = Seq(signature), + identityProviderId = "", + ).update(requestTransform) + result <- apiService + .allocateExternalParty(request) + .transform { + case Failure(e: io.grpc.StatusRuntimeException) => + expectedFailure(partyId) match { + case Some(value) => + e.getStatus.getCode.value() shouldBe io.grpc.Status.INVALID_ARGUMENT.getCode + .value() + e.getStatus.getDescription should include(value) + Success(succeed) + case None => + fail(s"Expected success but allocation failed with $e") + } + case Failure(other) => fail(s"expected a gRPC exception but got $other") + case Success(_) if expectedFailure(partyId).isDefined => + fail("Expected a failure but got a success") + case Success(_) => Success(succeed) + } + } yield 
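// Editor note: the transform above is the assertion pattern used throughout these
// validation tests; its minimal shape (ScalaTest plus grpc-java, assumed here) is:
//   fut.transform {
//     case Failure(e: StatusRuntimeException) =>
//       Success(e.getStatus.getCode shouldBe Status.INVALID_ARGUMENT.getCode)
//     case other => fail(s"unexpected outcome: $other")
//   }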
result + } + + val (bobKey, bobKeyPair) = { + val (publicKey, keyPair) = createSigningKey + (cantonSigningPublicKey(publicKey.value), keyPair) + } + val bobParty = PartyId.tryCreate("bob", bobKey.fingerprint) + + def mkDecentralizedTx(ownerSize: Int): (SignedTransaction, Namespace) = { + val ownersKeys = Seq.fill(ownerSize)(createSigningKey).map { case (publicKey, keyPair) => + (cantonSigningPublicKey(publicKey.value), keyPair) + } + val namespaceOwners = ownersKeys.map(_._1.fingerprint).toSet.map(Namespace(_)) + val decentralizedNamespace = + DecentralizedNamespaceDefinition.computeNamespace(namespaceOwners) + val decentralizedTx = TopologyTransaction( + Replace, + PositiveInt.one, + DecentralizedNamespaceDefinition.tryCreate( + decentralizedNamespace = decentralizedNamespace, + threshold = PositiveInt.one, + owners = NonEmpty.from(namespaceOwners).value, + ), + testedProtocolVersion, + ) + val signatures = ownersKeys.map { case (publicKey, keyPair) => + sign(keyPair, decentralizedTx.getCryptographicEvidence, publicKey.fingerprint) + } + ( + SignedTransaction( + decentralizedTx.toByteString, + signatures, + ), + decentralizedNamespace, + ) + } + + "fail if missing synchronizerId" in { + testAllocateExternalPartyValidation( + _.synchronizer.modify(_ => ""), + _ => Some("The submitted command is missing a mandatory field: synchronizer"), + ) + } + + "fail if missing a party to participant" in { + testAllocateExternalPartyValidation( + _.onboardingTransactions.modify( + _.filterNot(tx => + TopologyTransaction + .fromByteString(testedProtocolVersion, tx.transaction) + .value + .selectMapping[PartyToParticipant] + .nonEmpty + ) + ), + _ => Some("One transaction of type PartyToParticipant must be provided, got 0"), + ) + } + + "allow a single P2P" in { + testAllocateExternalPartyValidation( + _.onboardingTransactions.modify( + _.filter(tx => + TopologyTransaction + .fromByteString(testedProtocolVersion, tx.transaction) + .value + .selectMapping[PartyToParticipant] + .nonEmpty + ) + ), + _ => None, + ) + } + + "refuse a P2P with Submission rights" in { + testAllocateExternalPartyValidation( + _.onboardingTransactions.modify( + _.map(tx => + TopologyTransaction + .fromByteString(testedProtocolVersion, tx.transaction) + .value + .selectMapping[PartyToParticipant] + .map { p2p => + TopologyTransaction( + p2p.operation, + p2p.serial, + PartyToParticipant.tryCreate( + p2p.mapping.partyId, + p2p.mapping.threshold, + Seq(HostingParticipant(participantId, ParticipantPermission.Submission)), + ), + testedProtocolVersion, + ) + } + .map { updatedTx => + SignedTransaction(updatedTx.toByteString, tx.signatures) + } + .getOrElse(tx) + ) + ), + _ => + Some( + "The PartyToParticipant transaction must not contain any node with Submission permission. Nodes with submission permission: PAR::participant1::participant1..." 
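// Editor note: the expected messages in these tests are intentionally truncated
// ("PAR::participant1::participant1..."); the check uses substring matching, so
// only the stable prefix needs to match:
//   e.getStatus.getDescription should include(value)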
+ ), + ) + } + + "refuse a non multi-hosted party submitted to another node" in { + testAllocateExternalPartyValidation( + _.onboardingTransactions.modify( + _.map(tx => + TopologyTransaction + .fromByteString(testedProtocolVersion, tx.transaction) + .value + .selectMapping[PartyToParticipant] + .map { p2p => + TopologyTransaction( + p2p.operation, + p2p.serial, + PartyToParticipant.tryCreate( + p2p.mapping.partyId, + p2p.mapping.threshold, + Seq( + HostingParticipant( + DefaultTestIdentities.participant2, + ParticipantPermission.Confirmation, + ) + ), + ), + testedProtocolVersion, + ) + } + .map { updatedTx => + SignedTransaction(updatedTx.toByteString, tx.signatures) + } + .getOrElse(tx) + ) + ), + _ => + Some( + "The party is to be hosted on a single participant (PAR::participant2::participant2...) that is not this participant (PAR::participant1::participant1...). Submit the allocation request on PAR::participant2::participant2... instead." + ), + ) + } + + "refuse a multi-hosted party with no confirming node" in { + testAllocateExternalPartyValidation( + _.onboardingTransactions.modify( + _.map(tx => + TopologyTransaction + .fromByteString(testedProtocolVersion, tx.transaction) + .value + .selectMapping[PartyToParticipant] + .map { p2p => + TopologyTransaction( + p2p.operation, + p2p.serial, + PartyToParticipant.tryCreate( + p2p.mapping.partyId, + p2p.mapping.threshold, + Seq( + HostingParticipant( + participantId, + ParticipantPermission.Observation, + ), + HostingParticipant( + DefaultTestIdentities.participant2, + ParticipantPermission.Observation, + ), + ), + ), + testedProtocolVersion, + ) + } + .map { updatedTx => + SignedTransaction(updatedTx.toByteString, tx.signatures) + } + .getOrElse(tx) + ) + ), + _ => + Some( + "The PartyToParticipant transaction must contain at least one node with Confirmation permission" + ), + ) + } + + "refuse mismatching party namespace and p2p namespace" in { + val updatedTransaction = TopologyTransaction( + Replace, + PositiveInt.one, + NamespaceDelegation.tryCreate( + namespace = bobParty.namespace, + target = bobKey, + restriction = DelegationRestriction.CanSignAllMappings, + ), + testedProtocolVersion, + ) + val signature = sign( + bobKeyPair, + updatedTransaction.hash.hash.getCryptographicEvidence, + bobParty.fingerprint, + ) + testAllocateExternalPartyValidation( + _.onboardingTransactions.modify( + _.map(tx => + TopologyTransaction + .fromByteString(testedProtocolVersion, tx.transaction) + .value + .selectMapping[NamespaceDelegation] + .map(_ => updatedTransaction) + .map { updatedTx => + SignedTransaction( + updatedTx.toByteString, + Seq(signature), + ) + } + .getOrElse(tx) + ) + ), + partyId => + Some( + s"The Party namespace (${bobParty.namespace}) does not match the PartyToParticipant namespace (${partyId.namespace})" + ), + ) + } + + "refuse mismatching p2k namespace and p2p namespace" in { + def updatedTransaction(signingKeys: NonEmpty[Seq[SigningPublicKey]]) = TopologyTransaction( + Replace, + PositiveInt.one, + PartyToKeyMapping.tryCreate( + partyId = bobParty, + threshold = PositiveInt.one, + signingKeys = signingKeys, + ), + testedProtocolVersion, + ) + testAllocateExternalPartyValidation( + _.onboardingTransactions.modify( + _.map(tx => + TopologyTransaction + .fromByteString(testedProtocolVersion, tx.transaction) + .value + .selectMapping[PartyToKeyMapping] + .map(p2k => updatedTransaction(p2k.mapping.signingKeys)) + .map { updatedTx => + SignedTransaction(updatedTx.toByteString, tx.signatures) + } + .getOrElse(tx) + ) + ), + 
partyId => + Some( + s"The PartyToKeyMapping namespace (${bobParty.namespace}) does not match the PartyToParticipant namespace (${partyId.namespace})" + ), + ) + } + + "refuse mismatching decentralized namespace and p2p namespace" in { + val (decentralizedNamespaceTx, namespace) = mkDecentralizedTx(1) + testAllocateExternalPartyValidation( + _.onboardingTransactions.modify( + // Remove the Namespace delegation generated by default + _.filterNot(tx => + TopologyTransaction + .fromByteString(testedProtocolVersion, tx.transaction) + .value + .selectMapping[NamespaceDelegation] + .isDefined + ) + // replace it with a decentralized namespace + .appended(decentralizedNamespaceTx) + ), + partyId => + Some( + s"The Party namespace ($namespace) does not match the PartyToParticipant namespace (${partyId.namespace})" + ), + ) + } + + "refuse decentralized namespace with too many owners" in { + val max = ExternalPartyOnboardingDetails.maxDecentralizedOwnersSize + val (decentralizedNamespaceTx, namespace) = mkDecentralizedTx(max.increment.value) + testAllocateExternalPartyValidation( + _.onboardingTransactions.modify( + // Remove the Namespace delegation generated by default + _.filterNot(tx => + TopologyTransaction + .fromByteString(testedProtocolVersion, tx.transaction) + .value + .selectMapping[NamespaceDelegation] + .isDefined + ) + // replace it with a decentralized namespace with too many owners + .appended(decentralizedNamespaceTx) + ), + partyId => + Some( + s"The Party namespace ($namespace) does not match the PartyToParticipant namespace (${partyId.namespace})" + ), + ) + } + + "refuse unwanted transactions" in { + testAllocateExternalPartyValidation( + _.onboardingTransactions.modify( + _.appended( + SignedTransaction( + TopologyTransaction( + TopologyChangeOp.Replace, + PositiveInt.one, + PartyHostingLimits.apply( + DefaultTestIdentities.synchronizerId, + DefaultTestIdentities.party1, + ), + testedProtocolVersion, + ).toByteString, + Seq.empty, + ) + ) + ), + _ => + Some( + s"Unsupported transactions found: PartyHostingLimits. 
Supported transactions are: NamespaceDelegation, DecentralizedNamespaceDefinition, PartyToParticipant, PartyToKeyMapping" + ), + ) + } + } + "propagate trace context" in { val ( mockIdentityProviderExists, @@ -155,7 +636,7 @@ class ApiPartyManagementServiceSpec mockPartyRecordStore, TestPartySyncService(testTelemetrySetup.tracer), oneHour, - ApiPartyManagementService.CreateSubmissionId.fixedForTests(aSubmissionId), + createSubmissionId, new DefaultOpenTelemetry(OpenTelemetrySdk.builder().build()), partyAllocationTracker, loggerFactory = loggerFactory, @@ -179,7 +660,7 @@ class ApiPartyManagementServiceSpec // Allow the tracker to complete partyAllocationTracker.onStreamItem( PartyAllocation.Completed( - PartyAllocation.TrackerKey.forTests(aSubmissionId), + aPartyAllocationTracker, IndexerPartyDetails(aParty, isLocal = true), ) ) @@ -208,7 +689,7 @@ class ApiPartyManagementServiceSpec mockPartyRecordStore, TestPartySyncService(testTelemetrySetup.tracer), oneHour, - ApiPartyManagementService.CreateSubmissionId.fixedForTests(aSubmissionId.toString), + createSubmissionId, NoOpTelemetry, partyAllocationTracker, loggerFactory = loggerFactory, @@ -255,32 +736,6 @@ class ApiPartyManagementServiceSpec } "generate-external-topology" when { - def createService = { - val keyGen = KeyPairGenerator.getInstance("Ed25519") - val keyPair = keyGen.generateKeyPair() - val apiPartyManagementService = ApiPartyManagementService.createApiService( - mock[IndexPartyManagementService], - mock[UserManagementStore], - mock[IdentityProviderExists], - partiesPageSize, - NonNegativeInt.tryCreate(0), - mock[PartyRecordStore], - TestPartySyncService(testTelemetrySetup.tracer), - oneHour, - ApiPartyManagementService.CreateSubmissionId.fixedForTests(aSubmissionId.toString), - NoOpTelemetry, - mock[PartyAllocation.Tracker], - loggerFactory = loggerFactory, - ) - val signingKey = Some( - lapicrypto.SigningPublicKey( - format = lapicrypto.CryptoKeyFormat.CRYPTO_KEY_FORMAT_DER_X509_SUBJECT_PUBLIC_KEY_INFO, - keyData = ByteString.copyFrom(keyPair.getPublic.getEncoded), - keySpec = lapicrypto.SigningKeySpec.SIGNING_KEY_SPEC_EC_CURVE25519, - ) - ) - (apiPartyManagementService, signingKey) - } def getMappingsFromResponse(response: GenerateExternalPartyTopologyResponse) = { response.topologyTransactions should have length (3) val txs = response.topologyTransactions.toList @@ -297,11 +752,11 @@ class ApiPartyManagementServiceSpec } } "correctly pass through all fields" in { - val (service, publicKey) = createService + val (publicKey, _) = createSigningKey val syncId = DefaultTestIdentities.synchronizerId for { - response <- service.generateExternalPartyTopology( + response <- apiService.generateExternalPartyTopology( GenerateExternalPartyTopologyRequest( synchronizer = syncId.toProtoPrimitive, partyHint = "alice", @@ -339,11 +794,11 @@ class ApiPartyManagementServiceSpec } } "correctly interpret local observer" in { - val (service, publicKey) = createService + val (publicKey, _) = createSigningKey val syncId = DefaultTestIdentities.synchronizerId for { - response <- service.generateExternalPartyTopology( + response <- apiService.generateExternalPartyTopology( GenerateExternalPartyTopologyRequest( synchronizer = syncId.toProtoPrimitive, partyHint = "alice", @@ -371,11 +826,11 @@ class ApiPartyManagementServiceSpec } } "correctly reject invalid threshold" in { - val (service, publicKey) = createService + val (publicKey, _) = createSigningKey val syncId = DefaultTestIdentities.synchronizerId for { - response <- service + response <- 
apiService .generateExternalPartyTopology( GenerateExternalPartyTopologyRequest( synchronizer = syncId.toProtoPrimitive, @@ -396,9 +851,9 @@ class ApiPartyManagementServiceSpec } } "fail gracefully on invalid synchronizer-ids" in { - val (service, publicKey) = createService + val (publicKey, _) = createSigningKey for { - response1 <- service + response1 <- apiService .generateExternalPartyTopology( GenerateExternalPartyTopologyRequest( synchronizer = "", @@ -411,7 +866,7 @@ class ApiPartyManagementServiceSpec ) ) .failed - response2 <- service + response2 <- apiService .generateExternalPartyTopology( GenerateExternalPartyTopologyRequest( synchronizer = SynchronizerId.tryFromString("not::valid").toProtoPrimitive, @@ -430,10 +885,10 @@ class ApiPartyManagementServiceSpec } } "fail gracefully on invalid party hints" in { - val (service, publicKey) = createService + val (publicKey, _) = createSigningKey val syncId = DefaultTestIdentities.synchronizerId for { - response1 <- service + response1 <- apiService .generateExternalPartyTopology( GenerateExternalPartyTopologyRequest( synchronizer = syncId.toProtoPrimitive, @@ -446,7 +901,7 @@ class ApiPartyManagementServiceSpec ) ) .failed - response2 <- service + response2 <- apiService .generateExternalPartyTopology( GenerateExternalPartyTopologyRequest( synchronizer = syncId.toProtoPrimitive, @@ -466,10 +921,9 @@ class ApiPartyManagementServiceSpec } } "fail gracefully on empty keys" in { - val (service, _) = createService val syncId = DefaultTestIdentities.synchronizerId for { - response1 <- service + response1 <- apiService .generateExternalPartyTopology( GenerateExternalPartyTopologyRequest( synchronizer = syncId.toProtoPrimitive, @@ -487,10 +941,10 @@ class ApiPartyManagementServiceSpec } } "fail gracefully on invalid duplicate participant ids" in { - val (service, publicKey) = createService + val (publicKey, _) = createSigningKey val syncId = DefaultTestIdentities.synchronizerId for { - response1 <- service + response1 <- apiService .generateExternalPartyTopology( GenerateExternalPartyTopologyRequest( synchronizer = syncId.toProtoPrimitive, @@ -504,7 +958,7 @@ class ApiPartyManagementServiceSpec ) ) .failed - response2 <- service + response2 <- apiService .generateExternalPartyTopology( GenerateExternalPartyTopologyRequest( synchronizer = syncId.toProtoPrimitive, @@ -518,9 +972,38 @@ class ApiPartyManagementServiceSpec ) ) .failed + response3 <- apiService + .generateExternalPartyTopology( + GenerateExternalPartyTopologyRequest( + synchronizer = syncId.toProtoPrimitive, + partyHint = "alice", + publicKey = publicKey, + localParticipantObservationOnly = false, + otherConfirmingParticipantUids = + Seq(DefaultTestIdentities.participant2.uid.toProtoPrimitive), + confirmationThreshold = 1, + observingParticipantUids = + Seq(DefaultTestIdentities.participant2.uid.toProtoPrimitive), + ) + ) + .failed } yield { - response1.getMessage should include("Duplicate participant ids") - response2.getMessage should include("Duplicate participant ids") + response1.getMessage should include( + s"This participant node ($participantId) is also listed in 'otherConfirmingParticipantUids'." + + s" By sending the request to this node, it is de facto a hosting node" + + s" and must not be listed in 'otherConfirmingParticipantUids'." + ) + response2.getMessage should include( + "This participant node (PAR::participant1::participant1...) is also listed in 'observingParticipantUids'." 
+ + " By sending the request to this node, it is de facto a hosting node" + + " and must not be listed in 'observingParticipantUids'." + ) + response3.getMessage should include( + "The following participant IDs are referenced multiple times in the request:" + + " participant2::participant2.... " + + "Please ensure all IDs are referenced only once across" + + " 'otherConfirmingParticipantUids' and 'observingParticipantUids' fields." + ) } } @@ -533,7 +1016,7 @@ class ApiPartyManagementServiceSpec ): PartyAllocation.Tracker = StreamTracker.withTimer[PartyAllocation.TrackerKey, PartyAllocation.Completed]( timer = new java.util.Timer("test-timer"), - itemKey = (_ => Some(PartyAllocation.TrackerKey.forTests(aSubmissionId))), + itemKey = (_ => Some(aPartyAllocationTracker)), inFlightCounter = InFlight.Limited(100, mock[com.daml.metrics.api.MetricHandle.Counter]), loggerFactory, ) diff --git a/sdk/canton/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/apiserver/services/command/interactive/GeneratorsInteractiveSubmission.scala b/sdk/canton/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/apiserver/services/command/interactive/GeneratorsInteractiveSubmission.scala index b568c6c06132..55e001de628d 100644 --- a/sdk/canton/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/apiserver/services/command/interactive/GeneratorsInteractiveSubmission.scala +++ b/sdk/canton/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/apiserver/services/command/interactive/GeneratorsInteractiveSubmission.scala @@ -12,7 +12,7 @@ import com.digitalasset.canton.platform.apiserver.services.command.interactive.c import com.digitalasset.canton.platform.apiserver.services.command.interactive.codec.PrepareTransactionData import com.digitalasset.canton.protocol.LfFatContractInst import com.digitalasset.canton.topology.{GeneratorsTopology, SynchronizerId} -import com.digitalasset.canton.{GeneratorsLf, LedgerUserId, LfPackageId, LfPartyId} +import com.digitalasset.canton.{GeneratorsLf, LedgerUserId, LfPackageId, LfPartyId, LfTimestamp} import com.digitalasset.daml.lf.crypto import com.digitalasset.daml.lf.crypto.Hash import com.digitalasset.daml.lf.data.{Bytes, ImmArray, Time} @@ -211,6 +211,7 @@ final class GeneratorsInteractiveSubmission( enrichedInputContracts <- Gen.sequence(coids.map(inputContractsGen)) mediatorGroup <- Arbitrary.arbitrary[PositiveInt] transactionUUID <- Gen.uuid + maxRecordTime <- Arbitrary.arbitrary[Option[LfTimestamp]] } yield PrepareTransactionData( submitterInfo, transactionMeta, @@ -225,6 +226,7 @@ final class GeneratorsInteractiveSubmission( synchronizerId, mediatorGroup.value, transactionUUID, + maxRecordTime, ) implicit val preparedTransactionDataArb: Arbitrary[PrepareTransactionData] = Arbitrary( diff --git a/sdk/canton/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/multisynchronizer/MultiSynchronizerIndexComponentTest.scala b/sdk/canton/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/multisynchronizer/MultiSynchronizerIndexComponentTest.scala index a38961ea715f..57a34e59a92b 100644 --- a/sdk/canton/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/multisynchronizer/MultiSynchronizerIndexComponentTest.scala +++ b/sdk/canton/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/multisynchronizer/MultiSynchronizerIndexComponentTest.scala @@ -51,7 +51,7 @@ 
class MultiSynchronizerIndexComponentTest extends AnyFlatSpec with IndexComponen ) (for { // contracts should be stored in canton contract store before ingesting the updates to get the internal contract ids mapping - _ <- cantonContractStore + _ <- participantContractStore .storeContracts(Seq(c1, c2)) .failOnShutdown("failed to store contracts") (reassignmentAccepted1, cn1) <- @@ -60,7 +60,7 @@ class MultiSynchronizerIndexComponentTest extends AnyFlatSpec with IndexComponen "UpdateId1", createNode = c1.inst.toCreateNode, withAcsChange = false, - participantContractStore = cantonContractStore, + participantContractStore = participantContractStore, ) (reassignmentAccepted2, cn2) <- mkReassignmentAccepted( @@ -68,7 +68,7 @@ class MultiSynchronizerIndexComponentTest extends AnyFlatSpec with IndexComponen "UpdateId2", createNode = c2.inst.toCreateNode, withAcsChange = true, - participantContractStore = cantonContractStore, + participantContractStore = participantContractStore, ) _ = ingestUpdates(reassignmentAccepted1, reassignmentAccepted2) activeContractO1 <- index.lookupActiveContract(Set(party), cn1.coid) diff --git a/sdk/canton/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/store/backend/StorageBackendProvider.scala b/sdk/canton/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/store/backend/StorageBackendProvider.scala index c0c913c87331..71b634a8a86a 100644 --- a/sdk/canton/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/store/backend/StorageBackendProvider.scala +++ b/sdk/canton/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/store/backend/StorageBackendProvider.scala @@ -6,6 +6,7 @@ package com.digitalasset.canton.platform.store.backend import com.digitalasset.canton.BaseTest import com.digitalasset.canton.data.{CantonTimestamp, Offset} import com.digitalasset.canton.logging.NamedLoggerFactory +import com.digitalasset.canton.platform.store.PruningOffsetService import com.digitalasset.canton.platform.store.backend.ParameterStorageBackend.LedgerEnd import com.digitalasset.canton.platform.store.backend.h2.H2StorageBackendFactory import com.digitalasset.canton.platform.store.backend.localstore.{ @@ -17,6 +18,7 @@ import com.digitalasset.canton.platform.store.backend.postgresql.PostgresStorage import com.digitalasset.canton.platform.store.cache.MutableLedgerEndCache import com.digitalasset.canton.platform.store.interning.MockStringInterning import com.digitalasset.canton.platform.store.testing.postgresql.PostgresAroundAll +import org.mockito.MockitoSugar.mock import org.scalatest.Suite import java.sql.Connection @@ -87,6 +89,7 @@ trait StorageBackendProviderH2 extends StorageBackendProvider with BaseTest { th final case class TestBackend( ingestion: IngestionStorageBackend[_], parameter: ParameterStorageBackend, + pruningOffsetService: PruningOffsetService, party: PartyStorageBackend, completion: CompletionStorageBackend, contract: ContractStorageBackend, @@ -115,6 +118,7 @@ object TestBackend { TestBackend( ingestion = storageBackendFactory.createIngestionStorageBackend, parameter = storageBackendFactory.createParameterStorageBackend(stringInterning), + pruningOffsetService = mock[PruningOffsetService], party = storageBackendFactory.createPartyStorageBackend(ledgerEndCache), completion = storageBackendFactory.createCompletionStorageBackend(stringInterning, loggerFactory), diff --git 
a/sdk/canton/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/store/backend/StorageBackendTestsQueryValidRange.scala b/sdk/canton/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/store/backend/StorageBackendTestsQueryValidRange.scala index 2f65bae6ab54..4bf2234bbcc7 100644 --- a/sdk/canton/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/store/backend/StorageBackendTestsQueryValidRange.scala +++ b/sdk/canton/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/store/backend/StorageBackendTestsQueryValidRange.scala @@ -17,143 +17,166 @@ import org.scalatest.flatspec.AnyFlatSpec import org.scalatest.matchers.should.Matchers import org.slf4j.event.Level +import scala.concurrent.{ExecutionContext, Future} + private[backend] trait StorageBackendTestsQueryValidRange extends Matchers with StorageBackendSpec { this: AnyFlatSpec => implicit val loggingContextWithTrace: LoggingContextWithTrace = new LoggingContextWithTrace(LoggingEntries.empty, TraceContext.empty) + implicit val ec: ExecutionContext = directExecutionContext + behavior of "QueryValidRange.withRangeNotPruned" it should "allow valid range if no pruning and before ledger end" in { executeSql(backend.parameter.initializeParameters(someIdentityParams, loggerFactory)) + when(backend.pruningOffsetService.pruningOffset(any[TraceContext])) + .thenReturn(Future.successful(None)) executeSql(updateLedgerEnd(offset(10), 10L)) - executeSql(implicit connection => - QueryValidRangeImpl(backend.parameter, this.loggerFactory).withRangeNotPruned( - minOffsetInclusive = offset(3), - maxOffsetInclusive = offset(8), - errorPruning = _ => "", - errorLedgerEnd = _ => "", - )(()) - ) + QueryValidRangeImpl( + ledgerEndCache = backend.ledgerEndCache, + pruningOffsetService = backend.pruningOffsetService, + loggerFactory = this.loggerFactory, + ).withRangeNotPruned( + minOffsetInclusive = offset(3), + maxOffsetInclusive = offset(8), + errorPruning = _ => "", + errorLedgerEnd = _ => "", + )(Future.unit) + .futureValue } it should "allow valid range if no pruning and before ledger end and start from ledger begin" in { executeSql(backend.parameter.initializeParameters(someIdentityParams, loggerFactory)) + when(backend.pruningOffsetService.pruningOffset(any[TraceContext])) + .thenReturn(Future.successful(None)) executeSql(updateLedgerEnd(offset(10), 10L)) - executeSql(implicit connection => - QueryValidRangeImpl(backend.parameter, this.loggerFactory).withRangeNotPruned( - minOffsetInclusive = Offset.firstOffset, - maxOffsetInclusive = offset(8), - errorPruning = _ => "", - errorLedgerEnd = _ => "", - )(()) - ) + QueryValidRangeImpl( + ledgerEndCache = backend.ledgerEndCache, + pruningOffsetService = backend.pruningOffsetService, + loggerFactory = this.loggerFactory, + ).withRangeNotPruned( + minOffsetInclusive = Offset.firstOffset, + maxOffsetInclusive = offset(8), + errorPruning = _ => "", + errorLedgerEnd = _ => "", + )(Future.unit) + .futureValue } it should "allow valid range after pruning and before ledger end" in { executeSql(backend.parameter.initializeParameters(someIdentityParams, loggerFactory)) executeSql(updateLedgerEnd(offset(10), 10L)) - executeSql(backend.parameter.updatePrunedUptoInclusive(offset(3))) - executeSql(implicit connection => - QueryValidRangeImpl(backend.parameter, this.loggerFactory).withRangeNotPruned( - minOffsetInclusive = offset(6), - maxOffsetInclusive = offset(8), - errorPruning = _ => "", - errorLedgerEnd = 
_ => "", - )(()) - ) + when(backend.pruningOffsetService.pruningOffset(any[TraceContext])) + .thenReturn(Future.successful(Some(offset(3)))) + QueryValidRangeImpl( + ledgerEndCache = backend.ledgerEndCache, + pruningOffsetService = backend.pruningOffsetService, + loggerFactory = this.loggerFactory, + ).withRangeNotPruned( + minOffsetInclusive = offset(6), + maxOffsetInclusive = offset(8), + errorPruning = _ => "", + errorLedgerEnd = _ => "", + )(Future.unit) + .futureValue } it should "allow valid range boundary case" in { executeSql(backend.parameter.initializeParameters(someIdentityParams, loggerFactory)) executeSql(updateLedgerEnd(offset(10), 10L)) - executeSql(backend.parameter.updatePrunedUptoInclusive(offset(3))) - executeSql(implicit connection => - QueryValidRangeImpl(backend.parameter, this.loggerFactory).withRangeNotPruned( - minOffsetInclusive = offset(4), - maxOffsetInclusive = offset(10), - errorPruning = _ => "", - errorLedgerEnd = _ => "", - )(()) - ) + when(backend.pruningOffsetService.pruningOffset(any[TraceContext])) + .thenReturn(Future.successful(Some(offset(3)))) + QueryValidRangeImpl( + ledgerEndCache = backend.ledgerEndCache, + pruningOffsetService = backend.pruningOffsetService, + loggerFactory = this.loggerFactory, + ).withRangeNotPruned( + minOffsetInclusive = offset(4), + maxOffsetInclusive = offset(10), + errorPruning = _ => "", + errorLedgerEnd = _ => "", + )(Future.unit) + .futureValue } it should "deny in-valid range: earlier than pruning" in { executeSql(backend.parameter.initializeParameters(someIdentityParams, loggerFactory)) executeSql(updateLedgerEnd(offset(10), 10L)) - executeSql(backend.parameter.updatePrunedUptoInclusive(offset(3))) - loggerFactory.assertThrowsAndLogsSuppressing[StatusRuntimeException]( - SuppressionRule.Level(Level.INFO) - )( - within = executeSql(implicit connection => - QueryValidRangeImpl(backend.parameter, this.loggerFactory).withRangeNotPruned( + when(backend.pruningOffsetService.pruningOffset(any[TraceContext])) + .thenReturn(Future.successful(Some(offset(3)))) + loggerFactory + .assertThrowsAndLogsSuppressingAsync[StatusRuntimeException]( + SuppressionRule.Level(Level.INFO) + )( + within = QueryValidRangeImpl( + ledgerEndCache = backend.ledgerEndCache, + pruningOffsetService = backend.pruningOffsetService, + loggerFactory = this.loggerFactory, + ).withRangeNotPruned( minOffsetInclusive = offset(3), maxOffsetInclusive = offset(10), errorPruning = pruningOffset => s"pruning issue: ${pruningOffset.unwrap}", errorLedgerEnd = _ => "", - )(()) - ), - assertions = _.infoMessage should include( - "PARTICIPANT_PRUNED_DATA_ACCESSED(9,0): pruning issue: 3" - ), - ) + )(Future.unit), + assertions = _.infoMessage should include( + "PARTICIPANT_PRUNED_DATA_ACCESSED(9,0): pruning issue: 3" + ), + ) + .futureValue } it should "deny in-valid range: later than ledger end when ledger is not empty" in { executeSql(backend.parameter.initializeParameters(someIdentityParams, loggerFactory)) executeSql(updateLedgerEnd(offset(10), 10L)) - executeSql(backend.parameter.updatePrunedUptoInclusive(offset(3))) - loggerFactory.assertThrowsAndLogsSuppressing[StatusRuntimeException]( - SuppressionRule.Level(Level.INFO) - )( - within = executeSql(implicit connection => - QueryValidRangeImpl(backend.parameter, this.loggerFactory).withRangeNotPruned( + when(backend.pruningOffsetService.pruningOffset(any[TraceContext])) + .thenReturn(Future.successful(Some(offset(3)))) + loggerFactory + .assertThrowsAndLogsSuppressingAsync[StatusRuntimeException]( + 
SuppressionRule.Level(Level.INFO) + )( + within = QueryValidRangeImpl( + ledgerEndCache = backend.ledgerEndCache, + pruningOffsetService = backend.pruningOffsetService, + loggerFactory = this.loggerFactory, + ).withRangeNotPruned( minOffsetInclusive = offset(4), maxOffsetInclusive = offset(11), errorPruning = _ => "", errorLedgerEnd = ledgerEndOffset => s"ledger-end issue: ${ledgerEndOffset.fold(0L)(_.unwrap)}", - )(()) - ), - assertions = _.infoMessage should include( - "PARTICIPANT_DATA_ACCESSED_AFTER_LEDGER_END(9,0): ledger-end issue: 10" - ), - ) + )(Future.unit), + assertions = _.infoMessage should include( + "PARTICIPANT_DATA_ACCESSED_AFTER_LEDGER_END(9,0): ledger-end issue: 10" + ), + ) + .futureValue } it should "deny in-valid range: later than ledger end when ledger end is none" in { executeSql(backend.parameter.initializeParameters(someIdentityParams, loggerFactory)) - loggerFactory.assertThrowsAndLogsSuppressing[StatusRuntimeException]( - SuppressionRule.Level(Level.INFO) - )( - within = executeSql(implicit connection => - QueryValidRangeImpl(backend.parameter, this.loggerFactory).withRangeNotPruned( + when(backend.pruningOffsetService.pruningOffset(any[TraceContext])) + .thenReturn(Future.successful(Some(offset(3)))) + loggerFactory + .assertThrowsAndLogsSuppressingAsync[StatusRuntimeException]( + SuppressionRule.Level(Level.INFO) + )( + within = QueryValidRangeImpl( + ledgerEndCache = backend.ledgerEndCache, + pruningOffsetService = backend.pruningOffsetService, + loggerFactory = this.loggerFactory, + ).withRangeNotPruned( minOffsetInclusive = offset(1), maxOffsetInclusive = offset(1), errorPruning = _ => "", errorLedgerEnd = ledgerEndOffset => s"ledger-end issue: ${ledgerEndOffset.fold(0L)(_.unwrap)}", - )(()) - ), - assertions = _.infoMessage should include( - "PARTICIPANT_DATA_ACCESSED_AFTER_LEDGER_END(9,0): ledger-end issue: 0" - ), - ) - } - - it should "execute query before reading parameters from the db" in { - executeSql(implicit connection => - QueryValidRangeImpl(backend.parameter, this.loggerFactory).withRangeNotPruned( - minOffsetInclusive = offset(3), - maxOffsetInclusive = offset(8), - errorPruning = _ => "", - errorLedgerEnd = _ => "", - ) { - backend.parameter.initializeParameters(someIdentityParams, loggerFactory)(connection) - updateLedgerEnd(offset(10), 10L)(connection) - } - ) + )(Future.unit), + assertions = _.infoMessage should include( + "PARTICIPANT_DATA_ACCESSED_AFTER_LEDGER_END(9,0): ledger-end issue: 0" + ), + ) + .futureValue } behavior of "QueryValidRange.withOffsetNotBeforePruning" @@ -161,92 +184,196 @@ private[backend] trait StorageBackendTestsQueryValidRange extends Matchers with it should "allow offset in the valid range" in { executeSql(backend.parameter.initializeParameters(someIdentityParams, loggerFactory)) executeSql(updateLedgerEnd(offset(10), 10L)) - executeSql(backend.parameter.updatePrunedUptoInclusive(offset(3))) - executeSql(implicit connection => - QueryValidRangeImpl(backend.parameter, this.loggerFactory).withOffsetNotBeforePruning( - offset = offset(5), - errorPruning = _ => "", - errorLedgerEnd = _ => "", - )(()) - ) + when(backend.pruningOffsetService.pruningOffset(any[TraceContext])) + .thenReturn(Future.successful(Some(offset(3)))) + + QueryValidRangeImpl( + ledgerEndCache = backend.ledgerEndCache, + pruningOffsetService = backend.pruningOffsetService, + loggerFactory = this.loggerFactory, + ).withOffsetNotBeforePruning( + offset = offset(5), + errorPruning = _ => "", + errorLedgerEnd = _ => "", + )(Future.unit) + .futureValue } it should 
"allow offset in the valid range if no pruning before" in { executeSql(backend.parameter.initializeParameters(someIdentityParams, loggerFactory)) executeSql(updateLedgerEnd(offset(10), 10L)) - executeSql(implicit connection => - QueryValidRangeImpl(backend.parameter, this.loggerFactory).withOffsetNotBeforePruning( - offset = offset(5), - errorPruning = _ => "", - errorLedgerEnd = _ => "", - )(()) - ) + when(backend.pruningOffsetService.pruningOffset(any[TraceContext])) + .thenReturn(Future.successful(None)) + QueryValidRangeImpl( + ledgerEndCache = backend.ledgerEndCache, + pruningOffsetService = backend.pruningOffsetService, + loggerFactory = this.loggerFactory, + ).withOffsetNotBeforePruning( + offset = offset(5), + errorPruning = _ => "", + errorLedgerEnd = _ => "", + )(Future.unit) + .futureValue } it should "allow offset in the valid range lower boundary" in { executeSql(backend.parameter.initializeParameters(someIdentityParams, loggerFactory)) executeSql(updateLedgerEnd(offset(10), 10L)) - executeSql(backend.parameter.updatePrunedUptoInclusive(offset(3))) - executeSql(implicit connection => - QueryValidRangeImpl(backend.parameter, this.loggerFactory).withOffsetNotBeforePruning( - offset = offset(3), - errorPruning = _ => "", - errorLedgerEnd = _ => "", - )(()) - ) + when(backend.pruningOffsetService.pruningOffset(any[TraceContext])) + .thenReturn(Future.successful(Some(offset(3)))) + QueryValidRangeImpl( + ledgerEndCache = backend.ledgerEndCache, + pruningOffsetService = backend.pruningOffsetService, + loggerFactory = this.loggerFactory, + ).withOffsetNotBeforePruning( + offset = offset(3), + errorPruning = _ => "", + errorLedgerEnd = _ => "", + )(Future.unit) + .futureValue } it should "allow offset in the valid range higher boundary" in { executeSql(backend.parameter.initializeParameters(someIdentityParams, loggerFactory)) executeSql(updateLedgerEnd(offset(10), 10L)) - executeSql(backend.parameter.updatePrunedUptoInclusive(offset(3))) - executeSql(implicit connection => - QueryValidRangeImpl(backend.parameter, this.loggerFactory).withOffsetNotBeforePruning( - offset = offset(10), - errorPruning = _ => "", - errorLedgerEnd = _ => "", - )(()) - ) + when(backend.pruningOffsetService.pruningOffset(any[TraceContext])) + .thenReturn(Future.successful(Some(offset(3)))) + QueryValidRangeImpl( + ledgerEndCache = backend.ledgerEndCache, + pruningOffsetService = backend.pruningOffsetService, + loggerFactory = this.loggerFactory, + ).withOffsetNotBeforePruning( + offset = offset(10), + errorPruning = _ => "", + errorLedgerEnd = _ => "", + )(Future.unit) + .futureValue } it should "deny in-valid range: earlier than pruning" in { executeSql(backend.parameter.initializeParameters(someIdentityParams, loggerFactory)) executeSql(updateLedgerEnd(offset(10), 10L)) - executeSql(backend.parameter.updatePrunedUptoInclusive(offset(3))) - loggerFactory.assertThrowsAndLogsSuppressing[StatusRuntimeException]( - SuppressionRule.Level(Level.INFO) - )( - within = executeSql(implicit connection => - QueryValidRangeImpl(backend.parameter, this.loggerFactory).withOffsetNotBeforePruning( + when(backend.pruningOffsetService.pruningOffset(any[TraceContext])) + .thenReturn(Future.successful(Some(offset(3)))) + loggerFactory + .assertThrowsAndLogsSuppressingAsync[StatusRuntimeException]( + SuppressionRule.Level(Level.INFO) + )( + within = QueryValidRangeImpl( + ledgerEndCache = backend.ledgerEndCache, + pruningOffsetService = backend.pruningOffsetService, + loggerFactory = this.loggerFactory, + 
).withOffsetNotBeforePruning( offset = offset(2), errorPruning = pruningOffset => s"pruning issue: ${pruningOffset.unwrap}", errorLedgerEnd = _ => "", - )(()) - ), - assertions = _.infoMessage should include( - "PARTICIPANT_PRUNED_DATA_ACCESSED(9,0): pruning issue: 3" - ), - ) + )(Future.unit), + assertions = _.infoMessage should include( + "PARTICIPANT_PRUNED_DATA_ACCESSED(9,0): pruning issue: 3" + ), + ) + .futureValue } it should "deny in-valid range: later than ledger end" in { executeSql(backend.parameter.initializeParameters(someIdentityParams, loggerFactory)) executeSql(updateLedgerEnd(offset(10), 10L)) - executeSql(backend.parameter.updatePrunedUptoInclusive(offset(3))) - loggerFactory.assertThrowsAndLogsSuppressing[StatusRuntimeException]( - SuppressionRule.Level(Level.INFO) - )( - within = executeSql(implicit connection => - QueryValidRangeImpl(backend.parameter, this.loggerFactory).withOffsetNotBeforePruning( + when(backend.pruningOffsetService.pruningOffset(any[TraceContext])) + .thenReturn(Future.successful(Some(offset(3)))) + loggerFactory + .assertThrowsAndLogsSuppressingAsync[StatusRuntimeException]( + SuppressionRule.Level(Level.INFO) + )( + within = QueryValidRangeImpl( + ledgerEndCache = backend.ledgerEndCache, + pruningOffsetService = backend.pruningOffsetService, + loggerFactory = this.loggerFactory, + ).withOffsetNotBeforePruning( offset = offset(11), errorPruning = _ => "", errorLedgerEnd = ledgerEndOffset => s"ledger-end issue: ${ledgerEndOffset.fold(0L)(_.unwrap)}", - )(()) - ), - assertions = _.infoMessage should include( - "PARTICIPANT_DATA_ACCESSED_AFTER_LEDGER_END(9,0): ledger-end issue: 10" - ), - ) + )(Future.unit), + assertions = _.infoMessage should include( + "PARTICIPANT_DATA_ACCESSED_AFTER_LEDGER_END(9,0): ledger-end issue: 10" + ), + ) + .futureValue + } + + behavior of "QueryValidRange.filterPrunedEvents" + + it should "return all events if no pruning" in { + executeSql(backend.parameter.initializeParameters(someIdentityParams, loggerFactory)) + executeSql(updateLedgerEnd(offset(10), 10L)) + val events = (1L to 5L).map(offset) + when(backend.pruningOffsetService.pruningOffset(any[TraceContext])) + .thenReturn(Future.successful(None)) + QueryValidRangeImpl( + ledgerEndCache = backend.ledgerEndCache, + pruningOffsetService = backend.pruningOffsetService, + loggerFactory = this.loggerFactory, + ).filterPrunedEvents[Offset](identity)(events).futureValue shouldBe events + } + + it should "filter out events at or below the pruning offset" in { + executeSql(backend.parameter.initializeParameters(someIdentityParams, loggerFactory)) + executeSql(updateLedgerEnd(offset(10), 10L)) + val events = (1L to 5L).map(offset) + when(backend.pruningOffsetService.pruningOffset(any[TraceContext])) + .thenReturn(Future.successful(Some(offset(3)))) + QueryValidRangeImpl( + ledgerEndCache = backend.ledgerEndCache, + pruningOffsetService = backend.pruningOffsetService, + loggerFactory = this.loggerFactory, + ).filterPrunedEvents[Offset](identity)(events).futureValue shouldBe (4L to 5L).map(offset) + } + + it should "return empty if all events are pruned" in { + executeSql(backend.parameter.initializeParameters(someIdentityParams, loggerFactory)) + executeSql(updateLedgerEnd(offset(10), 10L)) + val events = (1L to 3L).map(offset) + when(backend.pruningOffsetService.pruningOffset(any[TraceContext])) + .thenReturn(Future.successful(Some(offset(3)))) + QueryValidRangeImpl( + ledgerEndCache = backend.ledgerEndCache, + pruningOffsetService = backend.pruningOffsetService, + 
loggerFactory = this.loggerFactory, + ).filterPrunedEvents[Offset](identity)(events).futureValue shouldBe empty + } + + it should "return all events if pruning offset is before all events" in { + executeSql(backend.parameter.initializeParameters(someIdentityParams, loggerFactory)) + executeSql(updateLedgerEnd(offset(10), 10L)) + val events = (5L to 7L).map(offset) + when(backend.pruningOffsetService.pruningOffset(any[TraceContext])) + .thenReturn(Future.successful(Some(offset(3)))) + QueryValidRangeImpl( + ledgerEndCache = backend.ledgerEndCache, + pruningOffsetService = backend.pruningOffsetService, + loggerFactory = this.loggerFactory, + ).filterPrunedEvents[Offset](identity)(events).futureValue shouldBe events + } + + it should "fail if any event offset is beyond ledger end" in { + executeSql(backend.parameter.initializeParameters(someIdentityParams, loggerFactory)) + executeSql(updateLedgerEnd(offset(2), 2L)) + val events = (1L to 5L).map(offset) + when(backend.pruningOffsetService.pruningOffset(any[TraceContext])) + .thenReturn(Future.successful(None)) + loggerFactory + .assertThrowsAndLogsSuppressingAsync[StatusRuntimeException]( + SuppressionRule.Level(Level.INFO) + )( + within = QueryValidRangeImpl( + ledgerEndCache = backend.ledgerEndCache, + pruningOffsetService = backend.pruningOffsetService, + loggerFactory = this.loggerFactory, + ).filterPrunedEvents[Offset](identity)(events), + assertions = _.infoMessage should include( + "PARTICIPANT_DATA_ACCESSED_AFTER_LEDGER_END(9,0): Offset of event to be filtered Offset(3) is beyond ledger end" + ), + ) + .futureValue } } diff --git a/sdk/canton/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/store/backend/UpdateToDbDtoLegacySpec.scala b/sdk/canton/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/store/backend/UpdateToDbDtoLegacySpec.scala index 4e117ed271e4..5f8472591eac 100644 --- a/sdk/canton/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/store/backend/UpdateToDbDtoLegacySpec.scala +++ b/sdk/canton/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/store/backend/UpdateToDbDtoLegacySpec.scala @@ -2063,7 +2063,14 @@ object UpdateToDbDtoLegacySpec { private val compressionAlgorithmId = Some(123) private val compressionStrategy: CompressionStrategy = { val noCompression = new FieldCompressionStrategy(compressionAlgorithmId, x => x) - CompressionStrategy(noCompression, noCompression, noCompression, noCompression) + CompressionStrategy( + noCompression, + noCompression, + noCompression, + noCompression, + noCompression, + noCompression, + ) } private val someParticipantId = diff --git a/sdk/canton/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/store/backend/UpdateToDbDtoSpec.scala b/sdk/canton/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/store/backend/UpdateToDbDtoSpec.scala index 15aa31f5ae68..12f6d7c73657 100644 --- a/sdk/canton/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/store/backend/UpdateToDbDtoSpec.scala +++ b/sdk/canton/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/store/backend/UpdateToDbDtoSpec.scala @@ -619,12 +619,13 @@ class UpdateToDbDtoSpec extends AnyWordSpec with Matchers { additional_witnesses = Some(Set.empty), exercise_choice = exerciseNode.choiceId, exercise_choice_interface_id = None, - exercise_argument = Some(emptyArray), - exercise_result = 
Some(emptyArray), + exercise_argument = + Some(compressArrayWith(compressionAlgorithmIdConsumingArg, emptyArray)), + exercise_result = Some(compressArrayWith(compressionAlgorithmIdConsumingRes, emptyArray)), exercise_actors = Some(Set("signatory")), exercise_last_descendant_node_id = Some(exerciseNodeId.index), - exercise_argument_compression = compressionAlgorithmId, - exercise_result_compression = compressionAlgorithmId, + exercise_argument_compression = compressionAlgorithmIdConsumingArg, + exercise_result_compression = compressionAlgorithmIdConsumingRes, reassignment_id = None, assignment_exclusivity = None, target_synchronizer_id = None, @@ -749,12 +750,13 @@ class UpdateToDbDtoSpec extends AnyWordSpec with Matchers { consuming = Some(true), exercise_choice = exerciseNode.choiceId, exercise_choice_interface_id = None, - exercise_argument = Some(emptyArray), - exercise_result = Some(emptyArray), + exercise_argument = + Some(compressArrayWith(compressionAlgorithmIdConsumingArg, emptyArray)), + exercise_result = Some(compressArrayWith(compressionAlgorithmIdConsumingRes, emptyArray)), exercise_actors = Some(Set("signatory")), exercise_last_descendant_node_id = Some(exerciseNodeId.index), - exercise_argument_compression = compressionAlgorithmId, - exercise_result_compression = compressionAlgorithmId, + exercise_argument_compression = compressionAlgorithmIdConsumingArg, + exercise_result_compression = compressionAlgorithmIdConsumingRes, representative_package_id = None, contract_id = Some(exerciseNode.targetCoid), internal_contract_id = None, @@ -875,12 +877,14 @@ class UpdateToDbDtoSpec extends AnyWordSpec with Matchers { consuming = Some(false), exercise_choice = exerciseNode.choiceId, exercise_choice_interface_id = None, - exercise_argument = Some(emptyArray), - exercise_result = Some(emptyArray), + exercise_argument = + Some(compressArrayWith(compressionAlgorithmIdNonConsumingArg, emptyArray)), + exercise_result = + Some(compressArrayWith(compressionAlgorithmIdNonConsumingRes, emptyArray)), exercise_actors = Some(Set("signatory")), exercise_last_descendant_node_id = Some(exerciseNodeId.index), - exercise_argument_compression = compressionAlgorithmId, - exercise_result_compression = compressionAlgorithmId, + exercise_argument_compression = compressionAlgorithmIdNonConsumingArg, + exercise_result_compression = compressionAlgorithmIdNonConsumingRes, representative_package_id = None, contract_id = Some(exerciseNode.targetCoid), internal_contract_id = None, @@ -1009,12 +1013,14 @@ class UpdateToDbDtoSpec extends AnyWordSpec with Matchers { consuming = Some(false), exercise_choice = exerciseNodeA.choiceId, exercise_choice_interface_id = None, - exercise_argument = Some(emptyArray), - exercise_result = Some(emptyArray), + exercise_argument = + Some(compressArrayWith(compressionAlgorithmIdNonConsumingArg, emptyArray)), + exercise_result = + Some(compressArrayWith(compressionAlgorithmIdNonConsumingRes, emptyArray)), exercise_actors = Some(Set("signatory")), exercise_last_descendant_node_id = Some(createNodeCId.index), - exercise_argument_compression = compressionAlgorithmId, - exercise_result_compression = compressionAlgorithmId, + exercise_argument_compression = compressionAlgorithmIdNonConsumingArg, + exercise_result_compression = compressionAlgorithmIdNonConsumingRes, representative_package_id = None, contract_id = Some(exerciseNodeA.targetCoid), internal_contract_id = None, @@ -1047,12 +1053,14 @@ class UpdateToDbDtoSpec extends AnyWordSpec with Matchers { consuming = Some(false), 
exercise_choice = exerciseNodeB.choiceId, exercise_choice_interface_id = None, - exercise_argument = Some(emptyArray), - exercise_result = Some(emptyArray), + exercise_argument = + Some(compressArrayWith(compressionAlgorithmIdNonConsumingArg, emptyArray)), + exercise_result = + Some(compressArrayWith(compressionAlgorithmIdNonConsumingRes, emptyArray)), exercise_actors = Some(Set("signatory")), exercise_last_descendant_node_id = Some(exerciseNodeBId.index), - exercise_argument_compression = compressionAlgorithmId, - exercise_result_compression = compressionAlgorithmId, + exercise_argument_compression = compressionAlgorithmIdNonConsumingArg, + exercise_result_compression = compressionAlgorithmIdNonConsumingRes, representative_package_id = None, contract_id = Some(exerciseNodeB.targetCoid), internal_contract_id = None, @@ -1228,12 +1236,14 @@ class UpdateToDbDtoSpec extends AnyWordSpec with Matchers { consuming = Some(false), exercise_choice = exerciseNodeA.choiceId, exercise_choice_interface_id = None, - exercise_argument = Some(emptyArray), - exercise_result = Some(emptyArray), + exercise_argument = + Some(compressArrayWith(compressionAlgorithmIdNonConsumingArg, emptyArray)), + exercise_result = + Some(compressArrayWith(compressionAlgorithmIdNonConsumingRes, emptyArray)), exercise_actors = Some(Set("signatory")), exercise_last_descendant_node_id = Some(createNodeCId.index), - exercise_argument_compression = compressionAlgorithmId, - exercise_result_compression = compressionAlgorithmId, + exercise_argument_compression = compressionAlgorithmIdNonConsumingArg, + exercise_result_compression = compressionAlgorithmIdNonConsumingRes, representative_package_id = None, contract_id = Some(exerciseNodeA.targetCoid), internal_contract_id = None, @@ -1266,12 +1276,14 @@ class UpdateToDbDtoSpec extends AnyWordSpec with Matchers { consuming = Some(false), exercise_choice = exerciseNodeB.choiceId, exercise_choice_interface_id = None, - exercise_argument = Some(emptyArray), - exercise_result = Some(emptyArray), + exercise_argument = + Some(compressArrayWith(compressionAlgorithmIdNonConsumingArg, emptyArray)), + exercise_result = + Some(compressArrayWith(compressionAlgorithmIdNonConsumingRes, emptyArray)), exercise_actors = Some(Set("signatory")), exercise_last_descendant_node_id = Some(exerciseNodeBId.index), - exercise_argument_compression = compressionAlgorithmId, - exercise_result_compression = compressionAlgorithmId, + exercise_argument_compression = compressionAlgorithmIdNonConsumingArg, + exercise_result_compression = compressionAlgorithmIdNonConsumingRes, representative_package_id = None, contract_id = Some(exerciseNodeB.targetCoid), internal_contract_id = None, @@ -1463,12 +1475,14 @@ class UpdateToDbDtoSpec extends AnyWordSpec with Matchers { consuming = Some(false), exercise_choice = exerciseNodeA.choiceId, exercise_choice_interface_id = None, - exercise_argument = Some(emptyArray), - exercise_result = Some(emptyArray), + exercise_argument = + Some(compressArrayWith(compressionAlgorithmIdNonConsumingArg, emptyArray)), + exercise_result = + Some(compressArrayWith(compressionAlgorithmIdNonConsumingRes, emptyArray)), exercise_actors = Some(Set("signatory")), exercise_last_descendant_node_id = Some(exerciseNodeDId.index), - exercise_argument_compression = compressionAlgorithmId, - exercise_result_compression = compressionAlgorithmId, + exercise_argument_compression = compressionAlgorithmIdNonConsumingArg, + exercise_result_compression = compressionAlgorithmIdNonConsumingRes, representative_package_id 
= None, contract_id = Some(exerciseNodeA.targetCoid), internal_contract_id = None, @@ -1501,12 +1515,14 @@ class UpdateToDbDtoSpec extends AnyWordSpec with Matchers { consuming = Some(false), exercise_choice = exerciseNodeB.choiceId, exercise_choice_interface_id = None, - exercise_argument = Some(emptyArray), - exercise_result = Some(emptyArray), + exercise_argument = + Some(compressArrayWith(compressionAlgorithmIdNonConsumingArg, emptyArray)), + exercise_result = + Some(compressArrayWith(compressionAlgorithmIdNonConsumingRes, emptyArray)), exercise_actors = Some(Set("signatory")), exercise_last_descendant_node_id = Some(exerciseNodeBId.index), - exercise_argument_compression = compressionAlgorithmId, - exercise_result_compression = compressionAlgorithmId, + exercise_argument_compression = compressionAlgorithmIdNonConsumingArg, + exercise_result_compression = compressionAlgorithmIdNonConsumingRes, representative_package_id = None, contract_id = Some(exerciseNodeB.targetCoid), internal_contract_id = None, @@ -1539,12 +1555,14 @@ class UpdateToDbDtoSpec extends AnyWordSpec with Matchers { consuming = Some(false), exercise_choice = exerciseNodeC.choiceId, exercise_choice_interface_id = None, - exercise_argument = Some(emptyArray), - exercise_result = Some(emptyArray), + exercise_argument = + Some(compressArrayWith(compressionAlgorithmIdNonConsumingArg, emptyArray)), + exercise_result = + Some(compressArrayWith(compressionAlgorithmIdNonConsumingRes, emptyArray)), exercise_actors = Some(Set("signatory")), exercise_last_descendant_node_id = Some(exerciseNodeDId.index), - exercise_argument_compression = compressionAlgorithmId, - exercise_result_compression = compressionAlgorithmId, + exercise_argument_compression = compressionAlgorithmIdNonConsumingArg, + exercise_result_compression = compressionAlgorithmIdNonConsumingRes, representative_package_id = None, contract_id = Some(exerciseNodeC.targetCoid), internal_contract_id = None, @@ -1577,12 +1595,14 @@ class UpdateToDbDtoSpec extends AnyWordSpec with Matchers { consuming = Some(false), exercise_choice = exerciseNodeD.choiceId, exercise_choice_interface_id = None, - exercise_argument = Some(emptyArray), - exercise_result = Some(emptyArray), + exercise_argument = + Some(compressArrayWith(compressionAlgorithmIdNonConsumingArg, emptyArray)), + exercise_result = + Some(compressArrayWith(compressionAlgorithmIdNonConsumingRes, emptyArray)), exercise_actors = Some(Set("signatory")), exercise_last_descendant_node_id = Some(exerciseNodeDId.index), - exercise_argument_compression = compressionAlgorithmId, - exercise_result_compression = compressionAlgorithmId, + exercise_argument_compression = compressionAlgorithmIdNonConsumingArg, + exercise_result_compression = compressionAlgorithmIdNonConsumingRes, representative_package_id = None, contract_id = Some(exerciseNodeD.targetCoid), internal_contract_id = None, @@ -1771,12 +1791,13 @@ class UpdateToDbDtoSpec extends AnyWordSpec with Matchers { additional_witnesses = Some(Set("divulgee")), exercise_choice = exerciseNode.choiceId, exercise_choice_interface_id = None, - exercise_argument = Some(emptyArray), - exercise_result = Some(emptyArray), + exercise_argument = + Some(compressArrayWith(compressionAlgorithmIdConsumingArg, emptyArray)), + exercise_result = Some(compressArrayWith(compressionAlgorithmIdConsumingRes, emptyArray)), exercise_actors = Some(Set("signatory")), exercise_last_descendant_node_id = Some(exerciseNodeId.index), - exercise_argument_compression = compressionAlgorithmId, - 
exercise_result_compression = compressionAlgorithmId, + exercise_argument_compression = compressionAlgorithmIdConsumingArg, + exercise_result_compression = compressionAlgorithmIdConsumingRes, reassignment_id = None, assignment_exclusivity = None, target_synchronizer_id = None, @@ -1946,12 +1967,12 @@ class UpdateToDbDtoSpec extends AnyWordSpec with Matchers { additional_witnesses = Some(Set("divulgee")), exercise_choice = Some(exerciseNode.choiceId), exercise_choice_interface_id = Some(interfaceId.toString), - exercise_argument = Some(emptyArray), - exercise_result = Some(emptyArray), + exercise_argument = Some(compressArrayWith(compressionAlgorithmIdConsumingArg, emptyArray)), + exercise_result = Some(compressArrayWith(compressionAlgorithmIdConsumingRes, emptyArray)), exercise_actors = Some(Set("signatory")), exercise_last_descendant_node_id = Some(exerciseNodeId.index), - exercise_argument_compression = compressionAlgorithmId, - exercise_result_compression = compressionAlgorithmId, + exercise_argument_compression = compressionAlgorithmIdConsumingArg, + exercise_result_compression = compressionAlgorithmIdConsumingRes, reassignment_id = None, assignment_exclusivity = None, target_synchronizer_id = None, @@ -2791,11 +2812,34 @@ object UpdateToDbDtoSpec { // These test do not check the correctness of compression. // All values are compressed using a dummy (identity) algorithm in this suite. - private val compressionAlgorithmId = Some(123) - private val compressionStrategy: CompressionStrategy = { - val noCompression = new FieldCompressionStrategy(compressionAlgorithmId, x => x) - CompressionStrategy(noCompression, noCompression, noCompression, noCompression) - } + private val compressionAlgorithmIdInvalid = Some(12) + private val compressionAlgorithmIdConsumingArg = Some(13) + private val compressionAlgorithmIdConsumingRes = Some(14) + private val compressionAlgorithmIdNonConsumingArg = Some(15) + private val compressionAlgorithmIdNonConsumingRes = Some(16) + private val compressionStrategy: CompressionStrategy = CompressionStrategy( + new FieldCompressionStrategy(compressionAlgorithmIdInvalid, x => x), + new FieldCompressionStrategy(compressionAlgorithmIdInvalid, x => x), + new FieldCompressionStrategy( + compressionAlgorithmIdConsumingArg, + compressArrayWith(compressionAlgorithmIdConsumingArg, _), + ), + new FieldCompressionStrategy( + compressionAlgorithmIdConsumingRes, + compressArrayWith(compressionAlgorithmIdConsumingRes, _), + ), + new FieldCompressionStrategy( + compressionAlgorithmIdNonConsumingArg, + compressArrayWith(compressionAlgorithmIdNonConsumingArg, _), + ), + new FieldCompressionStrategy( + compressionAlgorithmIdNonConsumingRes, + compressArrayWith(compressionAlgorithmIdNonConsumingRes, _), + ), + ) + + private def compressArrayWith(id: Option[Int], x: Array[Byte]) = + x ++ Array(id.getOrElse(-1).toByte) private val someParticipantId = Ref.ParticipantId.assertFromString("UpdateToDbDtoSpecParticipant") diff --git a/sdk/canton/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/store/cache/MutableCacheBackedContractStoreRaceTests.scala b/sdk/canton/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/store/cache/MutableCacheBackedContractStoreRaceTests.scala index af03a9de1e01..0e64526deb2b 100644 --- a/sdk/canton/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/store/cache/MutableCacheBackedContractStoreRaceTests.scala +++ 
b/sdk/canton/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/store/cache/MutableCacheBackedContractStoreRaceTests.scala @@ -59,7 +59,7 @@ class MutableCacheBackedContractStoreRaceTests it should "preserve causal monotonicity under contention for key state" in { val workload = generateWorkload(keysCount = 10L, contractsCount = 1000L) val indexViewContractsReader = IndexViewContractsReader()(unboundedExecutionContext) - val cantonContractStore = new InMemoryContractStore( + val participantContractStore = new InMemoryContractStore( timeouts = timeouts, loggerFactory = loggerFactory, )(unboundedExecutionContext) @@ -68,13 +68,13 @@ class MutableCacheBackedContractStoreRaceTests indexViewContractsReader, unboundedExecutionContext, loggerFactory, - cantonContractStore, + participantContractStore, ) for { _ <- test( indexViewContractsReader, - cantonContractStore, + participantContractStore, workload, unboundedExecutionContext, ) { ec => event => @@ -86,7 +86,7 @@ class MutableCacheBackedContractStoreRaceTests it should "preserve causal monotonicity under contention for contract state" in { val workload = generateWorkload(keysCount = 10L, contractsCount = 1000L) val indexViewContractsReader = IndexViewContractsReader()(unboundedExecutionContext) - val cantonContractStore = new InMemoryContractStore( + val participantContractStore = new InMemoryContractStore( timeouts = timeouts, loggerFactory = loggerFactory, )(unboundedExecutionContext) @@ -95,13 +95,13 @@ class MutableCacheBackedContractStoreRaceTests indexViewContractsReader, unboundedExecutionContext, loggerFactory, - cantonContractStore, + participantContractStore, ) for { _ <- test( indexViewContractsReader, - cantonContractStore, + participantContractStore, workload, unboundedExecutionContext, ) { ec => event => @@ -117,7 +117,7 @@ private object MutableCacheBackedContractStoreRaceTests { private def test( indexViewContractsReader: IndexViewContractsReader, - cantonContractStore: ContractStore, + participantContractStore: ContractStore, workload: Seq[Long => SimplifiedContractStateEvent], unboundedExecutionContext: ExecutionContext, )( @@ -135,7 +135,7 @@ private object MutableCacheBackedContractStoreRaceTests { } .map { event => indexViewContractsReader.update(event) - update(cantonContractStore, event)(unboundedExecutionContext).futureValue + update(participantContractStore, event)(unboundedExecutionContext).futureValue event } .mapAsync(1)( @@ -358,7 +358,7 @@ private object MutableCacheBackedContractStoreRaceTests { indexViewContractsReader: IndexViewContractsReader, ec: ExecutionContext, loggerFactory: NamedLoggerFactory, - cantonContractStore: ContractStore, + participantContractStore: ContractStore, ) = { val metrics = LedgerApiServerMetrics.ForTesting new MutableCacheBackedContractStore( @@ -370,7 +370,7 @@ private object MutableCacheBackedContractStoreRaceTests { metrics = metrics, loggerFactory = loggerFactory, )(ec), - contractStore = cantonContractStore, + contractStore = participantContractStore, loggerFactory = loggerFactory, )(ec) } diff --git a/sdk/canton/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/store/dao/JdbcLedgerDaoBackend.scala b/sdk/canton/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/store/dao/JdbcLedgerDaoBackend.scala index b2bc66fb16dd..9610b427f411 100644 --- a/sdk/canton/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/store/dao/JdbcLedgerDaoBackend.scala +++ 
b/sdk/canton/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/store/dao/JdbcLedgerDaoBackend.scala @@ -13,6 +13,8 @@ import com.digitalasset.canton.ledger.api.ParticipantId import com.digitalasset.canton.logging.LoggingContextWithTrace.withNewLoggingContext import com.digitalasset.canton.logging.SuppressingLogger import com.digitalasset.canton.metrics.{LedgerApiServerHistograms, LedgerApiServerMetrics} +import com.digitalasset.canton.participant.store.ContractStore +import com.digitalasset.canton.participant.store.memory.InMemoryContractStore import com.digitalasset.canton.platform.config.{ ActiveContractsServiceStreamsConfig, ServerRole, @@ -28,7 +30,12 @@ import com.digitalasset.canton.platform.store.dao.events.{ LfValueTranslation, } import com.digitalasset.canton.platform.store.interning.StringInterningView -import com.digitalasset.canton.platform.store.{DbSupport, DbType, FlywayMigrations} +import com.digitalasset.canton.platform.store.{ + DbSupport, + DbType, + FlywayMigrations, + PruningOffsetService, +} import com.digitalasset.canton.tracing.TraceContext import com.digitalasset.daml.lf.archive.DamlLf.Archive import com.digitalasset.daml.lf.data.Ref @@ -158,6 +165,8 @@ private[dao] trait JdbcLedgerDaoBackend extends PekkoBeforeAndAfterAll with Base loadPackage = (packageId, _) => loadPackage(packageId), loggerFactory = loggerFactory, ), + pruningOffsetService = pruningOffsetService, + contractStore = contractStore, ) } } @@ -166,7 +175,11 @@ private[dao] trait JdbcLedgerDaoBackend extends PekkoBeforeAndAfterAll with Base protected final var ledgerDao: LedgerDao = _ protected var ledgerEndCache: MutableLedgerEndCache = _ + protected var contractStore: ContractStore = _ protected var stringInterningView: StringInterningView = _ + protected val pruningOffsetService: PruningOffsetService = mock[PruningOffsetService] + when(pruningOffsetService.pruningOffset(any[TraceContext])) + .thenReturn(Future.successful(None)) // `dbDispatcher` and `ledgerDao` depend on the `postgresFixture` which is in turn initialized `beforeAll` private var resource: Resource[LedgerDao] = _ @@ -176,6 +189,7 @@ private[dao] trait JdbcLedgerDaoBackend extends PekkoBeforeAndAfterAll with Base // We use the dispatcher here because the default Scalatest execution context is too slow. implicit val resourceContext: ResourceContext = ResourceContext(system.dispatcher) ledgerEndCache = MutableLedgerEndCache() + contractStore = new InMemoryContractStore(timeouts, loggerFactory) stringInterningView = new StringInterningView(loggerFactory) resource = withNewLoggingContext() { implicit loggingContext => for { diff --git a/sdk/canton/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/store/dao/events/InputContractPackagesTest.scala b/sdk/canton/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/store/dao/events/InputContractPackagesTest.scala new file mode 100644 index 000000000000..804fad57a7c0 --- /dev/null +++ b/sdk/canton/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/store/dao/events/InputContractPackagesTest.scala @@ -0,0 +1,100 @@ +// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
+// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.platform.store.dao.events + +import com.digitalasset.canton.protocol.ExampleTransactionFactory.{ + exerciseNode, + fetchNode, + lookupByKeyNode, + templateId, +} +import com.digitalasset.canton.protocol.{ExampleContractFactory, LfGlobalKey, LfTemplateId} +import com.digitalasset.canton.util.LfTransactionBuilder.defaultPackageName +import com.digitalasset.canton.{BaseTest, LfPackageId} +import com.digitalasset.daml.lf.transaction.test.TestIdFactory +import com.digitalasset.daml.lf.transaction.test.TreeTransactionBuilder.{ + NodeOps, + toVersionedTransaction, +} +import com.digitalasset.daml.lf.value.Value +import org.scalatest.wordspec.AnyWordSpec + +class InputContractPackagesTest extends AnyWordSpec with BaseTest with TestIdFactory { + + import InputContractPackages.* + + val (cid1, cid2, cid3) = (newCid, newCid, newCid) + val (p1, p2, p3) = (newPackageId, newPackageId, newPackageId) + def t(pId: LfPackageId): LfTemplateId = LfTemplateId(pId, templateId.qualifiedName) + + "InputContractPackages.forTransaction" should { + + "extract package ids associated with nodes" in { + + val globalKey = LfGlobalKey.assertBuild( + t(p2), + Value.ValueUnit, + defaultPackageName, + ) + + val example = toVersionedTransaction( + exerciseNode(cid1, templateId = t(p1)).withChildren( + lookupByKeyNode(globalKey, resolution = Some(cid2)), + fetchNode(cid3, templateId = t(p3)), + ) + ).transaction + + forTransaction(example) shouldBe Map( + cid1 -> Set(p1), + cid2 -> Set(p2), + cid3 -> Set(p3), + ) + } + + "return multiple packages where the same contract is bound to different packages" in { + + val example = toVersionedTransaction( + exerciseNode(cid1, templateId = t(p1)).withChildren( + exerciseNode(cid1, templateId = t(p2)), + exerciseNode(cid1, templateId = t(p3)), + ) + ).transaction + + forTransaction(example) shouldBe Map( + cid1 -> Set(p1, p2, p3) + ) + } + + } + + "InputContractPackages.strictZipByKey" should { + "work where both maps have identical keys" in { + strictZipByKey(Map(1 -> "a", 2 -> "b"), Map(1 -> 3.0, 2 -> 4.0)) shouldBe Right( + Map(1 -> ("a", 3.0), 2 -> ("b", 4.0)) + ) + } + "fail where the key sets are unequal" in { + inside(strictZipByKey(Map(1 -> "a", 2 -> "b"), Map(2 -> 4.0, 3 -> 5.0))) { + case Left(mismatch) => mismatch shouldBe Set(1, 3) + } + } + + } + + "InputContractPackages.forTransactionWithContracts" should { + + val cid = newCid + val inst = ExampleContractFactory.build() + val tx = toVersionedTransaction( + exerciseNode(cid, templateId = t(p1)) + ).transaction + + "combine transaction contracts with a contract instances map" in { + forTransactionWithContracts(tx, Map(cid -> inst)) shouldBe Right( + Map(cid -> (inst.inst, Set(p1))) + ) + } + } + +} diff --git a/sdk/canton/community/ledger/ledger-common-dars/src/main/daml/carbonv1/daml.yaml b/sdk/canton/community/ledger/ledger-common-dars/src/main/daml/carbonv1/daml.yaml index 5eab3e5f984d..1ca6c7662b27 100644 --- a/sdk/canton/community/ledger/ledger-common-dars/src/main/daml/carbonv1/daml.yaml +++ b/sdk/canton/community/ledger/ledger-common-dars/src/main/daml/carbonv1/daml.yaml @@ -1,4 +1,4 @@ -sdk-version: 3.4.0-snapshot.20251002.14252.0.v9bd7e7ad +sdk-version: 3.4.0-snapshot.20251007.14274.0.ve2024cd6 build-options: - --enable-interfaces=yes name: carbonv1-tests diff --git a/sdk/canton/community/ledger/ledger-common-dars/src/main/daml/carbonv2/daml.yaml b/sdk/canton/community/ledger/ledger-common-dars/src/main/daml/carbonv2/daml.yaml index
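The `strictZipByKey` assertions above pin down the intended semantics: zipping succeeds only when both maps have exactly the same key set, and a failure reports the symmetric difference of the keys. A minimal sketch consistent with those expectations (the real implementation in `InputContractPackages` may differ):

```scala
// Sketch only: behavior inferred from the test expectations above, not the
// actual implementation in InputContractPackages.
def strictZipByKey[K, A, B](
    left: Map[K, A],
    right: Map[K, B],
): Either[Set[K], Map[K, (A, B)]] = {
  // Keys present in exactly one of the maps; non-empty means the zip is not exact.
  val mismatch = (left.keySet diff right.keySet) union (right.keySet diff left.keySet)
  if (mismatch.nonEmpty) Left(mismatch)
  else Right(left.map { case (k, a) => k -> (a, right(k)) })
}
```

On the inputs from the failing-case test, keys {1, 2} against {2, 3} yield the mismatch set Set(1, 3), matching the assertion.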
1891edc08d5e..9ecab5bceb70 100644 --- a/sdk/canton/community/ledger/ledger-common-dars/src/main/daml/carbonv2/daml.yaml +++ b/sdk/canton/community/ledger/ledger-common-dars/src/main/daml/carbonv2/daml.yaml @@ -1,4 +1,4 @@ -sdk-version: 3.4.0-snapshot.20251002.14252.0.v9bd7e7ad +sdk-version: 3.4.0-snapshot.20251007.14274.0.ve2024cd6 build-options: - --enable-interfaces=yes name: carbonv2-tests diff --git a/sdk/canton/community/ledger/ledger-common-dars/src/main/daml/experimental/daml.yaml b/sdk/canton/community/ledger/ledger-common-dars/src/main/daml/experimental/daml.yaml index 87cfac60bb84..1279e7ff5eec 100644 --- a/sdk/canton/community/ledger/ledger-common-dars/src/main/daml/experimental/daml.yaml +++ b/sdk/canton/community/ledger/ledger-common-dars/src/main/daml/experimental/daml.yaml @@ -1,4 +1,4 @@ -sdk-version: 3.4.0-snapshot.20251002.14252.0.v9bd7e7ad +sdk-version: 3.4.0-snapshot.20251007.14274.0.ve2024cd6 name: experimental-tests source: . version: 3.1.0 diff --git a/sdk/canton/community/ledger/ledger-common-dars/src/main/daml/model/daml.yaml b/sdk/canton/community/ledger/ledger-common-dars/src/main/daml/model/daml.yaml index 754aef073486..9877cb4afee0 100644 --- a/sdk/canton/community/ledger/ledger-common-dars/src/main/daml/model/daml.yaml +++ b/sdk/canton/community/ledger/ledger-common-dars/src/main/daml/model/daml.yaml @@ -1,4 +1,4 @@ -sdk-version: 3.4.0-snapshot.20251002.14252.0.v9bd7e7ad +sdk-version: 3.4.0-snapshot.20251007.14274.0.ve2024cd6 build-options: - --enable-interfaces=yes name: model-tests diff --git a/sdk/canton/community/ledger/ledger-common-dars/src/main/daml/model_iface/daml.yaml b/sdk/canton/community/ledger/ledger-common-dars/src/main/daml/model_iface/daml.yaml index 8e62681b33c2..7d547e3f3e74 100644 --- a/sdk/canton/community/ledger/ledger-common-dars/src/main/daml/model_iface/daml.yaml +++ b/sdk/canton/community/ledger/ledger-common-dars/src/main/daml/model_iface/daml.yaml @@ -1,4 +1,4 @@ -sdk-version: 3.4.0-snapshot.20251002.14252.0.v9bd7e7ad +sdk-version: 3.4.0-snapshot.20251007.14274.0.ve2024cd6 build-options: - --enable-interfaces=yes name: model-iface-tests diff --git a/sdk/canton/community/ledger/ledger-common-dars/src/main/daml/ongoing_stream_package_upload/daml.yaml b/sdk/canton/community/ledger/ledger-common-dars/src/main/daml/ongoing_stream_package_upload/daml.yaml index 326b1ff93ed2..918ea997ad4f 100644 --- a/sdk/canton/community/ledger/ledger-common-dars/src/main/daml/ongoing_stream_package_upload/daml.yaml +++ b/sdk/canton/community/ledger/ledger-common-dars/src/main/daml/ongoing_stream_package_upload/daml.yaml @@ -1,4 +1,4 @@ -sdk-version: 3.4.0-snapshot.20251002.14252.0.v9bd7e7ad +sdk-version: 3.4.0-snapshot.20251007.14274.0.ve2024cd6 name: ongoing-stream-package-upload-tests source: . version: 3.1.0 diff --git a/sdk/canton/community/ledger/ledger-common-dars/src/main/daml/package_management/daml.yaml b/sdk/canton/community/ledger/ledger-common-dars/src/main/daml/package_management/daml.yaml index ec5eecb2eb16..801ad5c24cd7 100644 --- a/sdk/canton/community/ledger/ledger-common-dars/src/main/daml/package_management/daml.yaml +++ b/sdk/canton/community/ledger/ledger-common-dars/src/main/daml/package_management/daml.yaml @@ -1,4 +1,4 @@ -sdk-version: 3.4.0-snapshot.20251002.14252.0.v9bd7e7ad +sdk-version: 3.4.0-snapshot.20251007.14274.0.ve2024cd6 name: package-management-tests source: . 
version: 3.1.0 diff --git a/sdk/canton/community/ledger/ledger-common-dars/src/main/daml/semantic/daml.yaml b/sdk/canton/community/ledger/ledger-common-dars/src/main/daml/semantic/daml.yaml index bd13a23263fc..083f1c81d91b 100644 --- a/sdk/canton/community/ledger/ledger-common-dars/src/main/daml/semantic/daml.yaml +++ b/sdk/canton/community/ledger/ledger-common-dars/src/main/daml/semantic/daml.yaml @@ -1,4 +1,4 @@ -sdk-version: 3.4.0-snapshot.20251002.14252.0.v9bd7e7ad +sdk-version: 3.4.0-snapshot.20251007.14274.0.ve2024cd6 build-options: - --enable-interfaces=yes name: semantic-tests diff --git a/sdk/canton/community/ledger/ledger-common-dars/src/main/daml/upgrade/1.0.0/daml.yaml b/sdk/canton/community/ledger/ledger-common-dars/src/main/daml/upgrade/1.0.0/daml.yaml index 095dcba3fb9e..5f8b65be8a66 100644 --- a/sdk/canton/community/ledger/ledger-common-dars/src/main/daml/upgrade/1.0.0/daml.yaml +++ b/sdk/canton/community/ledger/ledger-common-dars/src/main/daml/upgrade/1.0.0/daml.yaml @@ -1,4 +1,4 @@ -sdk-version: 3.4.0-snapshot.20251002.14252.0.v9bd7e7ad +sdk-version: 3.4.0-snapshot.20251007.14274.0.ve2024cd6 name: upgrade-tests data-dependencies: - ../../../../../scala-2.13/resource_managed/main/upgrade-iface-tests-3.1.0.dar diff --git a/sdk/canton/community/ledger/ledger-common-dars/src/main/daml/upgrade/2.0.0/daml.yaml b/sdk/canton/community/ledger/ledger-common-dars/src/main/daml/upgrade/2.0.0/daml.yaml index f6bfb645d265..eec867dd4bd5 100644 --- a/sdk/canton/community/ledger/ledger-common-dars/src/main/daml/upgrade/2.0.0/daml.yaml +++ b/sdk/canton/community/ledger/ledger-common-dars/src/main/daml/upgrade/2.0.0/daml.yaml @@ -1,4 +1,4 @@ -sdk-version: 3.4.0-snapshot.20251002.14252.0.v9bd7e7ad +sdk-version: 3.4.0-snapshot.20251007.14274.0.ve2024cd6 name: upgrade-tests data-dependencies: - ../../../../../scala-2.13/resource_managed/main/upgrade-iface-tests-3.1.0.dar diff --git a/sdk/canton/community/ledger/ledger-common-dars/src/main/daml/upgrade/3.0.0/daml.yaml b/sdk/canton/community/ledger/ledger-common-dars/src/main/daml/upgrade/3.0.0/daml.yaml index 7aaa042da1c2..a57bbc0f2cb0 100644 --- a/sdk/canton/community/ledger/ledger-common-dars/src/main/daml/upgrade/3.0.0/daml.yaml +++ b/sdk/canton/community/ledger/ledger-common-dars/src/main/daml/upgrade/3.0.0/daml.yaml @@ -1,4 +1,4 @@ -sdk-version: 3.4.0-snapshot.20251002.14252.0.v9bd7e7ad +sdk-version: 3.4.0-snapshot.20251007.14274.0.ve2024cd6 name: upgrade-tests data-dependencies: - ../../../../../scala-2.13/resource_managed/main/upgrade-iface-tests-3.1.0.dar diff --git a/sdk/canton/community/ledger/ledger-common-dars/src/main/daml/upgrade_fetch/1.0.0/daml.yaml b/sdk/canton/community/ledger/ledger-common-dars/src/main/daml/upgrade_fetch/1.0.0/daml.yaml index 2e73c58c95b0..f63671d14822 100644 --- a/sdk/canton/community/ledger/ledger-common-dars/src/main/daml/upgrade_fetch/1.0.0/daml.yaml +++ b/sdk/canton/community/ledger/ledger-common-dars/src/main/daml/upgrade_fetch/1.0.0/daml.yaml @@ -1,4 +1,4 @@ -sdk-version: 3.4.0-snapshot.20251002.14252.0.v9bd7e7ad +sdk-version: 3.4.0-snapshot.20251007.14274.0.ve2024cd6 name: upgrade-fetch-tests source: . 
version: 1.0.0 diff --git a/sdk/canton/community/ledger/ledger-common-dars/src/main/daml/upgrade_fetch/2.0.0/daml.yaml b/sdk/canton/community/ledger/ledger-common-dars/src/main/daml/upgrade_fetch/2.0.0/daml.yaml index bea272b254e2..8055c6f1465c 100644 --- a/sdk/canton/community/ledger/ledger-common-dars/src/main/daml/upgrade_fetch/2.0.0/daml.yaml +++ b/sdk/canton/community/ledger/ledger-common-dars/src/main/daml/upgrade_fetch/2.0.0/daml.yaml @@ -1,4 +1,4 @@ -sdk-version: 3.4.0-snapshot.20251002.14252.0.v9bd7e7ad +sdk-version: 3.4.0-snapshot.20251007.14274.0.ve2024cd6 name: upgrade-fetch-tests source: . version: 2.0.0 diff --git a/sdk/canton/community/ledger/ledger-common-dars/src/main/daml/upgrade_iface/daml.yaml b/sdk/canton/community/ledger/ledger-common-dars/src/main/daml/upgrade_iface/daml.yaml index 0c2cfad12e48..a6d58b19c8af 100644 --- a/sdk/canton/community/ledger/ledger-common-dars/src/main/daml/upgrade_iface/daml.yaml +++ b/sdk/canton/community/ledger/ledger-common-dars/src/main/daml/upgrade_iface/daml.yaml @@ -1,4 +1,4 @@ -sdk-version: 3.4.0-snapshot.20251002.14252.0.v9bd7e7ad +sdk-version: 3.4.0-snapshot.20251007.14274.0.ve2024cd6 build-options: - --enable-interfaces=yes name: upgrade-iface-tests diff --git a/sdk/canton/community/ledger/ledger-common-dars/src/main/daml/vetting_alt/daml.yaml b/sdk/canton/community/ledger/ledger-common-dars/src/main/daml/vetting_alt/daml.yaml index 251da7b95f9c..ac059a53941d 100644 --- a/sdk/canton/community/ledger/ledger-common-dars/src/main/daml/vetting_alt/daml.yaml +++ b/sdk/canton/community/ledger/ledger-common-dars/src/main/daml/vetting_alt/daml.yaml @@ -1,4 +1,4 @@ -sdk-version: 3.4.0-snapshot.20251002.14252.0.v9bd7e7ad +sdk-version: 3.4.0-snapshot.20251007.14274.0.ve2024cd6 name: vetting-alt source: . version: 1.0.0 diff --git a/sdk/canton/community/ledger/ledger-common-dars/src/main/daml/vetting_dep/daml.yaml b/sdk/canton/community/ledger/ledger-common-dars/src/main/daml/vetting_dep/daml.yaml index 2f23ef046b32..956dd186164b 100644 --- a/sdk/canton/community/ledger/ledger-common-dars/src/main/daml/vetting_dep/daml.yaml +++ b/sdk/canton/community/ledger/ledger-common-dars/src/main/daml/vetting_dep/daml.yaml @@ -1,4 +1,4 @@ -sdk-version: 3.4.0-snapshot.20251002.14252.0.v9bd7e7ad +sdk-version: 3.4.0-snapshot.20251007.14274.0.ve2024cd6 name: vetting-dep source: . 
version: 1.0.0 diff --git a/sdk/canton/community/ledger/ledger-common-dars/src/main/daml/vetting_main/1.0.0/daml.yaml b/sdk/canton/community/ledger/ledger-common-dars/src/main/daml/vetting_main/1.0.0/daml.yaml index 80fcadff47cb..f425d6c26d65 100644 --- a/sdk/canton/community/ledger/ledger-common-dars/src/main/daml/vetting_main/1.0.0/daml.yaml +++ b/sdk/canton/community/ledger/ledger-common-dars/src/main/daml/vetting_main/1.0.0/daml.yaml @@ -1,4 +1,4 @@ -sdk-version: 3.4.0-snapshot.20251002.14252.0.v9bd7e7ad +sdk-version: 3.4.0-snapshot.20251007.14274.0.ve2024cd6 name: vetting-main data-dependencies: - ../../../../../scala-2.13/resource_managed/main/vetting-dep-1.0.0.dar diff --git a/sdk/canton/community/ledger/ledger-common-dars/src/main/daml/vetting_main/2.0.0/daml.yaml b/sdk/canton/community/ledger/ledger-common-dars/src/main/daml/vetting_main/2.0.0/daml.yaml index 068a20cf1733..e598d1c6b334 100644 --- a/sdk/canton/community/ledger/ledger-common-dars/src/main/daml/vetting_main/2.0.0/daml.yaml +++ b/sdk/canton/community/ledger/ledger-common-dars/src/main/daml/vetting_main/2.0.0/daml.yaml @@ -1,4 +1,4 @@ -sdk-version: 3.4.0-snapshot.20251002.14252.0.v9bd7e7ad +sdk-version: 3.4.0-snapshot.20251007.14274.0.ve2024cd6 name: vetting-main data-dependencies: - ../../../../../scala-2.13/resource_managed/main/vetting-dep-1.0.0.dar diff --git a/sdk/canton/community/ledger/ledger-common-dars/src/main/daml/vetting_main/split-lineage-2.0.0/daml.yaml b/sdk/canton/community/ledger/ledger-common-dars/src/main/daml/vetting_main/split-lineage-2.0.0/daml.yaml index d0a411a31a4a..e0522054d26f 100644 --- a/sdk/canton/community/ledger/ledger-common-dars/src/main/daml/vetting_main/split-lineage-2.0.0/daml.yaml +++ b/sdk/canton/community/ledger/ledger-common-dars/src/main/daml/vetting_main/split-lineage-2.0.0/daml.yaml @@ -1,4 +1,4 @@ -sdk-version: 3.4.0-snapshot.20251002.14252.0.v9bd7e7ad +sdk-version: 3.4.0-snapshot.20251007.14274.0.ve2024cd6 canton-daml-plugin-name-suffix: split-lineage name: vetting-main data-dependencies: diff --git a/sdk/canton/community/ledger/ledger-common/src/main/scala/com/digitalasset/canton/ledger/error/CommonErrors.scala b/sdk/canton/community/ledger/ledger-common/src/main/scala/com/digitalasset/canton/ledger/error/CommonErrors.scala index 23d3ec8e06f5..6009e8eef87c 100644 --- a/sdk/canton/community/ledger/ledger-common/src/main/scala/com/digitalasset/canton/ledger/error/CommonErrors.scala +++ b/sdk/canton/community/ledger/ledger-common/src/main/scala/com/digitalasset/canton/ledger/error/CommonErrors.scala @@ -52,9 +52,10 @@ object CommonErrors extends CommonErrorGroup { id = "REQUEST_ALREADY_IN_FLIGHT", ErrorCategory.ContentionOnSharedResources, ) { - final case class Reject(requestId: String)(implicit errorLogger: ErrorLoggingContext) - extends DamlErrorWithDefiniteAnswer( - cause = s"The request $requestId is already in flight" + final case class Reject(requestId: String, details: String)(implicit + errorLogger: ErrorLoggingContext + ) extends DamlErrorWithDefiniteAnswer( + cause = s"Request with ID $requestId is already in flight: $details" ) } diff --git a/sdk/canton/community/ledger/ledger-common/src/main/scala/com/digitalasset/canton/ledger/error/groups/CommandExecutionErrors.scala b/sdk/canton/community/ledger/ledger-common/src/main/scala/com/digitalasset/canton/ledger/error/groups/CommandExecutionErrors.scala index f414adfc33f6..6aada5330e84 100644 --- 
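For illustration, the extended `REQUEST_ALREADY_IN_FLIGHT` rejection now carries a reason alongside the request ID. A hypothetical call site (the request ID and details are made up, and this assumes the enclosing error object is named `RequestAlreadyInFlight` with an implicit `ErrorLoggingContext` in scope):

```scala
// Hypothetical usage of the extended error constructor shown above.
val rejection = CommonErrors.RequestAlreadyInFlight.Reject(
  requestId = "cmd-42",
  details = "a submission with the same change ID is still awaiting completion",
)
// rejection.cause ==
//   "Request with ID cmd-42 is already in flight: a submission with the same change ID is still awaiting completion"
```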
a/sdk/canton/community/ledger/ledger-common/src/main/scala/com/digitalasset/canton/ledger/error/groups/CommandExecutionErrors.scala +++ b/sdk/canton/community/ledger/ledger-common/src/main/scala/com/digitalasset/canton/ledger/error/groups/CommandExecutionErrors.scala @@ -3,7 +3,16 @@ package com.digitalasset.canton.ledger.error.groups -import com.digitalasset.base.error.{DamlErrorWithDefiniteAnswer, ErrorCategory, ErrorCategoryRetry, ErrorCode, ErrorGroup, ErrorResource, Explanation, Resolution} +import com.digitalasset.base.error.{ + DamlErrorWithDefiniteAnswer, + ErrorCategory, + ErrorCategoryRetry, + ErrorCode, + ErrorGroup, + ErrorResource, + Explanation, + Resolution, +} import com.digitalasset.canton.ledger.error.LedgerApiErrors import com.digitalasset.canton.ledger.error.ParticipantErrorGroup.LedgerApiErrorGroup.CommandExecutionErrorGroup import com.digitalasset.canton.logging.ErrorLoggingContext @@ -12,7 +21,11 @@ import com.digitalasset.daml.lf.data.Ref.{Identifier, PackageId} import com.digitalasset.daml.lf.engine.Error as LfError import com.digitalasset.daml.lf.interpretation.Error as LfInterpretationError import com.digitalasset.daml.lf.language.{Ast, LanguageVersion, Reference} -import com.digitalasset.daml.lf.transaction.{GlobalKey, SerializationVersion, GlobalKeyWithMaintainers} +import com.digitalasset.daml.lf.transaction.{ + GlobalKey, + GlobalKeyWithMaintainers, + SerializationVersion, +} import com.digitalasset.daml.lf.value.Value.ContractId import com.digitalasset.daml.lf.value.{Value, ValueCoder} import com.digitalasset.daml.lf.{VersionRange, language} @@ -864,11 +877,22 @@ object CommandExecutionErrors extends CommandExecutionErrorGroup { ) { override def resources: Seq[(ErrorResource, String)] = { - def optKeyResources(keyOpt: Option[GlobalKeyWithMaintainers]): Seq[(ErrorResource, String)] = + def optKeyResources( + keyOpt: Option[GlobalKeyWithMaintainers] + ): Seq[(ErrorResource, String)] = Seq( - (ErrorResource.ContractKey.nullable, keyOpt.flatMap(key => tryEncodeValue(key.globalKey.key)).getOrElse("NULL")), - (ErrorResource.PackageName.nullable, keyOpt.map(_.globalKey.packageName).getOrElse("NULL")), - (ErrorResource.Parties.nullable, keyOpt.map(_.maintainers.mkString(",")).getOrElse("NULL")) + ( + ErrorResource.ContractKey.nullable, + keyOpt.flatMap(key => tryEncodeValue(key.globalKey.key)).getOrElse("NULL"), + ), + ( + ErrorResource.PackageName.nullable, + keyOpt.map(_.globalKey.packageName).getOrElse("NULL"), + ), + ( + ErrorResource.Parties.nullable, + keyOpt.map(_.maintainers.mkString(",")).getOrElse("NULL"), + ), ) Seq( @@ -876,12 +900,12 @@ object CommandExecutionErrors extends CommandExecutionErrorGroup { (ErrorResource.TemplateId, err.srcTemplateId.toString), (ErrorResource.TemplateId, err.dstTemplateId.toString), ) - ++ encodeParties(err.originalSignatories) - ++ encodeParties(err.originalObservers) - ++ optKeyResources(err.originalKeyOpt) - ++ encodeParties(err.recomputedSignatories) - ++ encodeParties(err.recomputedObservers) - ++ optKeyResources(err.recomputedKeyOpt) + ++ encodeParties(err.originalSignatories) + ++ encodeParties(err.originalObservers) + ++ optKeyResources(err.originalKeyOpt) + ++ encodeParties(err.recomputedSignatories) + ++ encodeParties(err.recomputedObservers) + ++ optKeyResources(err.recomputedKeyOpt) } } } diff --git a/sdk/canton/community/ledger/ledger-common/src/test/resources/test-models/daml-lf/encoder/test-2.1.dar b/sdk/canton/community/ledger/ledger-common/src/test/resources/test-models/daml-lf/encoder/test-2.1.dar index 
d1469f1ac6d8..9720101db08b 100755 Binary files a/sdk/canton/community/ledger/ledger-common/src/test/resources/test-models/daml-lf/encoder/test-2.1.dar and b/sdk/canton/community/ledger/ledger-common/src/test/resources/test-models/daml-lf/encoder/test-2.1.dar differ diff --git a/sdk/canton/community/ledger/ledger-common/src/test/resources/test-models/daml-lf/encoder/test-2.dev.dar b/sdk/canton/community/ledger/ledger-common/src/test/resources/test-models/daml-lf/encoder/test-2.dev.dar index f7d06e3e54dd..20a44d18b3c6 100755 Binary files a/sdk/canton/community/ledger/ledger-common/src/test/resources/test-models/daml-lf/encoder/test-2.dev.dar and b/sdk/canton/community/ledger/ledger-common/src/test/resources/test-models/daml-lf/encoder/test-2.dev.dar differ diff --git a/sdk/canton/community/ledger/ledger-json-api/src/main/resources/ledger-api/proto-data.yml b/sdk/canton/community/ledger/ledger-json-api/src/main/resources/ledger-api/proto-data.yml index 691e09d81d1b..861d3ab6d6dd 100644 --- a/sdk/canton/community/ledger/ledger-json-api/src/main/resources/ledger-api/proto-data.yml +++ b/sdk/canton/community/ledger/ledger-json-api/src/main/resources/ledger-api/proto-data.yml @@ -30,7 +30,15 @@ messages: Required onboarding_transactions: |- TopologyTransactions to onboard the external party - Must contain 3 signed transactions: NamespaceDelegation, PartyToKeyMapping, PartyToParticipant + Can contain: + - A namespace for the party. + This can be either a single NamespaceDelegation, + or a DecentralizedNamespaceDefinition along with its authorized namespace owners in the form of NamespaceDelegations. + May be provided; if so, it must be fully authorized by the signatures in this request combined with the existing topology state. + - A PartyToKeyMapping to register the party's signing keys. + May be provided; if so, it must be fully authorized by the signatures in this request combined with the existing topology state. + - A PartyToParticipant to register the hosting relationship of the party. + Must be provided. Required multi_hash_signatures: |- Optional signatures of the combined hash of all onboarding_transactions @@ -601,7 +609,7 @@ messages: fieldComments: version: |- [docs-entry-end: DamlTransaction.Node] - Transaction version, will be >= max(nodes version) + Serialization version; will be >= max(node versions) roots: Root nodes of the transaction nodes: List of nodes in the transaction node_seeds: Node seeds are values associated with certain nodes used for generating @@ -1751,6 +1759,13 @@ messages: Transaction Metadata Refer to the hashing documentation for information on how it should be hashed. fieldComments: + max_record_time: |- + Maximum timestamp at which the transaction can be recorded onto the ledger via the synchronizer `synchronizer_id`. + If submitted after this time, it will be rejected even if otherwise valid, in which case it needs to be prepared and signed again + with a new valid max_record_time. + Unsigned in 3.3 to avoid a breaking protocol change. + Will be signed in 3.4+. + Set max_record_time in the PrepareSubmissionRequest to get this field set accordingly. synchronizer_id: '' preparation_time: '' min_ledger_effective_time: '' @@ -2010,6 +2025,15 @@ messages: The change ID can be used for matching the intended ledger changes with all their completions. Must be a valid LedgerString (as described in ``value.proto``).
Required + max_record_time: |- + Maximum timestamp at which the transaction can be recorded onto the ledger via the synchronizer specified in the `PrepareSubmissionResponse`. + If submitted after this time, it will be rejected even if otherwise valid, in which case it needs to be prepared and signed again + with a new valid max_record_time. + Use this to limit the time-to-live of a prepared transaction, + which is useful for knowing when it can definitely no longer be + accepted and it is safe to prepare another transaction for the + same intent. + Optional synchronizer_id: |- Must be a valid synchronizer id If not set, a suitable synchronizer that this node is connected to will be chosen diff --git a/sdk/canton/community/ledger/ledger-json-api/src/main/scala/com/digitalasset/canton/http/json/v2/JsInteractiveSubmissionService.scala b/sdk/canton/community/ledger/ledger-json-api/src/main/scala/com/digitalasset/canton/http/json/v2/JsInteractiveSubmissionService.scala index 2842c7e12252..cbcf81829d6c 100644 --- a/sdk/canton/community/ledger/ledger-json-api/src/main/scala/com/digitalasset/canton/http/json/v2/JsInteractiveSubmissionService.scala +++ b/sdk/canton/community/ledger/ledger-json-api/src/main/scala/com/digitalasset/canton/http/json/v2/JsInteractiveSubmissionService.scala @@ -10,8 +10,8 @@ import com.daml.ledger.api.v2.interactive.interactive_submission_service.{ InteractiveSubmissionServiceGrpc, MinLedgerTime, } -import com.daml.ledger.api.v2.package_reference import com.daml.ledger.api.v2.transaction_filter.TransactionFormat +import com.daml.ledger.api.v2.{crypto as lapicrypto, package_reference} import com.digitalasset.canton.auth.AuthInterceptor import com.digitalasset.canton.http.json.v2.CirceRelaxedCodec.deriveRelaxedCodec import com.digitalasset.canton.http.json.v2.Endpoints.{CallerContext, TracedInput, v2Endpoint} @@ -184,6 +184,7 @@ final case class JsPrepareSubmissionRequest( packageIdSelectionPreference: Seq[String], verboseHashing: Boolean = false, prefetchContractKeys: Seq[js.PrefetchContractKey] = Seq.empty, + maxRecordTime: Option[com.google.protobuf.timestamp.Timestamp], ) final case class JsPrepareSubmissionResponse( @@ -386,19 +387,17 @@ object JsInteractiveSubmissionServiceCodecs { : Codec[interactive_submission_service.SinglePartySignatures] = deriveRelaxedCodec - implicit val signatureRW: Codec[interactive_submission_service.Signature] = + implicit val signatureRW: Codec[lapicrypto.Signature] = deriveRelaxedCodec - implicit val signingAlgorithmSpecEncoder : Encoder[interactive_submission_service.SigningAlgorithmSpec] = + implicit val signingAlgorithmSpecEncoder: Encoder[lapicrypto.SigningAlgorithmSpec] = stringEncoderForEnum() - implicit val signingAlgorithmSpecDecoder : Decoder[interactive_submission_service.SigningAlgorithmSpec] = + implicit val signingAlgorithmSpecDecoder: Decoder[lapicrypto.SigningAlgorithmSpec] = stringDecoderForEnum() - implicit val signatureFormatDecoder: Decoder[interactive_submission_service.SignatureFormat] = + implicit val signatureFormatDecoder: Decoder[lapicrypto.SignatureFormat] = stringDecoderForEnum() - implicit val signatureFormatEncoder: Encoder[interactive_submission_service.SignatureFormat] = + implicit val signatureFormatEncoder: Encoder[lapicrypto.SignatureFormat] = stringEncoderForEnum() implicit val jsExecuteSubmissionRequestRW: Codec[JsExecuteSubmissionRequest] = @@ -432,10 +431,10 @@ object JsInteractiveSubmissionServiceCodecs { deriveRelaxedCodec // Schema mappings are added to align generated tapir docs with a
circe mapping of ADTs - implicit val signatureFormatSchema: Schema[interactive_submission_service.SignatureFormat] = + implicit val signatureFormatSchema: Schema[lapicrypto.SignatureFormat] = stringSchemaForEnum() - implicit val signingAlgorithmSpec: Schema[interactive_submission_service.SigningAlgorithmSpec] = + implicit val signingAlgorithmSpec: Schema[lapicrypto.SigningAlgorithmSpec] = stringSchemaForEnum() implicit val timeSchema: Schema[interactive_submission_service.MinLedgerTime.Time] = diff --git a/sdk/canton/community/ledger/ledger-json-api/src/main/scala/com/digitalasset/canton/http/json/v2/JsPartyManagementService.scala b/sdk/canton/community/ledger/ledger-json-api/src/main/scala/com/digitalasset/canton/http/json/v2/JsPartyManagementService.scala index a4fe13722341..db20be9f8d43 100644 --- a/sdk/canton/community/ledger/ledger-json-api/src/main/scala/com/digitalasset/canton/http/json/v2/JsPartyManagementService.scala +++ b/sdk/canton/community/ledger/ledger-json-api/src/main/scala/com/digitalasset/canton/http/json/v2/JsPartyManagementService.scala @@ -5,7 +5,7 @@ package com.digitalasset.canton.http.json.v2 import com.daml.ledger.api.v2.admin.party_management_service import com.daml.ledger.api.v2.admin.party_management_service.GenerateExternalPartyTopologyRequest -import com.daml.ledger.api.v2.interactive.interactive_submission_service +import com.daml.ledger.api.v2.crypto as lapicrypto import com.digitalasset.canton.auth.AuthInterceptor import com.digitalasset.canton.http.json.v2.CirceRelaxedCodec.{ deriveRelaxedCodec, @@ -241,10 +241,10 @@ object JsPartyManagementCodecs { import JsInteractiveSubmissionServiceCodecs.signatureRW import JsSchema.Crypto.* - implicit val signatureFormatSchema: Schema[interactive_submission_service.SignatureFormat] = + implicit val signatureFormatSchema: Schema[lapicrypto.SignatureFormat] = Schema.string - implicit val signingAlgorithmSpec: Schema[interactive_submission_service.SigningAlgorithmSpec] = + implicit val signingAlgorithmSpec: Schema[lapicrypto.SigningAlgorithmSpec] = Schema.string implicit val partyDetails: Codec[party_management_service.PartyDetails] = deriveRelaxedCodec diff --git a/sdk/canton/community/ledger/ledger-json-api/src/main/scala/com/digitalasset/canton/http/json/v2/TranscodePackageIdResolver.scala b/sdk/canton/community/ledger/ledger-json-api/src/main/scala/com/digitalasset/canton/http/json/v2/TranscodePackageIdResolver.scala index 2e7007f89d62..3bd1964cb3b7 100644 --- a/sdk/canton/community/ledger/ledger-json-api/src/main/scala/com/digitalasset/canton/http/json/v2/TranscodePackageIdResolver.scala +++ b/sdk/canton/community/ledger/ledger-json-api/src/main/scala/com/digitalasset/canton/http/json/v2/TranscodePackageIdResolver.scala @@ -307,7 +307,6 @@ private class TranscodePackageMetadataBackedResolver( (for { userPreferences <- - // TODO(#27499): Support conflicting preferences (two package-ids with the same package-name) TranscodePackageIdResolver.resolvePackageNames(userPreferences, packageMetadataSnapshot) localPreferences <- packageNames.forgetNE.toList.traverse { packageName => packageNameMap @@ -366,6 +365,7 @@ object TranscodePackageIdResolver { .toRight(show"Package-id $packageId not known") .map(_ -> packageId) } + // TODO(#27500): support multiple preferences per package-name .map(_.toMap) } diff --git a/sdk/canton/community/ledger/ledger-json-api/src/test/daml/damldefinitionsservice/dep/daml.yaml b/sdk/canton/community/ledger/ledger-json-api/src/test/daml/damldefinitionsservice/dep/daml.yaml index 
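The codecs above rely on `stringEncoderForEnum`/`stringDecoderForEnum` helpers so the relocated `lapicrypto` enums are represented by name rather than by number in JSON. A plausible sketch of such helpers for ScalaPB enums (the actual helpers in this codebase may differ):

```scala
import io.circe.{Decoder, Encoder}
import scalapb.{GeneratedEnum, GeneratedEnumCompanion}

// Sketch: encode a protobuf enum by its proto name rather than its number;
// decode by looking the name up in the generated companion, so unknown
// names surface as decoding failures instead of UNRECOGNIZED values.
def stringEncoderForEnum[E <: GeneratedEnum](): Encoder[E] =
  Encoder.encodeString.contramap(_.name)

def stringDecoderForEnum[E <: GeneratedEnum]()(implicit
    companion: GeneratedEnumCompanion[E]
): Decoder[E] =
  Decoder.decodeString.emap(name => companion.fromName(name).toRight(s"Unknown enum value: $name"))
```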
1bed3d03654e..067feb475606 100644 --- a/sdk/canton/community/ledger/ledger-json-api/src/test/daml/damldefinitionsservice/dep/daml.yaml +++ b/sdk/canton/community/ledger/ledger-json-api/src/test/daml/damldefinitionsservice/dep/daml.yaml @@ -1,4 +1,4 @@ -sdk-version: 3.4.0-snapshot.20251002.14252.0.v9bd7e7ad +sdk-version: 3.4.0-snapshot.20251007.14274.0.ve2024cd6 build-options: - --target=2.dev - --enable-interfaces=yes diff --git a/sdk/canton/community/ledger/ledger-json-api/src/test/daml/damldefinitionsservice/main/daml.yaml b/sdk/canton/community/ledger/ledger-json-api/src/test/daml/damldefinitionsservice/main/daml.yaml index b7a55d75421f..457a3ae3ee07 100644 --- a/sdk/canton/community/ledger/ledger-json-api/src/test/daml/damldefinitionsservice/main/daml.yaml +++ b/sdk/canton/community/ledger/ledger-json-api/src/test/daml/damldefinitionsservice/main/daml.yaml @@ -1,4 +1,4 @@ -sdk-version: 3.4.0-snapshot.20251002.14252.0.v9bd7e7ad +sdk-version: 3.4.0-snapshot.20251007.14274.0.ve2024cd6 build-options: - --target=2.dev - --enable-interfaces=yes diff --git a/sdk/canton/community/ledger/ledger-json-api/src/test/daml/v2_1/daml.yaml b/sdk/canton/community/ledger/ledger-json-api/src/test/daml/v2_1/daml.yaml index 94cf3df9426f..ff025e340b5b 100644 --- a/sdk/canton/community/ledger/ledger-json-api/src/test/daml/v2_1/daml.yaml +++ b/sdk/canton/community/ledger/ledger-json-api/src/test/daml/v2_1/daml.yaml @@ -1,4 +1,4 @@ -sdk-version: 3.4.0-snapshot.20251002.14252.0.v9bd7e7ad +sdk-version: 3.4.0-snapshot.20251007.14274.0.ve2024cd6 build-options: - --target=2.1 name: JsonEncodingTest diff --git a/sdk/canton/community/ledger/ledger-json-api/src/test/daml/v2_dev/daml.yaml b/sdk/canton/community/ledger/ledger-json-api/src/test/daml/v2_dev/daml.yaml index bb975920a9aa..37a38251d4a7 100644 --- a/sdk/canton/community/ledger/ledger-json-api/src/test/daml/v2_dev/daml.yaml +++ b/sdk/canton/community/ledger/ledger-json-api/src/test/daml/v2_dev/daml.yaml @@ -1,4 +1,4 @@ -sdk-version: 3.4.0-snapshot.20251002.14252.0.v9bd7e7ad +sdk-version: 3.4.0-snapshot.20251007.14274.0.ve2024cd6 build-options: - --target=2.dev name: JsonEncodingTestDev diff --git a/sdk/canton/community/ledger/ledger-json-api/src/test/resources/json-api-docs/openapi.yaml b/sdk/canton/community/ledger/ledger-json-api/src/test/resources/json-api-docs/openapi.yaml index 33080af0baf5..48658572a9b1 100644 --- a/sdk/canton/community/ledger/ledger-json-api/src/test/resources/json-api-docs/openapi.yaml +++ b/sdk/canton/community/ledger/ledger-json-api/src/test/resources/json-api-docs/openapi.yaml @@ -2072,7 +2072,15 @@ components: onboardingTransactions: description: |- TopologyTransactions to onboard the external party - Must contain 3 signed transactions: NamespaceDelegation, PartyToKeyMapping, PartyToParticipant + Can contain: + - A namespace for the party. + This can be either a single NamespaceDelegation, + or a DecentralizedNamespaceDefinition along with its authorized namespace owners in the form of NamespaceDelegations. + May be provided; if so, it must be fully authorized by the signatures in this request combined with the existing topology state. + - A PartyToKeyMapping to register the party's signing keys. + May be provided; if so, it must be fully authorized by the signatures in this request combined with the existing topology state. + - A PartyToParticipant to register the hosting relationship of the party. + Must be provided.
Required type: array items: $ref: '#/components/schemas/PrefetchContractKey' + maxRecordTime: + description: |- + Maximum timestamp at which the transaction can be recorded onto the ledger via the synchronizer specified in the `PrepareSubmissionResponse`. + If submitted after this time, it will be rejected even if otherwise valid, in which case it needs to be prepared and signed again + with a new valid max_record_time. + Use this to limit the time-to-live of a prepared transaction, + which is useful for knowing when it can definitely no longer be + accepted and it is safe to prepare another transaction for the + same intent. + Optional + type: string JsPrepareSubmissionResponse: title: JsPrepareSubmissionResponse description: '[docs-entry-end: HashingSchemeVersion]' diff --git a/sdk/canton/community/ledger/ledger-json-client/src/test/scala/com/digitalasset/canton/openapi/CantonGenerators.scala b/sdk/canton/community/ledger/ledger-json-client/src/test/scala/com/digitalasset/canton/openapi/CantonGenerators.scala index d396844cd61c..1d9e383a7b52 100644 --- a/sdk/canton/community/ledger/ledger-json-client/src/test/scala/com/digitalasset/canton/openapi/CantonGenerators.scala +++ b/sdk/canton/community/ledger/ledger-json-client/src/test/scala/com/digitalasset/canton/openapi/CantonGenerators.scala @@ -16,13 +16,11 @@ object CantonGenerators { } // We define custom generators for the enums here, so that UNRECOGNIZED values are not generated - implicit val arbSignatureFormat : Arbitrary[lapi.interactive.interactive_submission_service.SignatureFormat] = - enumArbitrary(lapi.interactive.interactive_submission_service.SignatureFormat.enumCompanion) - implicit val arbSigningAlgorithmSpec : Arbitrary[lapi.interactive.interactive_submission_service.SigningAlgorithmSpec] = + implicit val arbSignatureFormat: Arbitrary[lapi.crypto.SignatureFormat] = + enumArbitrary(lapi.crypto.SignatureFormat.enumCompanion) + implicit val arbSigningAlgorithmSpec: Arbitrary[lapi.crypto.SigningAlgorithmSpec] = enumArbitrary( + lapi.crypto.SigningAlgorithmSpec.enumCompanion ) implicit val arbHashingSchemeVersion : Arbitrary[lapi.interactive.interactive_submission_service.HashingSchemeVersion] = diff --git a/sdk/canton/community/ledger/ledger-json-client/src/test/scala/com/digitalasset/canton/openapi/OpenapiTypesTest.scala b/sdk/canton/community/ledger/ledger-json-client/src/test/scala/com/digitalasset/canton/openapi/OpenapiTypesTest.scala index 41e705d3a9bd..ee5d43769a93 100644 --- a/sdk/canton/community/ledger/ledger-json-client/src/test/scala/com/digitalasset/canton/openapi/OpenapiTypesTest.scala +++ b/sdk/canton/community/ledger/ledger-json-client/src/test/scala/com/digitalasset/canton/openapi/OpenapiTypesTest.scala @@ -653,7 +653,7 @@ class OpenapiTypesTest extends AnyWordSpec with Matchers { Mapping[v2.admin.user_management_service.Right, openapi.Right]( openapi.Right.fromJson ), - Mapping[v2.interactive.interactive_submission_service.Signature, openapi.Signature]( + Mapping[v2.crypto.Signature, openapi.Signature]( openapi.Signature.fromJson ), Mapping[ diff --git a/sdk/canton/community/participant/src/main/daml/canton-builtin-admin-workflow-party-replication-alpha/daml.yaml b/sdk/canton/community/participant/src/main/daml/canton-builtin-admin-workflow-party-replication-alpha/daml.yaml index 898d5ef0bb8a..c6e0c8d81be5 100644 ---
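The custom generators above exist so that property tests never produce `UNRECOGNIZED` enum values, which would not survive a JSON round trip. A sketch of the assumed `enumArbitrary` helper, relying on the fact that ScalaPB companions expose the recognized values:

```scala
import org.scalacheck.{Arbitrary, Gen}
import scalapb.{GeneratedEnum, GeneratedEnumCompanion}

// Sketch: draw only from the recognized values listed by the generated
// companion, so the synthetic UNRECOGNIZED case never appears in test data.
def enumArbitrary[E <: GeneratedEnum](companion: GeneratedEnumCompanion[E]): Arbitrary[E] =
  Arbitrary(Gen.oneOf(companion.values))
```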
a/sdk/canton/community/participant/src/main/daml/canton-builtin-admin-workflow-party-replication-alpha/daml.yaml +++ b/sdk/canton/community/participant/src/main/daml/canton-builtin-admin-workflow-party-replication-alpha/daml.yaml @@ -1,4 +1,4 @@ -sdk-version: 3.4.0-snapshot.20251002.14252.0.v9bd7e7ad +sdk-version: 3.4.0-snapshot.20251007.14274.0.ve2024cd6 build-options: - --target=2.1 name: canton-builtin-admin-workflow-party-replication-alpha diff --git a/sdk/canton/community/participant/src/main/daml/canton-builtin-admin-workflow-ping/daml.yaml b/sdk/canton/community/participant/src/main/daml/canton-builtin-admin-workflow-ping/daml.yaml index cc495f7e0ed4..a26818ecac22 100644 --- a/sdk/canton/community/participant/src/main/daml/canton-builtin-admin-workflow-ping/daml.yaml +++ b/sdk/canton/community/participant/src/main/daml/canton-builtin-admin-workflow-ping/daml.yaml @@ -1,4 +1,4 @@ -sdk-version: 3.4.0-snapshot.20251002.14252.0.v9bd7e7ad +sdk-version: 3.4.0-snapshot.20251007.14274.0.ve2024cd6 build-options: - --target=2.1 name: canton-builtin-admin-workflow-ping diff --git a/sdk/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/ParticipantNodeParameters.scala b/sdk/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/ParticipantNodeParameters.scala index f948d4ce4bb4..0a1674f2bca2 100644 --- a/sdk/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/ParticipantNodeParameters.scala +++ b/sdk/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/ParticipantNodeParameters.scala @@ -32,6 +32,7 @@ final case class ParticipantNodeParameters( unsafeOnlinePartyReplication: Option[UnsafeOnlinePartyReplicationConfig], automaticallyPerformLogicalSynchronizerUpgrade: Boolean, reassignmentsConfig: ReassignmentsConfig, + doNotAwaitOnCheckingIncomingCommitments: Boolean, ) extends CantonNodeParameters with HasGeneralCantonNodeParameters { override def dontWarnOnDeprecatedPV: Boolean = protocolConfig.dontWarnOnDeprecatedPV @@ -86,5 +87,6 @@ object ParticipantNodeParameters { reassignmentsConfig = ReassignmentsConfig( targetTimestampForwardTolerance = NonNegativeFiniteDuration.ofSeconds(30) ), + doNotAwaitOnCheckingIncomingCommitments = false, ) } diff --git a/sdk/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/admin/CantonPackageServiceError.scala b/sdk/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/admin/CantonPackageServiceError.scala index b29daccff296..2b3c1db13692 100644 --- a/sdk/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/admin/CantonPackageServiceError.scala +++ b/sdk/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/admin/CantonPackageServiceError.scala @@ -11,6 +11,7 @@ import com.digitalasset.base.error.{ Explanation, Resolution, } +import com.digitalasset.canton.LfPackageId import com.digitalasset.canton.error.CantonErrorGroups.ParticipantErrorGroup.PackageServiceErrorGroup import com.digitalasset.canton.error.{CantonError, ContextualizedCantonError, ParentCantonError} import com.digitalasset.canton.ledger.api.VettedPackagesRef @@ -236,14 +237,12 @@ object CantonPackageServiceError extends PackageServiceErrorGroup { id = "AMBIGUOUS_VETTING_REFERENCE", ErrorCategory.InvalidGivenCurrentSystemStateOther, ) { - final case class Reject(reference: VettedPackagesRef)(implicit - val loggingContext: ErrorLoggingContext + final case class Reject(reference: 
VettedPackagesRef, matchingPackages: Set[LfPackageId])( + implicit val loggingContext: ErrorLoggingContext ) extends CantonError.Impl( cause = - s"The vetted package reference $reference matches more than one package in the package store." + show"The package reference $reference matches multiple packages: ${matchingPackages.toSeq}" ) } - } - } diff --git a/sdk/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/admin/PackageService.scala b/sdk/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/admin/PackageService.scala index 90a6b33953a3..49e87734e2d2 100644 --- a/sdk/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/admin/PackageService.scala +++ b/sdk/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/admin/PackageService.scala @@ -232,15 +232,14 @@ class PackageService( // vetted. On the other hand, when unvetting, it is safe to unvet all // versions of a package. case Right(matchingPackages) => - val pkgsSeq: List[PackageId] = matchingPackages.iterator.toList - if (targetState.isVetting && pkgsSeq.lengthIs >= 2) { + if (targetState.isVetting && matchingPackages.sizeIs >= 2) { EitherT.leftT[FutureUnlessShutdown, List[SinglePackageTargetVetting[PackageId]]]( - VettingReferenceMoreThanOne.Reject(targetState.ref) + VettingReferenceMoreThanOne.Reject(targetState.ref, matchingPackages) ) } else { - EitherT.rightT[FutureUnlessShutdown, RpcError](pkgsSeq.map { (pkgId: PackageId) => - SinglePackageTargetVetting(pkgId, targetState.bounds) - }) + EitherT.rightT[FutureUnlessShutdown, RpcError]( + matchingPackages.toList.map(SinglePackageTargetVetting(_, targetState.bounds)) + ) } } @@ -447,7 +446,7 @@ class PackageService( // Unvetting a DAR requires AllowUnvettedDependencies because it is going to unvet all // packages from the DAR, even the utility packages. UnvetDar is an experimental // operation that requires expert-level knowledge. 
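The rule encoded above (the related force-flag hunk continues below): a vetting reference that resolves to more than one package is ambiguous and must be rejected, whereas unvetting deliberately fans out to every matching package version. A simplified sketch of that decision, with types reduced to `String`:

```scala
// Simplified sketch of the vetting/unvetting resolution rule above: vetting
// an ambiguous reference is rejected, while unvetting applies to all matches.
def resolveVettingTargets(
    isVetting: Boolean,
    matchingPackages: Set[String],
): Either[String, List[String]] =
  if (isVetting && matchingPackages.sizeIs >= 2)
    Left(s"ambiguous vetting reference, matches: ${matchingPackages.mkString(", ")}")
  else
    Right(matchingPackages.toList)
```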
- ForceFlags(ForceFlag.AllowUnvetPackage, ForceFlag.AllowUnvettedDependencies), + ForceFlags(ForceFlag.AllowUnvettedDependencies), ) .leftWiden } diff --git a/sdk/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/admin/grpc/GrpcParticipantRepairService.scala b/sdk/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/admin/grpc/GrpcParticipantRepairService.scala index 52e2b9ffd2dd..1ffd7b1b57fd 100644 --- a/sdk/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/admin/grpc/GrpcParticipantRepairService.scala +++ b/sdk/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/admin/grpc/GrpcParticipantRepairService.scala @@ -54,7 +54,6 @@ import com.digitalasset.canton.util.{ OptionUtil, ResourceUtil, } -import com.digitalasset.canton.version.ProtocolVersion import com.digitalasset.canton.{ LfPartyId, ReassignmentCounter, @@ -140,9 +139,7 @@ final class GrpcParticipantRepairService( )(implicit traceContext: TraceContext): Future[Unit] = { val gzipOut = new GZIPOutputStream(out) val res = for { - validRequest <- EitherT.fromEither[FutureUnlessShutdown]( - ValidExportAcsOldRequest(request, sync.stateInspection.allProtocolVersions) - ) + validRequest <- EitherT.fromEither[FutureUnlessShutdown](ValidExportAcsOldRequest(request)) timestampAsString = validRequest.timestamp.fold("head")(ts => s"at $ts") _ = logger.info( s"Exporting active contract set ($timestampAsString) for parties ${validRequest.parties}" @@ -155,7 +152,6 @@ final class GrpcParticipantRepairService( _.filterString.startsWith(request.filterSynchronizerId), validRequest.parties, validRequest.timestamp, - validRequest.contractSynchronizerRenames, skipCleanTimestampCheck = validRequest.force, partiesOffboarding = validRequest.partiesOffboarding, ) @@ -878,48 +874,8 @@ object GrpcParticipantRepairService { // TODO(#24610) - remove, used by ExportAcsOldRequest only private object ValidExportAcsOldRequest { - - private def validateContractSynchronizerRenames( - contractSynchronizerRenames: Map[String, ExportAcsOldRequest.TargetSynchronizer], - allProtocolVersions: Map[SynchronizerId, ProtocolVersion], - ): Either[String, List[(SynchronizerId, (SynchronizerId, ProtocolVersion))]] = - contractSynchronizerRenames.toList.traverse { - case ( - source, - ExportAcsOldRequest.TargetSynchronizer(targetSynchronizer, targetProtocolVersionRaw), - ) => - for { - sourceId <- SynchronizerId - .fromProtoPrimitive(source, "source synchronizer id") - .leftMap(_.message) - - targetSynchronizerId <- SynchronizerId - .fromProtoPrimitive(targetSynchronizer, "target synchronizer id") - .leftMap(_.message) - targetProtocolVersion <- ProtocolVersion - .fromProtoPrimitive(targetProtocolVersionRaw) - .leftMap(_.toString) - - /* - The `targetProtocolVersion` should be the one running on the corresponding synchronizer. 
- */ - _ <- allProtocolVersions - .get(targetSynchronizerId) - .map { foundProtocolVersion => - Either.cond( - foundProtocolVersion == targetProtocolVersion, - (), - s"Inconsistent protocol versions for synchronizer $targetSynchronizerId: found version is $foundProtocolVersion, passed is $targetProtocolVersion", - ) - } - .getOrElse(Either.unit) - - } yield (sourceId, (targetSynchronizerId, targetProtocolVersion)) - } - private def validateRequestOld( - request: ExportAcsOldRequest, - allProtocolVersions: Map[SynchronizerId, ProtocolVersion], + request: ExportAcsOldRequest ): Either[String, ValidExportAcsOldRequest] = for { parties <- request.parties.traverse(party => @@ -928,37 +884,29 @@ object GrpcParticipantRepairService { timestamp <- request.timestamp .traverse(CantonTimestamp.fromProtoTimestamp) .leftMap(_.message) - contractSynchronizerRenames <- validateContractSynchronizerRenames( - request.contractSynchronizerRenames, - allProtocolVersions, - ) } yield ValidExportAcsOldRequest( parties.toSet, timestamp, - contractSynchronizerRenames.toMap, force = request.force, partiesOffboarding = request.partiesOffboarding, ) def apply( - request: ExportAcsOldRequest, - allProtocolVersions: Map[SynchronizerId, ProtocolVersion], + request: ExportAcsOldRequest )(implicit elc: ErrorLoggingContext ): Either[RepairServiceError, ValidExportAcsOldRequest] = for { - validRequest <- validateRequestOld(request, allProtocolVersions).leftMap( + validRequest <- validateRequestOld(request).leftMap( RepairServiceError.InvalidArgument.Error(_) ) } yield validRequest - } // TODO(#24610) - remove, used by ExportAcsOldRequest only private final case class ValidExportAcsOldRequest private ( parties: Set[LfPartyId], timestamp: Option[CantonTimestamp], - contractSynchronizerRenames: Map[SynchronizerId, (SynchronizerId, ProtocolVersion)], force: Boolean, // if true, does not check whether `timestamp` is clean partiesOffboarding: Boolean, ) diff --git a/sdk/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/admin/inspection/SyncStateInspection.scala b/sdk/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/admin/inspection/SyncStateInspection.scala index f881aec9aef4..d819c3687ed9 100644 --- a/sdk/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/admin/inspection/SyncStateInspection.scala +++ b/sdk/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/admin/inspection/SyncStateInspection.scala @@ -266,109 +266,103 @@ final class SyncStateInspection( EitherT.right(disabledCleaningF) } - // TODO(#26061) Fix this computation - def allProtocolVersions: Map[SynchronizerId, ProtocolVersion] = - syncPersistentStateManager.getAll.keySet - .map(id => id.logical -> id.protocolVersion) - .toMap - - /* - TODO(#26061) If this method cannot be removed, ensure this is correct. - In particular, it does not make sense to do every step for each PS. 
- */ def exportAcsDumpActiveContracts( outputStream: OutputStream, filterSynchronizerId: SynchronizerId => Boolean, parties: Set[LfPartyId], timestamp: Option[CantonTimestamp], - contractSynchronizerRenames: Map[SynchronizerId, (SynchronizerId, ProtocolVersion)], skipCleanTimestampCheck: Boolean, partiesOffboarding: Boolean, )(implicit traceContext: TraceContext ): EitherT[FutureUnlessShutdown, AcsInspectionError, Unit] = { - val allSynchronizers = syncPersistentStateManager.getAll + // To disable/re-enable background pruning + val allSynchronizers: Map[PhysicalSynchronizerId, SyncPersistentState] = + syncPersistentStateManager.getAll + + // For the ACS export + val latestSynchronizers = syncPersistentStateManager.getAllLatest + + def writeACSToStream(synchronizerId: SynchronizerId, state: SyncPersistentState) = { + val pv = state.staticSynchronizerParameters.protocolVersion + + val acsInspection = state.acsInspection + val timeOfSnapshotO = timestamp.map(TimeOfChange.apply) + for { + result <- acsInspection + .forEachVisibleActiveContract( + synchronizerId.logical, + parties, + timeOfSnapshotO, + skipCleanTocCheck = skipCleanTimestampCheck, + ) { case (contractInst, reassignmentCounter) => + (for { + contract <- SerializableContract.fromLfFatContractInst(contractInst.inst) + activeContract = ActiveContractOld.create( + synchronizerId, + contract, + reassignmentCounter, + )(pv) + + _ <- activeContract.writeDelimitedTo(outputStream) + } yield ()) match { + case Left(errorMessage) => + Left( + AcsInspectionError.SerializationIssue( + synchronizerId.logical, + contractInst.contractId, + errorMessage, + ) + ) + case Right(_) => + outputStream.flush() + Either.unit + } + } - // disable journal cleaning for the duration of the dump - disableJournalCleaningForFilter(allSynchronizers, filterSynchronizerId) - .mapK(FutureUnlessShutdown.outcomeK) - .flatMap { _ => - MonadUtil.sequentialTraverse_(allSynchronizers) { - case (synchronizerId, state) if filterSynchronizerId(synchronizerId.logical) => - val (synchronizerIdForExport, protocolVersion) = - contractSynchronizerRenames.getOrElse( - synchronizerId.logical, - (synchronizerId.logical, state.staticSynchronizerParameters.protocolVersion), - ) - val acsInspection = state.acsInspection - val timeOfSnapshotO = timestamp.map(TimeOfChange.apply) - val ret = for { - result <- acsInspection - .forEachVisibleActiveContract( + _ <- result match { + case Some((allStakeholders, snapshotToc)) if partiesOffboarding => + for { + connectedSynchronizer <- EitherT.fromOption[FutureUnlessShutdown]( + connectedSynchronizersLookup.get(synchronizerId), + AcsInspectionError.OffboardingParty( synchronizerId.logical, - parties, - timeOfSnapshotO, - skipCleanTocCheck = skipCleanTimestampCheck, - ) { case (contractInst, reassignmentCounter) => - (for { - contract <- SerializableContract.fromLfFatContractInst(contractInst.inst) - activeContract = - ActiveContractOld.create( - synchronizerIdForExport, - contract, - reassignmentCounter, - )( - protocolVersion - ) - _ <- activeContract.writeDelimitedTo(outputStream) - } yield ()) match { - case Left(errorMessage) => - Left( - AcsInspectionError.SerializationIssue( - synchronizerId.logical, - contractInst.contractId, - errorMessage, - ) - ) - case Right(_) => - outputStream.flush() - Either.unit - } - } - - _ <- result match { - case Some((allStakeholders, snapshotToc)) if partiesOffboarding => - for { - connectedSynchronizer <- EitherT.fromOption[FutureUnlessShutdown]( - connectedSynchronizersLookup.get(synchronizerId), - 
AcsInspectionError.OffboardingParty( - synchronizerId.logical, - s"Unable to get topology client for synchronizer $synchronizerId; check synchronizer connectivity.", - ), - ) + s"Unable to get topology client for synchronizer $synchronizerId; check synchronizer connectivity.", + ), + ) - _ <- acsInspection.checkOffboardingSnapshot( - participantId, - offboardedParties = parties, - allStakeholders = allStakeholders, - snapshotToc = snapshotToc, - topologyClient = connectedSynchronizer.topologyClient, - ) - } yield () + _ <- acsInspection.checkOffboardingSnapshot( + participantId, + offboardedParties = parties, + allStakeholders = allStakeholders, + snapshotToc = snapshotToc, + topologyClient = connectedSynchronizer.topologyClient, + ) + } yield () - // Snapshot is empty or partiesOffboarding is false - case _ => EitherTUtil.unitUS[AcsInspectionError] - } + // Snapshot is empty or partiesOffboarding is false + case _ => EitherTUtil.unitUS[AcsInspectionError] + } + } yield () + } - } yield () - // re-enable journal cleaning after the dump - ret.thereafter { _ => - journalCleaningControl.enable(synchronizerId) - } - case _ => - EitherTUtil.unitUS + // disable journal cleaning for the duration of the dump + val res: EitherT[FutureUnlessShutdown, AcsInspectionError, Unit] = + disableJournalCleaningForFilter(allSynchronizers, filterSynchronizerId) + .mapK(FutureUnlessShutdown.outcomeK) + .flatMap { _ => + MonadUtil.sequentialTraverse_(latestSynchronizers) { + case (synchronizerId, state) if filterSynchronizerId(synchronizerId) => + writeACSToStream(synchronizerId, state) + case _ => + EitherTUtil.unitUS + } } - } + + // re-enable journal cleaning after the dump + res.thereafter { _ => + allSynchronizers.keys.foreach(journalCleaningControl.enable) + } } def contractCount(implicit traceContext: TraceContext): FutureUnlessShutdown[Int] = diff --git a/sdk/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/config/ParticipantNodeConfig.scala b/sdk/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/config/ParticipantNodeConfig.scala index 5143fc94fe73..bcc5be59c986 100644 --- a/sdk/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/config/ParticipantNodeConfig.scala +++ b/sdk/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/config/ParticipantNodeConfig.scala @@ -349,6 +349,9 @@ object TestingTimeServiceConfig { * [[com.digitalasset.canton.config.CantonParameters.enableAdditionalConsistencyChecks]] being * enabled are logged, measured in the number of contract activations during a single connection * to a synchronizer. Used only for database storage. + * @param doNotAwaitOnCheckingIncomingCommitments + * Enable fully asynchronous checking of incoming commitments. This may result in some incoming + * commitments not being checked in case of crashes or HA failovers. 
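To make the trade-off behind the flag documented above concrete: with the flag set, the commitment check is fired and forgotten instead of being awaited before processing continues. A hypothetical illustration, not the actual processing code:

```scala
import scala.concurrent.Future

// Hypothetical shape of the flag's effect: when doNotAwait is set, the check
// runs in the background and can be lost on a crash or HA failover; by
// default, processing only continues once the check has completed.
def onIncomingCommitment(
    check: () => Future[Unit],
    doNotAwait: Boolean,
): Future[Unit] =
  if (doNotAwait) {
    val _ = check() // fire and forget
    Future.unit
  } else check()
```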
*/ final case class ParticipantNodeParameterConfig( adminWorkflow: AdminWorkflowConfig = AdminWorkflowConfig(), @@ -383,6 +386,7 @@ final case class ParticipantNodeParameterConfig( automaticallyPerformLogicalSynchronizerUpgrade: Boolean = true, activationFrequencyForWarnAboutConsistencyChecks: Long = 1000, reassignmentsConfig: ReassignmentsConfig = ReassignmentsConfig(), + doNotAwaitOnCheckingIncomingCommitments: Boolean = false, ) extends LocalNodeParametersConfig with UniformCantonConfigValidation diff --git a/sdk/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/event/RecordOrderPublisher.scala b/sdk/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/event/RecordOrderPublisher.scala index 44cfd481ebcb..946f984ff528 100644 --- a/sdk/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/event/RecordOrderPublisher.scala +++ b/sdk/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/event/RecordOrderPublisher.scala @@ -38,6 +38,7 @@ import com.digitalasset.canton.{RequestCounter, SequencerCounter} import java.util.concurrent.atomic.AtomicReference import scala.concurrent.{ExecutionContext, Future} +import scala.util.chaining.* import scala.util.{Failure, Success} /** Helper trait for Online Party Replication event publishing. Refer to methods in the @@ -351,9 +352,8 @@ class RecordOrderPublisher private ( } } - // TODO(#26580) More validation and setting should be done in case of cancelled upgrade (and when attempting the next one) - def setSuccessor(successor: SynchronizerSuccessor): Unit = - synchronizerSuccessor.set(Some(successor)) + def setSuccessor(successor: Option[SynchronizerSuccessor]): Unit = + synchronizerSuccessor.set(successor) private def scheduleBufferingEventTaskImmediately( perform: CantonTimestamp => FutureUnlessShutdown[Unit] @@ -553,8 +553,8 @@ object RecordOrderPublisher { loggerFactory: NamedLoggerFactory, futureSupervisor: FutureSupervisor, clock: Clock, - )(implicit executionContextForPublishing: ExecutionContext): RecordOrderPublisher = { - val rop = new RecordOrderPublisher( + )(implicit executionContextForPublishing: ExecutionContext): RecordOrderPublisher = + new RecordOrderPublisher( psid, initSc, initTimestamp, @@ -565,10 +565,5 @@ object RecordOrderPublisher { loggerFactory, futureSupervisor, clock, - ) - - synchronizerSuccessor.foreach(rop.setSuccessor) - - rop - } + ).tap(_.setSuccessor(synchronizerSuccessor)) } diff --git a/sdk/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/ledger/api/LedgerApiServer.scala b/sdk/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/ledger/api/LedgerApiServer.scala index 5a0b11a7882a..68c86544e88f 100644 --- a/sdk/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/ledger/api/LedgerApiServer.scala +++ b/sdk/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/ledger/api/LedgerApiServer.scala @@ -42,7 +42,12 @@ import com.digitalasset.canton.participant.config.{ ParticipantNodeConfig, TestingTimeServiceConfig, } -import com.digitalasset.canton.participant.store.{ContractStore, ParticipantNodePersistentState} +import com.digitalasset.canton.participant.store.{ + ContractStore, + ParticipantNodePersistentState, + ParticipantPruningStore, + PruningOffsetServiceImpl, +} import com.digitalasset.canton.participant.sync.CantonSyncService import com.digitalasset.canton.participant.{ 
diff --git a/sdk/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/ledger/api/LedgerApiServer.scala b/sdk/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/ledger/api/LedgerApiServer.scala
index 5a0b11a7882a..68c86544e88f 100644
--- a/sdk/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/ledger/api/LedgerApiServer.scala
+++ b/sdk/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/ledger/api/LedgerApiServer.scala
@@ -42,7 +42,12 @@ import com.digitalasset.canton.participant.config.{
   ParticipantNodeConfig,
   TestingTimeServiceConfig,
 }
-import com.digitalasset.canton.participant.store.{ContractStore, ParticipantNodePersistentState}
+import com.digitalasset.canton.participant.store.{
+  ContractStore,
+  ParticipantNodePersistentState,
+  ParticipantPruningStore,
+  PruningOffsetServiceImpl,
+}
 import com.digitalasset.canton.participant.sync.CantonSyncService
 import com.digitalasset.canton.participant.{
   LedgerApiServerBootstrapUtils,
@@ -103,7 +108,8 @@ class LedgerApiServer(
     cantonParameterConfig: ParticipantNodeParameters,
     testingTimeService: Option[TimeServiceBackend],
     adminTokenDispenser: CantonAdminTokenDispenser,
-    cantonContractStore: ContractStore,
+    participantContractStore: Eval[ContractStore],
+    participantPruningStore: Eval[ParticipantPruningStore],
     enableCommandInspection: Boolean,
     tracerProvider: TracerProvider,
     grpcApiMetrics: LedgerApiServerMetrics,
@@ -257,7 +263,9 @@ class LedgerApiServer(
           )(
             loggingContext
           ),
-          cantonContractStore = cantonContractStore,
+          participantContractStore = participantContractStore.value,
+          pruningOffsetService =
+            PruningOffsetServiceImpl(participantPruningStore.value, loggerFactory),
         )
         _ = timedSyncService.registerInternalIndexService(new InternalIndexService {
           override def activeContracts(
@@ -580,7 +588,8 @@ object LedgerApiServer {
       cantonParameterConfig = parameters,
       testingTimeService = ledgerTestingTimeService,
       adminTokenDispenser = adminTokenDispenser,
-      cantonContractStore = participantNodePersistentState.map(_.contractStore).value,
+      participantContractStore = participantNodePersistentState.map(_.contractStore),
+      participantPruningStore = participantNodePersistentState.map(_.pruningStore),
       enableCommandInspection = config.ledgerApi.enableCommandInspection,
       tracerProvider = tracerProvider,
       grpcApiMetrics = metrics,
diff --git a/sdk/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/protocol/ParticipantTopologyTerminateProcessing.scala b/sdk/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/protocol/ParticipantTopologyTerminateProcessing.scala
index 49311b8d7d93..50b48fac260c 100644
--- a/sdk/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/protocol/ParticipantTopologyTerminateProcessing.scala
+++ b/sdk/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/protocol/ParticipantTopologyTerminateProcessing.scala
@@ -100,12 +100,21 @@ class ParticipantTopologyTerminateProcessing(
   override def notifyUpgradeAnnouncement(
       successor: SynchronizerSuccessor
   )(implicit traceContext: TraceContext): Unit = {
-    logger.debug(
+    logger.info(
       s"Node is notified about the upgrade of $psid to ${successor.psid} scheduled at ${successor.upgradeTime}"
     )
 
     lsuCallback.registerCallback(successor)
-    recordOrderPublisher.setSuccessor(successor)
+    recordOrderPublisher.setSuccessor(Some(successor))
+  }
+
+  override def notifyUpgradeCancellation()(implicit traceContext: TraceContext): Unit = {
+    logger.info(
+      "Node is notified about the cancellation of the upgrade"
+    )
+
+    lsuCallback.unregisterCallback()
+    recordOrderPublisher.setSuccessor(None)
   }
 
   private def scheduleEvent(
diff --git a/sdk/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/protocol/TransactionProcessingSteps.scala b/sdk/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/protocol/TransactionProcessingSteps.scala
index 95da22f62d02..172a019a4807 100644
--- a/sdk/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/protocol/TransactionProcessingSteps.scala
+++ b/sdk/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/protocol/TransactionProcessingSteps.scala
@@ -73,7 +73,7 @@ import com.digitalasset.canton.participant.protocol.validation.TimeValidator.Tim
 import com.digitalasset.canton.participant.store.*
 import com.digitalasset.canton.participant.sync.*
 import com.digitalasset.canton.participant.sync.SyncServiceError.SyncServiceAlarm
-import com.digitalasset.canton.participant.util.DAMLe.{CreateNodeEnricher, TransactionEnricher}
+import com.digitalasset.canton.participant.util.DAMLe.{ContractEnricher, TransactionEnricher}
 import com.digitalasset.canton.platform.apiserver.execution.CommandProgressTracker
 import com.digitalasset.canton.protocol.*
 import com.digitalasset.canton.protocol.ContractIdAbsolutizer.ContractIdAbsolutizationDataV1
@@ -130,12 +130,13 @@ class TransactionProcessingSteps(
     crypto: SynchronizerCryptoClient,
     metrics: TransactionProcessingMetrics,
     transactionEnricher: TransactionEnricher,
-    createNodeEnricher: CreateNodeEnricher,
+    createNodeEnricher: ContractEnricher,
     authorizationValidator: AuthorizationValidator,
     internalConsistencyChecker: InternalConsistencyChecker,
     tracker: CommandProgressTracker,
     protected val loggerFactory: NamedLoggerFactory,
     futureSupervisor: FutureSupervisor,
+    messagePayloadLoggingEnabled: Boolean,
 )(implicit val ec: ExecutionContext)
     extends ProcessingSteps[
       SubmissionParam,
@@ -312,8 +313,13 @@ class TransactionProcessingSteps(
     override def maxSequencingTimeO: OptionT[FutureUnlessShutdown, CantonTimestamp] = OptionT.liftF(
       recentSnapshot.ipsSnapshot.findDynamicSynchronizerParametersOrDefault(protocolVersion).map {
         synchronizerParameters =>
-          CantonTimestamp(transactionMeta.ledgerEffectiveTime)
+          val maxSequencingTimeFromLET = CantonTimestamp(transactionMeta.ledgerEffectiveTime)
             .add(synchronizerParameters.ledgerTimeRecordTimeTolerance.unwrap)
+          submitterInfo.externallySignedSubmission
+            .flatMap(_.maxRecordTimeO)
+            .map(CantonTimestamp.apply)
+            .map(_.min(maxSequencingTimeFromLET))
+            .getOrElse(maxSequencingTimeFromLET)
       }
     )
 
@@ -916,6 +922,7 @@ class TransactionProcessingSteps(
           transactionEnricher,
           createNodeEnricher,
           logger,
+          messagePayloadLoggingEnabled,
         )
 
         consistencyResultE = ContractConsistencyChecker
diff --git a/sdk/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/protocol/TransactionProcessor.scala b/sdk/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/protocol/TransactionProcessor.scala
index 5a7a360eab2c..cfc9f85a5dce 100644
--- a/sdk/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/protocol/TransactionProcessor.scala
+++ b/sdk/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/protocol/TransactionProcessor.scala
@@ -79,6 +79,7 @@ class TransactionProcessor(
     packageResolver: PackageResolver,
     override val testingConfig: TestingConfigInternal,
     promiseFactory: PromiseUnlessShutdownFactory,
+    messagePayloadLoggingEnabled: Boolean,
 )(implicit val ec: ExecutionContext)
     extends ProtocolProcessor[
       TransactionProcessingSteps.SubmissionParam,
@@ -108,7 +109,7 @@ class TransactionProcessor(
       crypto,
       metrics,
       damle.enrichTransaction,
-      damle.enrichCreateNode,
+      damle.enrichContract,
       new AuthorizationValidator(participantId),
       new InternalConsistencyChecker(
         loggerFactory
@@ -116,6 +117,7 @@ class TransactionProcessor(
       commandProgressTracker,
       loggerFactory,
       futureSupervisor,
+      messagePayloadLoggingEnabled,
     ),
     inFlightSubmissionSynchronizerTracker,
     ephemeral,
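In the `maxSequencingTimeO` change above, an externally signed submission may carry a `maxRecordTime`, and it is only allowed to tighten the deadline derived from the ledger effective time plus the synchronizer's tolerance, never to extend it. A sketch of that clamping with `java.time`; the names are illustrative rather than Canton's:

    import java.time.{Duration, Instant}

    object MaxSequencingTime {
      def compute(
          ledgerEffectiveTime: Instant,
          ledgerTimeTolerance: Duration,
          maxRecordTimeO: Option[Instant],
      ): Instant = {
        val fromLet = ledgerEffectiveTime.plus(ledgerTimeTolerance)
        // min(userBound, letBound): the user-supplied bound can only shorten the window.
        maxRecordTimeO.fold(fromLet)(mrt => if (mrt.isBefore(fromLet)) mrt else fromLet)
      }
    }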
diff --git a/sdk/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/protocol/validation/AuthenticationValidator.scala b/sdk/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/protocol/validation/AuthenticationValidator.scala
index 4653ed877a45..4b22de7d7e4b 100644
--- a/sdk/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/protocol/validation/AuthenticationValidator.scala
+++ b/sdk/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/protocol/validation/AuthenticationValidator.scala
@@ -37,7 +37,8 @@ import com.digitalasset.canton.participant.protocol.validation.ModelConformanceC
   LazyAsyncReInterpretation,
   LazyAsyncReInterpretationMap,
 }
-import com.digitalasset.canton.participant.util.DAMLe.{CreateNodeEnricher, TransactionEnricher}
+import com.digitalasset.canton.participant.util.DAMLe.{ContractEnricher, TransactionEnricher}
+import com.digitalasset.canton.protocol.hash.HashTracer
 import com.digitalasset.canton.protocol.{ExternalAuthorization, RequestId}
 import com.digitalasset.canton.topology.{ParticipantId, PhysicalSynchronizerId}
 import com.digitalasset.canton.tracing.TraceContext
@@ -65,8 +66,9 @@ private[protocol] object AuthenticationValidator {
       reInterpretedTopLevelViewsEval: LazyAsyncReInterpretationMap,
       synchronizerId: PhysicalSynchronizerId,
       transactionEnricher: TransactionEnricher,
-      createNodeEnricher: CreateNodeEnricher,
+      createNodeEnricher: ContractEnricher,
       logger: TracedLogger,
+      messagePayloadLoggingEnabled: Boolean,
   )(implicit
       traceContext: TraceContext,
       executionContext: ExecutionContext,
@@ -104,6 +106,7 @@ private[protocol] object AuthenticationValidator {
             transactionEnricher = transactionEnricher,
             createNodeEnricher = createNodeEnricher,
             logger = logger,
+            messagePayloadLoggingEnabled = messagePayloadLoggingEnabled,
           )
         case None =>
           // If we don't have the re-interpreted transaction for this view it's either a programming error
@@ -266,9 +269,10 @@ private[protocol] object AuthenticationValidator {
       reInterpretationET: LazyAsyncReInterpretation,
       synchronizerId: PhysicalSynchronizerId,
       transactionEnricher: TransactionEnricher,
-      createNodeEnricher: CreateNodeEnricher,
+      createNodeEnricher: ContractEnricher,
       requestId: RequestId,
       logger: TracedLogger,
+      messagePayloadLoggingEnabled: Boolean,
   )(implicit
       traceContext: TraceContext,
       executionContext: ExecutionContext,
@@ -289,6 +293,13 @@ private[protocol] object AuthenticationValidator {
             )
           )
         case Right(reInterpretedTopLevelView) =>
+          // The trace contains detailed information about the transaction and is expensive to compute.
+          // Only compute it if message payload logging is enabled and the logger is at debug level.
+          val hashTracer =
+            if (messagePayloadLoggingEnabled && logger.underlying.isDebugEnabled) {
+              Some(HashTracer.StringHashTracer(traceSubNodes = true))
+            } else None
+
           reInterpretedTopLevelView
             .computeHash(
               externalAuthorization.hashingSchemeVersion,
@@ -300,6 +311,7 @@ private[protocol] object AuthenticationValidator {
               protocolVersion,
               transactionEnricher,
               createNodeEnricher,
+              hashTracer.getOrElse[HashTracer](HashTracer.NoOp),
             )
             // If Hash computation is successful, verify the signature is valid
             .flatMap { hash =>
@@ -308,7 +320,14 @@ private[protocol] object AuthenticationValidator {
                 hash,
                 externalAuthorization,
                 submitterMetadata.actAs,
-              ).map(_.toLeft(Some(hash)))
+              ).map {
+                case error @ Some(_) =>
+                  hashTracer.map(_.result).foreach { trace =>
+                    logger.debug("Transaction hash computation trace:\n" + trace)
+                  }
+                  error
+                case None => None
+              }.map(_.toLeft(Some(hash)))
             )
           }
           .map(res =>
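The validator above only allocates the string-building hash tracer when message-payload logging is configured and the logger is actually at debug level, because the trace is expensive to build and would otherwise be thrown away. The same guard in miniature, using slf4j directly; `BufferTracer` is a hypothetical simplification of `HashTracer.StringHashTracer`:

    import org.slf4j.LoggerFactory

    final class BufferTracer {
      private val sb = new StringBuilder
      def record(line: String): Unit = { sb.append(line).append('\n'); () }
      def result: String = sb.toString
    }

    object GatedTracing {
      private val logger = LoggerFactory.getLogger("hash-trace")

      // Allocate the tracer only if its output can actually be observed.
      def tracerFor(messagePayloadLoggingEnabled: Boolean): Option[BufferTracer] =
        Option.when(messagePayloadLoggingEnabled && logger.isDebugEnabled)(new BufferTracer)

      // On a verification failure, emit whatever was traced.
      def logOnFailure(tracerO: Option[BufferTracer]): Unit =
        tracerO.foreach(t => logger.debug("Transaction hash computation trace:\n" + t.result))
    }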
diff --git a/sdk/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/protocol/validation/ModelConformanceChecker.scala b/sdk/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/protocol/validation/ModelConformanceChecker.scala
index 8e50b70d558f..2bfd2c4f9cc1 100644
--- a/sdk/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/protocol/validation/ModelConformanceChecker.scala
+++ b/sdk/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/protocol/validation/ModelConformanceChecker.scala
@@ -5,7 +5,7 @@ package com.digitalasset.canton.participant.protocol.validation
 
 import cats.Eval
 import cats.data.EitherT
-import cats.implicits.toFoldableOps
+import cats.implicits.{toFoldableOps, toFunctorOps}
 import cats.syntax.alternative.*
 import cats.syntax.bifunctor.*
 import cats.syntax.parallel.*
@@ -27,6 +27,7 @@ import com.digitalasset.canton.participant.protocol.validation.ModelConformanceC
 import com.digitalasset.canton.participant.store.ExtendedContractLookup
 import com.digitalasset.canton.participant.util.DAMLe
 import com.digitalasset.canton.participant.util.DAMLe.*
+import com.digitalasset.canton.platform.store.dao.events.InputContractPackages
 import com.digitalasset.canton.protocol.*
 import com.digitalasset.canton.protocol.ContractIdAbsolutizer.{
   ContractIdAbsolutizationDataV1,
@@ -37,7 +38,7 @@ import com.digitalasset.canton.protocol.WellFormedTransaction.{
   WithSuffixesAndMerged,
   WithoutSuffixes,
 }
-import com.digitalasset.canton.protocol.hash.HashTracer.NoOp
+import com.digitalasset.canton.protocol.hash.HashTracer
 import com.digitalasset.canton.sequencing.protocol.MediatorGroupRecipient
 import com.digitalasset.canton.topology.client.TopologySnapshot
 import com.digitalasset.canton.topology.{ParticipantId, SynchronizerId}
@@ -47,7 +48,6 @@ import com.digitalasset.canton.util.{ContractValidator, ErrorUtil, RoseTree}
 import com.digitalasset.canton.version.{HashingSchemeVersion, ProtocolVersion}
 import com.digitalasset.canton.{LfKeyResolver, LfPartyId, checked}
 import com.digitalasset.daml.lf.data.Ref.{CommandId, Identifier, PackageId, PackageName}
-import com.digitalasset.daml.lf.transaction.{CreationTime, FatContractInstance}
 
 import java.util.UUID
 import scala.concurrent.ExecutionContext
@@ -258,10 +258,7 @@ class ModelConformanceChecker(
 
     val seed = viewParticipantData.actionDescription.seedOption
 
-    val inputContracts = view.tryFlattenToParticipantViews
-      .flatMap(_.viewParticipantData.coreInputs)
-      .map { case (cid, InputContract(contract, _)) => cid -> contract }
-      .toMap
+    val inputContracts = view.inputContracts.fmap(_.contract)
 
     val contractAndKeyLookup = new ExtendedContractLookup(inputContracts, resolverFromView)
 
@@ -465,27 +462,29 @@ object ModelConformanceChecker {
       synchronizerId: SynchronizerId,
       protocolVersion: ProtocolVersion,
       transactionEnricher: TransactionEnricher,
-      createNodeEnricher: CreateNodeEnricher,
+      contractEnricher: ContractEnricher,
+      hashTracer: HashTracer,
   )(implicit
       traceContext: TraceContext,
      ec: ExecutionContext,
   ): EitherT[FutureUnlessShutdown, String, Hash] =
     for {
       // Enrich the transaction...
-      enrichedTransaction <- transactionEnricher(
-        reInterpretationResult.transaction
-      )(traceContext)
+      enrichedTransaction <- transactionEnricher(reInterpretationResult.transaction)(traceContext)
         .leftMap(_.toString)
       // ... and the input contracts so that labels and template identifiers are set and can be included in the hash
-      enrichedInputContracts <- viewInputContracts.toList
-        .parTraverse { case (cid, storedContract) =>
-          createNodeEnricher(storedContract.toLf)(traceContext).map { enrichedNode =>
-            cid -> FatContractInstance.fromCreateNode(
-              enrichedNode,
-              storedContract.inst.createdAt: CreationTime,
-              storedContract.inst.authenticationData,
-            )
-          }
+      inputContracts <- EitherT.fromEither[FutureUnlessShutdown](
+        InputContractPackages
+          .forTransactionWithContracts(enrichedTransaction.transaction, viewInputContracts)
+          .leftMap(mismatch =>
+            s"The following input contract IDs were not found in both the transaction and the provided contracts: $mismatch"
+          )
+      )
+
+      enrichedInputContracts <- inputContracts.toList
+        .parTraverse { case (cid, (inst, targetPackageIds)) =>
+          contractEnricher((inst, targetPackageIds))(traceContext).map(cid -> _)
         }
         .map(_.toMap)
         .leftMap(_.toString)
@@ -506,7 +505,7 @@ object ModelConformanceChecker {
             ),
             reInterpretationResult.metadata.seeds,
             protocolVersion,
-            hashTracer = NoOp,
+            hashTracer = hashTracer,
           )
           .leftMap(_.message)
       )
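`InputContractPackages.forTransactionWithContracts`, introduced by this PR, pairs each input contract with the package ids to enrich it against and fails when the transaction's input contract ids and the supplied contract map disagree. Assuming the mismatch report is essentially a symmetric set difference, the check reduces to:

    object InputContractCheck {
      // Left: ids present in only one of the two sources; Right: the validated map.
      def checkSameContracts[C](
          usedByTransaction: Set[String],
          provided: Map[String, C],
      ): Either[Set[String], Map[String, C]] = {
        val mismatch =
          (usedByTransaction diff provided.keySet) union (provided.keySet diff usedByTransaction)
        Either.cond(mismatch.isEmpty, provided, mismatch)
      }
    }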
diff --git a/sdk/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/pruning/AcsCommitmentProcessor.scala b/sdk/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/pruning/AcsCommitmentProcessor.scala
index 536b5b17fd9b..d0eb543e5c84 100644
--- a/sdk/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/pruning/AcsCommitmentProcessor.scala
+++ b/sdk/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/pruning/AcsCommitmentProcessor.scala
@@ -221,6 +221,7 @@ class AcsCommitmentProcessor private (
     batchingConfig: BatchingConfig,
     maxCommitmentSendDelayMillis: Option[NonNegativeInt],
     increasePerceivedComputationTimeForCommitments: Option[java.time.Duration],
+    doNotAwaitOnCheckingIncomingCommitments: Boolean,
 )(implicit ec: ExecutionContext)
     extends AcsChangeListener
     with FlagCloseable
@@ -1036,7 +1037,7 @@
       batch: Seq[OpenEnvelope[SignedProtocolMessage[AcsCommitment]]],
   )(implicit traceContext: TraceContext): HandlerResult = {
 
-    if (batch.lengthCompare(1) != 0) {
+    if (batch.sizeIs != 1) {
       Errors.InternalError
         .MultipleCommitmentsInBatch(psid.logical, timestamp, batch.length)
         .discard
@@ -1342,8 +1343,8 @@
 
   private def checkCommitment(
       commitment: AcsCommitment
-  )(implicit traceContext: TraceContext): FutureUnlessShutdown[Unit] =
-    dbQueue
+  )(implicit traceContext: TraceContext): FutureUnlessShutdown[Unit] = {
+    val fut = dbQueue
       .executeUS(
         {
          // Make sure that the ready-for-remote check is atomic with buffering the commitment
@@ -1360,6 +1361,15 @@
       )
       .flatten
 
+    if (doNotAwaitOnCheckingIncomingCommitments) {
+      FutureUnlessShutdownUtil.doNotAwaitUnlessShutdown(
+        fut,
+        s"check incoming commitment for ${commitment.period} by ${commitment.sender}",
+      )
+      FutureUnlessShutdown.unit
+    } else fut
+  }
+
   private def indicateReadyForRemote(timestamp: CantonTimestampSecond)(implicit
       traceContext: TraceContext
   ): Unit = {
@@ -2214,6 +2224,7 @@ object AcsCommitmentProcessor extends HasLoggerName {
       batchingConfig: BatchingConfig,
       maxCommitmentSendDelayMillis: Option[NonNegativeInt] = None,
       increasePerceivedComputationTimeForCommitments: Option[java.time.Duration] = None,
+      doNotAwaitOnCheckingIncomingCommitments: Boolean,
   )(implicit
       ec: ExecutionContext,
       traceContext: TraceContext,
@@ -2273,6 +2284,7 @@ object AcsCommitmentProcessor extends HasLoggerName {
       batchingConfig,
       maxCommitmentSendDelayMillis,
       increasePerceivedComputationTimeForCommitments,
+      doNotAwaitOnCheckingIncomingCommitments,
     )
     // We trigger the processing of the buffered commitments, but we do not wait for it to complete here,
     // because, if processing buffered required topology updates that go through the same queue, we'd create a deadlock.
diff --git a/sdk/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/store/ActiveContractStore.scala b/sdk/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/store/ActiveContractStore.scala
index 668aa51c5774..559ba216d57d 100644
--- a/sdk/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/store/ActiveContractStore.scala
+++ b/sdk/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/store/ActiveContractStore.scala
@@ -336,7 +336,7 @@ trait ActiveContractStore
 
   protected def getSynchronizerIndices(
       synchronizers: Seq[SynchronizerId]
-  ): CheckedT[FutureUnlessShutdown, AcsError, AcsWarning, Map[
+  )(implicit traceContext: TraceContext): CheckedT[FutureUnlessShutdown, AcsError, AcsWarning, Map[
     SynchronizerId,
     IndexedSynchronizer,
   ]] =
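With `doNotAwaitOnCheckingIncomingCommitments` set, `checkCommitment` schedules the work and returns immediately, so a crash or HA failover can lose a check that never ran, exactly the trade-off the new config doc warns about. A simplified sketch with plain Futures; the logging is illustrative:

    import scala.concurrent.{ExecutionContext, Future}

    object CommitmentCheck {
      def run(
          check: => Future[Unit],
          doNotAwait: Boolean,
          description: String,
      )(implicit ec: ExecutionContext): Future[Unit] =
        if (doNotAwait) {
          // Fire and forget: surface failures in the log only.
          check.failed.foreach(t => println(s"$description failed: $t"))
          Future.unit
        } else check // caller awaits the result as before
    }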
diff --git a/sdk/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/store/PruningOffsetServiceImpl.scala b/sdk/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/store/PruningOffsetServiceImpl.scala
new file mode 100644
index 000000000000..13e9f5eaf975
--- /dev/null
+++ b/sdk/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/store/PruningOffsetServiceImpl.scala
@@ -0,0 +1,29 @@
+// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
+// SPDX-License-Identifier: Apache-2.0
+
+package com.digitalasset.canton.participant.store
+
+import com.digitalasset.canton.data.Offset
+import com.digitalasset.canton.logging.{NamedLoggerFactory, NamedLogging}
+import com.digitalasset.canton.networking.grpc.CantonGrpcUtil.GrpcErrors.AbortedDueToShutdown
+import com.digitalasset.canton.platform.store.PruningOffsetService
+import com.digitalasset.canton.tracing.TraceContext
+
+import scala.concurrent.{ExecutionContext, Future}
+
+final case class PruningOffsetServiceImpl(
+    participantPruningStore: ParticipantPruningStore,
+    loggerFactory: NamedLoggerFactory,
+)(implicit ec: ExecutionContext)
+    extends PruningOffsetService
+    with NamedLogging {
+
+  override def pruningOffset(implicit
+      traceContext: TraceContext
+  ): Future[Option[Offset]] =
+    participantPruningStore
+      .pruningStatus()
+      .map(_.startedO)
+      .failOnShutdownTo(AbortedDueToShutdown.Error().asGrpcError)
+
+}
diff --git a/sdk/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/store/db/DbReassignmentStore.scala b/sdk/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/store/db/DbReassignmentStore.scala
index 5a5744b7f06a..f8b777314bd4 100644
--- a/sdk/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/store/db/DbReassignmentStore.scala
+++ b/sdk/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/store/db/DbReassignmentStore.scala
@@ -66,14 +66,14 @@ class DbReassignmentStore(
 
   private def indexedSynchronizerF[T[_]: SingletonTraverse](
       synchronizerId: T[SynchronizerId]
-  ): FutureUnlessShutdown[T[IndexedSynchronizer]] =
+  )(implicit traceContext: TraceContext): FutureUnlessShutdown[T[IndexedSynchronizer]] =
     synchronizerId.traverseSingleton((_, synchronizerId) =>
       IndexedSynchronizer.indexed(indexedStringStore)(synchronizerId)
     )
 
   private def indexedSynchronizerET[E, T[_]: SingletonTraverse](
       synchronizerId: T[SynchronizerId]
-  ): EitherT[FutureUnlessShutdown, E, T[IndexedSynchronizer]] =
+  )(implicit traceContext: TraceContext): EitherT[FutureUnlessShutdown, E, T[IndexedSynchronizer]] =
     EitherT.right[E](indexedSynchronizerF(synchronizerId))
 
   private def synchronizerIdF(
diff --git a/sdk/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/store/memory/InMemoryActiveContractStore.scala b/sdk/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/store/memory/InMemoryActiveContractStore.scala
index 63c735b44bd0..3b97beefc98a 100644
--- a/sdk/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/store/memory/InMemoryActiveContractStore.scala
+++ b/sdk/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/store/memory/InMemoryActiveContractStore.scala
@@ -224,7 +224,7 @@ class InMemoryActiveContractStore(
       reassignments: Seq[
         (LfContractId, ReassignmentTag[SynchronizerId], ReassignmentCounter, TimeOfChange)
       ]
-  ): CheckedT[FutureUnlessShutdown, AcsError, AcsWarning, Seq[
+  )(implicit traceContext: TraceContext): CheckedT[FutureUnlessShutdown, AcsError, AcsWarning, Seq[
     (LfContractId, Int, ReassignmentCounter, TimeOfChange)
   ]] = {
     val synchronizers = reassignments.map { case (_, synchronizer, _, _) =>
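Several store signatures in this PR (here, and in `DbReassignmentStore`, `InMemoryActiveContractStore` and `SyncPersistentStateManager`) gain an `(implicit traceContext: TraceContext)` parameter so the trace reaches the DB layer without every caller threading it explicitly. The mechanics in miniature; `TraceCtx` is a stand-in for Canton's `TraceContext`:

    final case class TraceCtx(traceId: String)

    object TracedLookup {
      // The implicit parameter travels with the call chain for free.
      def lookupIndex(name: String)(implicit tc: TraceCtx): Int =
        name.length // a real store would attach tc.traceId to the DB query

      def demo(): Int = {
        implicit val tc: TraceCtx = TraceCtx("trace-123")
        lookupIndex("synchronizer-a") // tc is supplied implicitly
      }
    }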
diff --git a/sdk/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/sync/ConnectedSynchronizer.scala b/sdk/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/sync/ConnectedSynchronizer.scala
index 47741b5ba38f..656fd7d6c963 100644
--- a/sdk/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/sync/ConnectedSynchronizer.scala
+++ b/sdk/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/sync/ConnectedSynchronizer.scala
@@ -132,7 +132,7 @@ class ConnectedSynchronizer(
     private[sync] val persistent: SyncPersistentState,
     val ephemeral: SyncEphemeralState,
     val packageService: PackageService,
-    synchronizerCrypto: SynchronizerCryptoClient,
+    val synchronizerCrypto: SynchronizerCryptoClient,
     contractValidator: ContractValidator,
     identityPusher: ParticipantTopologyDispatcher,
     topologyProcessor: TopologyTransactionProcessor,
@@ -221,6 +221,7 @@ class ConnectedSynchronizer(
     packageResolver = packageResolver,
     testingConfig = testingConfig,
     promiseUSFactory,
+    parameters.loggingConfig.api.messagePayloads,
   )
 
   private val unassignmentProcessor: UnassignmentProcessor = new UnassignmentProcessor(
@@ -1045,6 +1046,8 @@ object ConnectedSynchronizer {
         clock,
         exitOnFatalFailures = parameters.exitOnFatalFailures,
         parameters.batchingConfig,
+        doNotAwaitOnCheckingIncomingCommitments =
+          parameters.doNotAwaitOnCheckingIncomingCommitments,
       )
       topologyProcessor <- topologyProcessorFactory.create(
        acsCommitmentProcessor.scheduleTopologyTick
diff --git a/sdk/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/sync/LogicalSynchronizerUpgrade.scala b/sdk/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/sync/LogicalSynchronizerUpgrade.scala
index 677c8a00217e..da0065613b97 100644
--- a/sdk/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/sync/LogicalSynchronizerUpgrade.scala
+++ b/sdk/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/sync/LogicalSynchronizerUpgrade.scala
@@ -18,6 +18,10 @@ import com.digitalasset.canton.participant.store.{
 }
 import com.digitalasset.canton.participant.sync.LogicalSynchronizerUpgrade.UpgradabilityCheckResult
 import com.digitalasset.canton.resource.DbExceptionRetryPolicy
+import com.digitalasset.canton.topology.transaction.{
+  SynchronizerUpgradeAnnouncement,
+  TopologyMapping,
+}
 import com.digitalasset.canton.topology.{
   KnownPhysicalSynchronizerId,
   PhysicalSynchronizerId,
@@ -133,8 +137,48 @@ final class LogicalSynchronizerUpgrade(
 
     logger.info(s"Upgrade from $currentPSId to $successorPSId")
 
+    // Ensure the upgrade is not attempted if the announcement was revoked
+    def ensureUpgradeOngoing(): EitherT[FutureUnlessShutdown, String, Unit] = for {
+      topologyStore <- EitherT.fromOption[FutureUnlessShutdown](
+        syncPersistentStateManager.get(currentPSId).map(_.topologyManager.store),
+        "Unable to find topology store",
+      )
+
+      announcements <- EitherT
+        .liftF(
+          topologyStore.findPositiveTransactions(
+            asOf = synchronizerSuccessor.upgradeTime,
+            asOfInclusive = false,
+            isProposal = false,
+            types = Seq(TopologyMapping.Code.SynchronizerUpgradeAnnouncement),
+            filterUid = None,
+            filterNamespace = None,
+          )
+        )
+        .map(_.collectOfMapping[SynchronizerUpgradeAnnouncement])
+        .map(_.result.map(_.transaction.mapping))
+
+      _ <- announcements match {
+        case Seq() => EitherT.leftT[FutureUnlessShutdown, Unit]("No synchronizer upgrade ongoing")
+        case Seq(head) =>
+          EitherT.cond[FutureUnlessShutdown](
+            head.successor == synchronizerSuccessor,
+            (),
+            s"Expected synchronizer successor to be $synchronizerSuccessor but found ${head.successor} in topology state",
+          )
+        case _more =>
+          EitherT.liftF[FutureUnlessShutdown, String, Unit](
+            FutureUnlessShutdown.failed(
+              new IllegalStateException("Found several SynchronizerUpgradeAnnouncements")
+            )
+          )
+      }
+    } yield ()
+
     performIfNotUpgradedYet(successorPSId)(
       for {
+        _ <- ensureUpgradeOngoing()
+
         upgradabilityCheckResult <- EitherT[FutureUnlessShutdown, String, UpgradabilityCheckResult](
           retryPolicy.unlessShutdown(
             upgradabilityCheck(alias, currentPSId, synchronizerSuccessor),
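`ensureUpgradeOngoing` distinguishes three cardinalities: no announcement means the upgrade was revoked, a single announcement must name the expected successor, and more than one is an invariant violation worth failing fast on. The decision table as a plain function over simplified types:

    object AnnouncementCheck {
      final case class Successor(id: String)

      def validate(found: Seq[Successor], expected: Successor): Either[String, Unit] =
        found match {
          case Seq() => Left("No synchronizer upgrade ongoing")
          case Seq(single) =>
            Either.cond(single == expected, (), s"Expected $expected but found $single")
          case _ =>
            throw new IllegalStateException("Found several SynchronizerUpgradeAnnouncements")
        }
    }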
diff --git a/sdk/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/sync/LogicalSynchronizerUpgradeCallback.scala b/sdk/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/sync/LogicalSynchronizerUpgradeCallback.scala
index 0850514dcb0c..876a2b713315 100644
--- a/sdk/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/sync/LogicalSynchronizerUpgradeCallback.scala
+++ b/sdk/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/sync/LogicalSynchronizerUpgradeCallback.scala
@@ -4,13 +4,13 @@
 package com.digitalasset.canton.participant.sync
 
 import com.digitalasset.canton.data.SynchronizerSuccessor
-import com.digitalasset.canton.discard.Implicits.DiscardOps
 import com.digitalasset.canton.logging.{NamedLoggerFactory, NamedLogging}
 import com.digitalasset.canton.time.SynchronizerTimeTracker
 import com.digitalasset.canton.topology.*
 import com.digitalasset.canton.tracing.TraceContext
+import com.digitalasset.canton.util.FutureUnlessShutdownUtil
 
-import java.util.concurrent.atomic.AtomicBoolean
+import java.util.concurrent.atomic.AtomicReference
 import scala.concurrent.{ExecutionContext, Future}
 
 trait LogicalSynchronizerUpgradeCallback {
@@ -22,6 +22,8 @@ trait LogicalSynchronizerUpgradeCallback {
    *   - Successor is registered
    */
   def registerCallback(successor: SynchronizerSuccessor)(implicit traceContext: TraceContext): Unit
+
+  def unregisterCallback(): Unit
 }
 
 object LogicalSynchronizerUpgradeCallback {
@@ -29,6 +31,8 @@ object LogicalSynchronizerUpgradeCallback {
     override def registerCallback(successor: SynchronizerSuccessor)(implicit
         traceContext: TraceContext
     ): Unit = ()
+
+    override def unregisterCallback(): Unit = ()
   }
 }
 
@@ -41,22 +45,37 @@ class LogicalSynchronizerUpgradeCallbackImpl(
     extends LogicalSynchronizerUpgradeCallback
     with NamedLogging {
 
-  private val registered: AtomicBoolean = new AtomicBoolean(false)
+  private val registered: AtomicReference[Option[SynchronizerSuccessor]] = new AtomicReference(None)
 
   def registerCallback(
       successor: SynchronizerSuccessor
   )(implicit traceContext: TraceContext): Unit =
-    if (registered.compareAndSet(false, true)) {
-      logger.info(s"Registering callback for upgrade of $psid to $successor")
+    if (registered.compareAndSet(None, Some(successor))) {
+      logger.info(s"Registering callback for upgrade of $psid to ${successor.psid}")
 
       synchronizerTimeTracker
         .awaitTick(successor.upgradeTime)
         .getOrElse(Future.unit)
         .foreach { _ =>
-          synchronizerConnectionsManager.upgradeSynchronizerTo(psid, successor).discard
+          if (registered.get().contains(successor)) {
+            val upgradeResultF = synchronizerConnectionsManager
+              .upgradeSynchronizerTo(psid, successor)
+              .value
+              .map(
+                _.fold(err => logger.error(s"Upgrade to ${successor.psid} failed: $err"), _ => ())
+              )
+
+            FutureUnlessShutdownUtil.doNotAwaitUnlessShutdown(
+              upgradeResultF,
+              s"Failed to upgrade to ${successor.psid}",
+            )
+          } else
+            logger.info(s"Upgrade to ${successor.psid} was cancelled, not executing the upgrade.")
         }
     } else
      logger.info(
-        s"Not registering callback for upgrade of $psid to $successor because it was already done"
+        s"Not registering callback for upgrade of $psid to ${successor.psid} because it was already done"
      )
+
+  override def unregisterCallback(): Unit = registered.set(None)
 }
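Switching `registered` from `AtomicBoolean` to `AtomicReference[Option[SynchronizerSuccessor]]` lets the callback, when the upgrade time finally arrives, re-check that the same successor is still registered, so a cancellation in between is honoured. A condensed sketch of that shape; `fire` stands in for `upgradeSynchronizerTo`:

    import java.util.concurrent.atomic.AtomicReference

    final class UpgradeCallback[S](fire: S => Unit) {
      private val registered = new AtomicReference[Option[S]](None)

      // Register at most once; None is a singleton, so CAS against it is safe.
      def register(successor: S): Unit =
        if (registered.compareAndSet(None, Some(successor)))
          onUpgradeTime(successor) // in reality scheduled for the upgrade time

      def unregister(): Unit = registered.set(None)

      private def onUpgradeTime(successor: S): Unit =
        if (registered.get().contains(successor)) fire(successor)
        else () // cancelled in the meantime: skip the upgrade
    }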
diff --git a/sdk/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/sync/PartyAllocation.scala b/sdk/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/sync/PartyAllocation.scala
index 1bfe22483396..ee55f2cf8d5b 100644
--- a/sdk/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/sync/PartyAllocation.scala
+++ b/sdk/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/sync/PartyAllocation.scala
@@ -7,7 +7,7 @@ import cats.data.EitherT
 import cats.implicits.showInterpolator
 import cats.syntax.bifunctor.*
 import cats.syntax.either.*
-import cats.syntax.traverse.*
+import cats.syntax.parallel.*
 import com.digitalasset.canton.config.CantonRequireTypes.String255
 import com.digitalasset.canton.config.ProcessingTimeout
 import com.digitalasset.canton.ledger.participant.state.*
@@ -72,15 +72,18 @@ private[sync] class PartyAllocation(
       _ <- EitherT
         .cond[FutureUnlessShutdown](isActive(), (), SyncServiceError.Synchronous.PassiveNode)
         .leftWiden[SubmissionResult]
-      // External parties have their own namespace, local parties re-use the participant's namespace
-      namespace = externalPartyOnboardingDetails
-        .map(_.namespace)
-        .getOrElse(participantId.uid.namespace)
-      id <- UniqueIdentifier
-        .create(partyName, namespace)
-        .leftMap(SyncServiceError.Synchronous.internalError)
-        .toEitherT[FutureUnlessShutdown]
-      partyId = PartyId(id)
+      // External parties have their own namespace
+      partyId <- externalPartyOnboardingDetails
+        .map(_.partyId)
+        .map(EitherT.pure[FutureUnlessShutdown, SubmissionResult](_))
+        .getOrElse {
+          UniqueIdentifier
+            // local parties re-use the participant's namespace
+            .create(partyName, participantId.uid.namespace)
+            .map(id => PartyId(id))
+            .leftMap(SyncServiceError.Synchronous.internalError)
+            .toEitherT[FutureUnlessShutdown]
+        }
       validatedSubmissionId <- EitherT.fromEither[FutureUnlessShutdown](
         String255
           .fromProtoPrimitive(rawSubmissionId, "LedgerSubmissionId")
@@ -97,16 +100,19 @@ private[sync] class PartyAllocation(
               .rpcStatus()
           ),
         )
-      _ <- partyNotifier
-        .expectPartyAllocationForNodes(
-          partyId,
-          participantId,
-          validatedSubmissionId,
-        )
-        .leftMap[SubmissionResult] { err =>
-          reject(err, Some(Code.ABORTED))
-        }
-        .toEitherT[FutureUnlessShutdown]
+      _ <-
+        if (externalPartyOnboardingDetails.forall(_.fullyAllocatesParty)) {
+          partyNotifier
+            .expectPartyAllocationForNodes(
+              partyId,
+              participantId,
+              validatedSubmissionId,
+            )
+            .leftMap[SubmissionResult] { err =>
+              reject(err, Some(Code.ABORTED))
+            }
+            .toEitherT[FutureUnlessShutdown]
+        } else EitherT.pure[FutureUnlessShutdown, SubmissionResult](())
       _ <- (externalPartyOnboardingDetails match {
         case Some(details) =>
           partyOps.allocateExternalParty(participantId, details, synchronizerId)
@@ -120,7 +126,7 @@ private[sync] class PartyAllocation(
             e.code.category.grpcCode,
           )
           case IdentityManagerParentError(e) => reject(e.cause, e.code.category.grpcCode)
-          case e => reject(e.toString, Some(Code.INTERNAL))
+          case e => reject(e.cause, e.code.category.grpcCode)
         }
         .leftMap { x =>
           partyNotifier.expireExpectedPartyAllocationForNodes(
@@ -133,17 +139,20 @@ private[sync] class PartyAllocation(
       // TODO(i25076) remove this waiting logic once topology events are published on the ledger api
       // wait for parties to be available on the currently connected synchronizers
       waitingSuccessful <- EitherT
-        .right[SubmissionResult](if (externalPartyOnboardingDetails.forall(!_.isMultiHosted)) {
-          connectedSynchronizersLookup.get(synchronizerId).traverse { connectedSynchronizer =>
-            connectedSynchronizer.topologyClient
-              .awaitUS(
-                _.inspectKnownParties(partyId.filterString, participantId.filterString)
-                  .map(_.nonEmpty),
-                timeouts.network.duration,
-              )
-              .map(synchronizerId -> _)
-          }
-        } else FutureUnlessShutdown.pure(None))
+        .right[SubmissionResult](
+          if (externalPartyOnboardingDetails.forall(_.fullyAllocatesParty)) {
+            connectedSynchronizersLookup.snapshot.toSeq.parTraverse {
+              case (synchronizerId, connectedSynchronizer) =>
+                connectedSynchronizer.topologyClient
+                  .awaitUS(
+                    _.inspectKnownParties(partyId.filterString, participantId.filterString)
+                      .map(_.nonEmpty),
+                    timeouts.network.duration,
+                  )
+                  .map(synchronizerId -> _)
+            }
+          } else FutureUnlessShutdown.pure(Seq.empty)
+        )
       _ = waitingSuccessful.foreach { case (synchronizerId, successful) =>
         if (!successful)
           logger.warn(
diff --git a/sdk/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/sync/SyncEphemeralStateFactory.scala b/sdk/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/sync/SyncEphemeralStateFactory.scala
index e61e807d9d52..950dfec2f750 100644
--- a/sdk/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/sync/SyncEphemeralStateFactory.scala
+++ b/sdk/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/sync/SyncEphemeralStateFactory.scala
@@ -99,7 +99,7 @@ class SyncEphemeralStateFactoryImpl(
          of the successor a second time.
        */
       synchronizerSuccessorO <- synchronizerCrypto.ips.currentSnapshotApproximation
-        .isSynchronizerUpgradeOngoing()
+        .synchronizerUpgradeOngoing()
 
       recordOrderPublisher = RecordOrderPublisher(
         persistentState.psid,
diff --git a/sdk/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/sync/SyncPersistentStateManager.scala b/sdk/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/sync/SyncPersistentStateManager.scala
index 6c46d1a53150..32ba7ea876db 100644
--- a/sdk/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/sync/SyncPersistentStateManager.scala
+++ b/sdk/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/sync/SyncPersistentStateManager.scala
@@ -200,12 +200,12 @@ class SyncPersistentStateManager(
 
   def getSynchronizerIdx(
       synchronizerId: SynchronizerId
-  ): FutureUnlessShutdown[IndexedSynchronizer] =
+  )(implicit traceContext: TraceContext): FutureUnlessShutdown[IndexedSynchronizer] =
     IndexedSynchronizer.indexed(this.indexedStringStore)(synchronizerId)
 
   def getPhysicalSynchronizerIdx(
       synchronizerId: PhysicalSynchronizerId
-  ): FutureUnlessShutdown[IndexedPhysicalSynchronizer] =
+  )(implicit traceContext: TraceContext): FutureUnlessShutdown[IndexedPhysicalSynchronizer] =
     IndexedPhysicalSynchronizer.indexed(this.indexedStringStore)(synchronizerId)
 
   /** Retrieves the [[com.digitalasset.canton.participant.store.SyncPersistentState]] from the
diff --git a/sdk/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/synchronizer/grpc/GrpcSynchronizerRegistry.scala b/sdk/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/synchronizer/grpc/GrpcSynchronizerRegistry.scala
index f74993b34ecf..82d6434429a5 100644
--- a/sdk/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/synchronizer/grpc/GrpcSynchronizerRegistry.scala
+++ b/sdk/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/synchronizer/grpc/GrpcSynchronizerRegistry.scala
@@ -152,6 +152,13 @@ class GrpcSynchronizerRegistry(
 
     val sequencerConnections: SequencerConnections = config.sequencerConnections
 
+    val useNewConnectionPool = participantNodeParameters.sequencerClient.useNewConnectionPool
+
+    val synchronizerLoggerFactory = loggerFactory.append(
+      "synchronizerId",
+      config.synchronizerId.map(_.toString).getOrElse(config.synchronizerAlias.toString),
+    )
+
     val connectionPoolFactory = new GrpcSequencerConnectionXPoolFactory(
       clientProtocolVersions =
         ProtocolVersionCompatibility.supportedProtocols(participantNodeParameters),
@@ -163,7 +170,7 @@ class GrpcSynchronizerRegistry(
       seedForRandomnessO = testingConfig.sequencerTransportSeed,
       futureSupervisor = futureSupervisor,
       timeouts = timeouts,
-      loggerFactory = loggerFactory,
+      loggerFactory = synchronizerLoggerFactory,
     )
 
     val connectionPoolE = connectionPoolFactory
@@ -171,13 +178,12 @@ class GrpcSynchronizerRegistry(
         sequencerConnections = config.sequencerConnections,
         expectedPSIdO = config.synchronizerId,
         tracingConfig = participantNodeParameters.tracing,
+        name = if (useNewConnectionPool) "main" else "dummy",
       )
       .leftMap[SynchronizerRegistryError](error =>
         SynchronizerRegistryError.SynchronizerRegistryInternalError.InvalidState(error.toString)
       )
 
-    val useNewConnectionPool = participantNodeParameters.sequencerClient.useNewConnectionPool
-
     val runE = for {
       connectionPool <- connectionPoolE.toEitherT[FutureUnlessShutdown]
       _ <-
diff --git a/sdk/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/topology/LedgerServerPartyNotifier.scala b/sdk/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/topology/LedgerServerPartyNotifier.scala
index c42bc93ed450..b152494e9f68 100644
--- a/sdk/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/topology/LedgerServerPartyNotifier.scala
+++ b/sdk/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/topology/LedgerServerPartyNotifier.scala
@@ -155,7 +155,7 @@ class LedgerServerPartyNotifier(
           )
         }
       // propagate admin parties
-      case SynchronizerTrustCertificate(participantId, _) =>
+      case SynchronizerTrustCertificate(participantId, _, _) =>
         Seq(
           (
             participantId.adminParty,
@@ -217,6 +217,11 @@ class LedgerServerPartyNotifier(
           logger.debug(
            s"Not applying duplicate party metadata update with submission ID $submissionId"
           )
+          // It is normally removed after we've stored the new metadata into the DB,
+          // but since there's nothing to store in this case, that won't happen, so remove it now
+          update.participantId.foreach(pid =>
+            pendingAllocationData.remove((update.partyId, pid)).discard
+          )
           None
         }
       }
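`PartyAllocation` now waits for the party to become visible on every connected synchronizer and switches from `traverse` to `parTraverse` so the topology queries run concurrently. With eager `Future`s the same effect already falls out of `Future.traverse`, since each lookup starts as soon as it is created; a sketch under that simplification:

    import scala.concurrent.{ExecutionContext, Future}

    object PartyVisibility {
      def awaitOnAll(
          synchronizerIds: Seq[String],
          isVisibleOn: String => Future[Boolean],
      )(implicit ec: ExecutionContext): Future[Seq[(String, Boolean)]] =
        // All lookups are in flight before any result is awaited.
        Future.traverse(synchronizerIds)(id => isVisibleOn(id).map(id -> _))
    }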
diff --git a/sdk/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/topology/PackageOps.scala b/sdk/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/topology/PackageOps.scala
index 71037be0768c..298a30c9c63d 100644
--- a/sdk/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/topology/PackageOps.scala
+++ b/sdk/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/topology/PackageOps.scala
@@ -259,7 +259,7 @@ class PackageOpsImpl(
             currentlyVettedPackages = currentPackages.map(_.packageId).toSet,
             nextPackageIds = newAllPackages.map(_.packageId).toSet,
             dryRunSnapshot = dryRunSnapshot,
-            forceFlags = ForceFlags(ForceFlag.AllowUnvetPackage),
+            forceFlags = ForceFlags.none,
           )
           .leftMap[ParticipantTopologyManagerError](IdentityManagerParentError(_))
           .map(_ => Option.empty[Set[VettedPackage]])
@@ -272,7 +272,7 @@ class PackageOpsImpl(
             currentPackages,
             newAllPackages,
             currentSerial,
-            ForceFlags(ForceFlag.AllowUnvetPackage),
+            ForceFlags.none,
           )
       }
       // only synchronize with the connected synchronizers if a new VettedPackages transaction was actually issued
diff --git a/sdk/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/topology/ParticipantTopologyDispatcher.scala b/sdk/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/topology/ParticipantTopologyDispatcher.scala
index 184c3bcbbf75..05e64c2dee06 100644
--- a/sdk/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/topology/ParticipantTopologyDispatcher.scala
+++ b/sdk/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/topology/ParticipantTopologyDispatcher.scala
@@ -34,6 +34,7 @@ import com.digitalasset.canton.topology.transaction.SignedTopologyTransaction.Ge
 import com.digitalasset.canton.tracing.TraceContext
 import com.digitalasset.canton.util.*
 import com.digitalasset.canton.util.Thereafter.syntax.*
+import com.digitalasset.canton.version.ParticipantProtocolFeatureFlags
 
 import scala.collection.concurrent.TrieMap
 import scala.concurrent.duration.*
@@ -157,8 +158,12 @@ class ParticipantTopologyDispatcher(
       traceContext: TraceContext
   ): EitherT[FutureUnlessShutdown, SynchronizerRegistryError, Unit] = {
     val logicalSynchronizerId = synchronizerId.logical
+    val featureFlagsForPV = ParticipantProtocolFeatureFlags.supportedFeatureFlagsByPV.getOrElse(
+      synchronizerId.protocolVersion,
+      Set.empty,
+    )
 
-    def alreadyTrustedInStore(
+    def alreadyTrustedInStoreWithSupportedFeatures(
        store: TopologyStore[?]
     ): EitherT[FutureUnlessShutdown, SynchronizerRegistryError, Boolean] =
       EitherT.right(
@@ -173,7 +178,13 @@ class ParticipantTopologyDispatcher(
             filterNamespace = None,
           )
           .map(_.toTopologyState.exists {
-            case SynchronizerTrustCertificate(`participantId`, `logicalSynchronizerId`) => true
+            // If the certificate is missing feature flags, re-issue the trust certificate with them
+            case SynchronizerTrustCertificate(
+                  `participantId`,
+                  `logicalSynchronizerId`,
+                  featureFlags,
+                ) =>
+              featureFlagsForPV.diff(featureFlags.toSet).isEmpty
             case _ => false
           })
       )
@@ -182,14 +193,17 @@ class ParticipantTopologyDispatcher(
     def trustSynchronizer(
         state: SyncPersistentState
     ): EitherT[FutureUnlessShutdown, SynchronizerRegistryError, Unit] =
-      synchronizeWithClosing(functionFullName) {
-        MonadUtil.unlessM(alreadyTrustedInStore(manager.store)) {
+      MonadUtil.unlessM(
+        alreadyTrustedInStoreWithSupportedFeatures(manager.store)
+      ) {
+        synchronizeWithClosing(functionFullName) {
           manager
            .proposeAndAuthorize(
               TopologyChangeOp.Replace,
               SynchronizerTrustCertificate(
                 participantId,
                 logicalSynchronizerId,
+                featureFlagsForPV.toSeq,
               ),
               serial = None,
               signingKeys = Seq.empty,
@@ -204,9 +218,12 @@ class ParticipantTopologyDispatcher(
           )
         }
       }
+
     // check if cert already exists in the synchronizer store
     getState(synchronizerId).flatMap(state =>
-      MonadUtil.unlessM(alreadyTrustedInStore(state.topologyStore))(
+      MonadUtil.unlessM(
+        alreadyTrustedInStoreWithSupportedFeatures(state.topologyStore)
+      )(
        trustSynchronizer(state)
      )
    )
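The renamed `alreadyTrustedInStoreWithSupportedFeatures` deliberately compares by subset: an existing certificate only counts if it already advertises every feature flag supported for the protocol version; otherwise it gets re-issued. The test reduces to a one-liner:

    object CertificateCheck {
      // True iff nothing we support is missing from the certificate.
      def upToDate(supportedForPV: Set[String], onCertificate: Seq[String]): Boolean =
        supportedForPV.diff(onCertificate.toSet).isEmpty
    }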
diff --git a/sdk/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/topology/PartyOps.scala b/sdk/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/topology/PartyOps.scala
index b171713e551a..fccda6813a34 100644
--- a/sdk/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/topology/PartyOps.scala
+++ b/sdk/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/topology/PartyOps.scala
@@ -4,13 +4,18 @@
 package com.digitalasset.canton.participant.topology
 
 import cats.data.EitherT
+import cats.syntax.bifunctor.*
 import com.daml.nonempty.NonEmpty
+import com.digitalasset.base.error.{ErrorCategory, ErrorCode, Explanation, Resolution}
 import com.digitalasset.canton.config.RequireTypes.PositiveInt
 import com.digitalasset.canton.data.CantonTimestamp
 import com.digitalasset.canton.error.*
 import com.digitalasset.canton.lifecycle.FutureUnlessShutdown
 import com.digitalasset.canton.logging.{ErrorLoggingContext, NamedLoggerFactory, NamedLogging}
-import com.digitalasset.canton.participant.topology.ParticipantTopologyManagerError.IdentityManagerParentError
+import com.digitalasset.canton.participant.topology.ParticipantTopologyManagerError.{
+  ExternalPartyAlreadyExists,
+  IdentityManagerParentError,
+}
 import com.digitalasset.canton.topology.*
 import com.digitalasset.canton.topology.TopologyManagerError.{
   InconsistentTopologySnapshot,
@@ -151,28 +156,78 @@ class PartyOps(
             TopologyManagerError.TopologyStoreUnknown.Failure(SynchronizerStore(synchronizerId))
           ),
        )
+      // If the party already has a fully authorized P2P mapping, then it is allocated.
+      // Since this function only supports allocation of fresh parties, we fail here.
+      // Further changes to the party topology should be handled via the admin API for now,
+      // or through party replication for hosting relationship updates.
+      // We can't rely on the topology manager failing with a MappingAlreadyExists here,
+      // because the "add" method simply ignores duplicate transactions.
+      // This is actually useful for us as it allows this endpoint to accept the same set of onboarding
+      // transactions being submitted on all hosting nodes and makes multi-hosted party onboarding easier from a client
+      // app.
+      // However we still want to fail if the party is already allocated, hence this check.
+      existingAuthorizedP2Ps <- EitherT
+        .right(
+          topologyManager.store.findPositiveTransactions(
+            asOf = CantonTimestamp.MaxValue,
+            asOfInclusive = false,
+            isProposal = false,
+            types = Seq(PartyToParticipant.code),
+            filterUid = Some(NonEmpty(Seq, externalPartyOnboardingDetails.partyId.uid)),
+            filterNamespace = None,
+          )
+        )
+      _ <- EitherT
+        .cond[FutureUnlessShutdown](
+          existingAuthorizedP2Ps.result.isEmpty,
+          (),
+          ExternalPartyAlreadyExists.Failure(
+            externalPartyOnboardingDetails.partyId,
+            synchronizerId.logical,
+          ),
+        )
+        .leftWiden[ParticipantTopologyManagerError]
       // Sign the party to participant tx with this participant
-      // Validation that this participant is a hosting should already be done in ExternalPartyOnboardingDetails
+      // Validation that this participant is a hosting node should already be done in ExternalPartyOnboardingDetails
       // If somehow that's not done, authorization will fail in the topology manager
-      partyToParticipantSigned <- topologyManager
-        .extendSignature(
-          externalPartyOnboardingDetails.signedPartyToParticipantTransaction,
-          Seq(participantId.fingerprint),
-          ForceFlags.none,
-        )
-        .leftMap(IdentityManagerParentError(_): ParticipantTopologyManagerError)
-      // Add all 3 transactions at once
+      partyToParticipantSignedO <-
+        externalPartyOnboardingDetails.optionallySignedPartyToParticipant match {
+          // If it's already signed, extend the signature
+          case ExternalPartyOnboardingDetails.SignedPartyToParticipant(signed) =>
+            topologyManager
+              .extendSignature(
+                signed,
+                Seq(participantId.fingerprint),
+                ForceFlags.none,
+              )
+              .map(Some(_))
+              .leftMap(IdentityManagerParentError(_): ParticipantTopologyManagerError)
+          case ExternalPartyOnboardingDetails.UnsignedPartyToParticipant(unsigned) =>
+            // Otherwise add the mapping as a proposal
+            topologyManager
+              .proposeAndAuthorize(
+                op = TopologyChangeOp.Replace,
+                mapping = unsigned.mapping,
+                serial = Some(unsigned.serial),
+                signingKeys = Seq(participantId.fingerprint),
+                protocolVersion = synchronizerId.protocolVersion,
+                expectFullAuthorization = false,
+                waitToBecomeEffective = None,
+              )
+              .map(_ => None)
+              .leftMap(IdentityManagerParentError(_): ParticipantTopologyManagerError)
+        }
+      // Add all transactions at once
       _ <- topologyManager
         .add(
-          Seq(
-            externalPartyOnboardingDetails.signedNamespaceTransaction.signedTransaction,
+          externalPartyOnboardingDetails.partyNamespace.toList
+            .flatMap(_.signedTransactions) ++ Seq(
             externalPartyOnboardingDetails.signedPartyToKeyMappingTransaction,
-            partyToParticipantSigned,
-          ),
+            partyToParticipantSignedO,
+          ).flatten,
          ForceFlags.none,
-          // Should be fully authorized only if the party is not multi hosted
-          expectFullAuthorization = !externalPartyOnboardingDetails.isMultiHosted,
+          expectFullAuthorization = externalPartyOnboardingDetails.fullyAllocatesParty,
        )
        .leftMap(IdentityManagerParentError(_): ParticipantTopologyManagerError)
     } yield ()
@@ -189,4 +244,24 @@ object ParticipantTopologyManagerError extends ParticipantErrorGroup {
 
     override def logOnCreation: Boolean = false
   }
 
+  @Explanation(
+    """This error occurs when a request to allocate an external party is made for a party that already exists."""
+  )
+  @Resolution(
+    """Allocate a new party with unique keys. If you're trying to change the hosting nodes of the party,
+      follow the party replication procedure instead."""
+  )
+  object ExternalPartyAlreadyExists
+      extends ErrorCode(
+        id = "EXTERNAL_PARTY_ALREADY_EXISTS",
+        ErrorCategory.InvalidGivenCurrentSystemStateResourceExists,
+      ) {
+    final case class Failure(partyId: PartyId, synchronizerId: SynchronizerId)(implicit
+        val loggingContext: ErrorLoggingContext
+    ) extends CantonError.Impl(
+          cause = s"Party $partyId already exists on synchronizer $synchronizerId"
+        )
+        with ParticipantTopologyManagerError
+  }
+
 }
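Because `add` silently ignores exact duplicates, which keeps multi-hosted onboarding idempotent, freshness has to be enforced by an explicit lookup before adding, as the long comment above explains. The control flow in isolation, with heavily simplified types:

    object FreshAllocation {
      final case class Mapping(partyId: String)

      def allocateFresh(
          findAuthorized: String => Seq[Mapping],
          add: Mapping => Unit,
      )(partyId: String): Either[String, Unit] =
        findAuthorized(partyId) match {
          case Seq() => Right(add(Mapping(partyId))) // genuinely new party
          case _     => Left(s"Party $partyId already exists")
        }
    }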
diff --git a/sdk/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/topology/SequencerConnectionSuccessorListener.scala b/sdk/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/topology/SequencerConnectionSuccessorListener.scala
index f597159a7c88..98592d109368 100644
--- a/sdk/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/topology/SequencerConnectionSuccessorListener.scala
+++ b/sdk/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/topology/SequencerConnectionSuccessorListener.scala
@@ -90,7 +90,7 @@ class SequencerConnectionSuccessorListener(
       }.toMap
 
       configuredSequencerIds = configuredSequencers.keySet
 
-      (synchronizerUpgradeOngoing, _) <- OptionT(snapshot.isSynchronizerUpgradeOngoing())
+      (synchronizerUpgradeOngoing, _) <- OptionT(snapshot.synchronizerUpgradeOngoing())
 
       SynchronizerSuccessor(successorPSId, upgradeTime) = synchronizerUpgradeOngoing
 
       _ = logger.debug(
diff --git a/sdk/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/util/DAMLe.scala b/sdk/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/util/DAMLe.scala
index fef6bba367e0..af897a31763c 100644
--- a/sdk/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/util/DAMLe.scala
+++ b/sdk/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/util/DAMLe.scala
@@ -13,7 +13,8 @@ import com.digitalasset.canton.logging.{LoggingContextUtil, NamedLoggerFactory,
 import com.digitalasset.canton.participant.protocol.EngineController.GetEngineAbortStatus
 import com.digitalasset.canton.participant.store.ContractAndKeyLookup
 import com.digitalasset.canton.participant.util.DAMLe.{
-  CreateNodeEnricher,
+  ContractEnricher,
+  EnrichmentError,
   HasReinterpret,
   PackageResolver,
   ReInterpretationResult,
@@ -24,7 +25,7 @@ import com.digitalasset.canton.platform.apiserver.execution.ContractAuthenticato
 import com.digitalasset.canton.protocol.*
 import com.digitalasset.canton.tracing.TraceContext
 import com.digitalasset.canton.util.Thereafter.syntax.ThereafterOps
-import com.digitalasset.canton.{LfCommand, LfCreateCommand, LfKeyResolver, LfPartyId}
+import com.digitalasset.canton.{LfCommand, LfCreateCommand, LfKeyResolver, LfPackageId, LfPartyId}
 import com.digitalasset.daml.lf.VersionRange
 import com.digitalasset.daml.lf.data.Ref.{PackageId, PackageName}
 import com.digitalasset.daml.lf.data.{ImmArray, Ref, Time}
@@ -34,7 +35,7 @@ import com.digitalasset.daml.lf.interpretation.Error as LfInterpretationError
 import com.digitalasset.daml.lf.language.Ast.Package
 import com.digitalasset.daml.lf.language.LanguageVersion
 import com.digitalasset.daml.lf.language.LanguageVersion.v2_dev
-import com.digitalasset.daml.lf.transaction.ContractKeyUniquenessMode
+import com.digitalasset.daml.lf.transaction.{ContractKeyUniquenessMode, FatContractInstance}
 
 import java.nio.file.Path
 import scala.annotation.tailrec
@@ -88,13 +89,13 @@ object DAMLe {
    *   validation.
    */
   type PackageResolver = PackageId => TraceContext => FutureUnlessShutdown[Option[Package]]
-  private type Enricher[A] = A => TraceContext => EitherT[
+  private type Enricher[I, O] = I => TraceContext => EitherT[
     FutureUnlessShutdown,
     ReinterpretationError,
-    A,
+    O,
   ]
-  type TransactionEnricher = Enricher[LfVersionedTransaction]
-  type CreateNodeEnricher = Enricher[LfNodeCreate]
+  type TransactionEnricher = Enricher[LfVersionedTransaction, LfVersionedTransaction]
+  type ContractEnricher = Enricher[(FatContractInstance, Set[LfPackageId]), FatContractInstance]
 
   sealed trait ReinterpretationError extends PrettyPrinting
 
@@ -108,6 +109,10 @@ object DAMLe {
     )
   }
 
+  final case class EnrichmentError(reason: String) extends ReinterpretationError {
+    override protected def pretty: Pretty[EnrichmentError] = adHocPrettyInstance
+  }
+
   private val zeroSeed: LfHash =
     LfHash.assertFromByteArray(new Array[Byte](LfHash.underlyingHashLength))
 
@@ -180,8 +185,15 @@ class DAMLe(
 
   /** Enrich create node values by re-hydrating record labels and identifiers
    */
-  val enrichCreateNode: CreateNodeEnricher = { createNode => implicit traceContext =>
-    EitherT.liftF(interactiveSubmissionEnricher.enrichCreateNode(createNode))
+  val enrichContract: ContractEnricher = { case (createNode, targetPackageIds) =>
+    implicit traceContext =>
+      interactiveSubmissionEnricher
+        .enrichContract(createNode, targetPackageIds)
+        .leftFlatMap(err =>
+          EitherT.leftT[FutureUnlessShutdown, FatContractInstance](
+            EnrichmentError(err): ReinterpretationError
+          )
+        )
   }
 
   override def reinterpret(
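Widening the private `Enricher[A]` alias to `Enricher[I, O]` is what lets `ContractEnricher` consume a contract together with its target package ids while still producing a plain enriched contract. A toy version of the alias and one instance, with simplified types and the `TraceContext` currying dropped:

    import cats.data.EitherT
    import cats.instances.future.*
    import scala.concurrent.ExecutionContext.Implicits.global
    import scala.concurrent.Future

    object EnricherDemo {
      sealed trait ReinterpretationError
      final case class EnrichmentError(reason: String) extends ReinterpretationError

      // Input and output types may now differ.
      type Enricher[I, O] = I => EitherT[Future, ReinterpretationError, O]

      // e.g. (contract payload, target package ids) => enriched payload
      val enrichContract: Enricher[(String, Set[String]), String] = { case (contract, _) =>
        EitherT.rightT[Future, ReinterpretationError](contract)
      }
    }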
).thenReturn(FutureUnlessShutdown.pure(packagesVettedStoredTx(currentlyVettedPackages))) - def expectNewVettingState(newVettedPackagesState: List[LfPackageId], allowUnvetting: Boolean) = + def expectNewVettingState(newVettedPackagesState: List[LfPackageId]) = when( topologyManager.proposeAndAuthorize( eqTo(TopologyChangeOp.Replace), @@ -319,8 +319,7 @@ class PackageOpsTest extends PackageOpsTestBase { eqTo(Seq.empty), eqTo(testedProtocolVersion), eqTo(true), - if (allowUnvetting) eqTo(ForceFlags(ForceFlag.AllowUnvetPackage)) - else eqTo(ForceFlags.none), + eqTo(ForceFlags.none), any[Option[NonNegativeFiniteDuration]], )(anyTraceContext) ).thenReturn( diff --git a/sdk/canton/community/participant/src/test/scala/com/digitalasset/canton/participant/pruning/AcsCommitmentProcessorTest.scala b/sdk/canton/community/participant/src/test/scala/com/digitalasset/canton/participant/pruning/AcsCommitmentProcessorTest.scala index 33bb6cd79fba..dcaca878b05c 100644 --- a/sdk/canton/community/participant/src/test/scala/com/digitalasset/canton/participant/pruning/AcsCommitmentProcessorTest.scala +++ b/sdk/canton/community/participant/src/test/scala/com/digitalasset/canton/participant/pruning/AcsCommitmentProcessorTest.scala @@ -381,6 +381,7 @@ sealed trait AcsCommitmentProcessorBaseTest increasePerceivedComputationTimeForCommitments = Option.when( increasePerceivedComputationTimeForCommitments )(interval.duration.multipliedBy(2)), + doNotAwaitOnCheckingIncomingCommitments = false, ) (acsCommitmentProcessor, store, sequencerClient, changes, acsCommitmentConfigStore) } diff --git a/sdk/canton/community/synchronizer/src/main/scala/com/digitalasset/canton/synchronizer/block/update/BlockChunkProcessor.scala b/sdk/canton/community/synchronizer/src/main/scala/com/digitalasset/canton/synchronizer/block/update/BlockChunkProcessor.scala index 3c2a4390250a..eb8c3efbe6d0 100644 --- a/sdk/canton/community/synchronizer/src/main/scala/com/digitalasset/canton/synchronizer/block/update/BlockChunkProcessor.scala +++ b/sdk/canton/community/synchronizer/src/main/scala/com/digitalasset/canton/synchronizer/block/update/BlockChunkProcessor.scala @@ -482,7 +482,7 @@ final class BlockChunkProcessor( warnIfApproximate = false, ) synchronizerSuccessorO <- snapshot.ipsSnapshot - .isSynchronizerUpgradeOngoing() + .synchronizerUpgradeOngoing() .map(_.map { case (successor, _) => successor }) allAcknowledgements = fixedTsChanges.collect { case (_, t @ Traced(Acknowledgment(_, ack))) => t.map(_ => ack) diff --git a/sdk/canton/community/synchronizer/src/main/scala/com/digitalasset/canton/synchronizer/block/update/SubmissionRequestValidator.scala b/sdk/canton/community/synchronizer/src/main/scala/com/digitalasset/canton/synchronizer/block/update/SubmissionRequestValidator.scala index 9573670c0e23..c864bd641da0 100644 --- a/sdk/canton/community/synchronizer/src/main/scala/com/digitalasset/canton/synchronizer/block/update/SubmissionRequestValidator.scala +++ b/sdk/canton/community/synchronizer/src/main/scala/com/digitalasset/canton/synchronizer/block/update/SubmissionRequestValidator.scala @@ -236,7 +236,8 @@ private[update] final class SubmissionRequestValidator( traceContext: TraceContext, ): EitherT[FutureUnlessShutdown, SubmissionOutcome, Map[GroupRecipient, Set[Member]]] = { val groupRecipients = submissionRequest.batch.allRecipients.collect { - case group: GroupRecipient => + // Note: we don't resolve AllMembersOfSynchronizer as it is encoded as -1 and handled internally by db sequencer + case group: GroupRecipient if group != 
AllMembersOfSynchronizer => group } @@ -614,7 +615,9 @@ private[update] final class SubmissionRequestValidator( // // See https://github.com/DACH-NY/canton/pull/17676#discussion_r1515926774 sequencerEventTimestamp = - Option.when(isThisSequencerAddressed(groupToMembers))(sequencingTimestamp) + Option.when(isThisSequencerAddressed(groupToMembers, submissionRequest))( + sequencingTimestamp + ) } yield SubmissionRequestValidationResult( inFlightAggregations, @@ -785,13 +788,17 @@ private[update] final class SubmissionRequestValidator( // after being deactivated in the Canton topology, specifically until the underlying consensus algorithm // allows them to be also removed from the BFT ordering topology), but they should not be considered addressed, // since they are not active in the Canton topology anymore (i.e., group recipients don't include them). - private def isThisSequencerAddressed(groupToMembers: Map[GroupRecipient, Set[Member]]): Boolean = + private def isThisSequencerAddressed( + groupToMembers: Map[GroupRecipient, Set[Member]], + submissionRequest: SubmissionRequest, + ): Boolean = groupToMembers .get(AllMembersOfSynchronizer) .exists(_.contains(sequencerId)) || groupToMembers .get(SequencersOfSynchronizer) - .exists(_.contains(sequencerId)) + .exists(_.contains(sequencerId)) || + submissionRequest.batch.isBroadcast } private[update] object SubmissionRequestValidator { diff --git a/sdk/canton/community/synchronizer/src/main/scala/com/digitalasset/canton/synchronizer/mediator/MediatorNode.scala b/sdk/canton/community/synchronizer/src/main/scala/com/digitalasset/canton/synchronizer/mediator/MediatorNode.scala index 9c367db096cc..85370e550e71 100644 --- a/sdk/canton/community/synchronizer/src/main/scala/com/digitalasset/canton/synchronizer/mediator/MediatorNode.scala +++ b/sdk/canton/community/synchronizer/src/main/scala/com/digitalasset/canton/synchronizer/mediator/MediatorNode.scala @@ -575,7 +575,7 @@ class MediatorNodeBootstrap( seedForRandomnessO = arguments.testingConfig.sequencerTransportSeed, futureSupervisor = futureSupervisor, timeouts = timeouts, - loggerFactory = loggerFactory, + loggerFactory = synchronizerLoggerFactory, ) val useNewConnectionPool = parameters.sequencerClient.useNewConnectionPool @@ -682,6 +682,7 @@ class MediatorNodeBootstrap( sequencerConnections = info.sequencerConnections, expectedPSIdO = None, tracingConfig = parameters.tracing, + name = "dummy", ) .leftMap(error => error.toString) ) @@ -721,9 +722,11 @@ class MediatorNodeBootstrap( ), sequencerClientFactory, sequencerInfoLoader, + connectionPoolFactory, synchronizerAlias, synchronizerId, sequencerClient, + parameters.tracing, loggerFactory, ) diff --git a/sdk/canton/community/synchronizer/src/main/scala/com/digitalasset/canton/synchronizer/sequencer/EventSignaller.scala b/sdk/canton/community/synchronizer/src/main/scala/com/digitalasset/canton/synchronizer/sequencer/EventSignaller.scala index 66dd09c73cbe..ec95e9d5cc61 100644 --- a/sdk/canton/community/synchronizer/src/main/scala/com/digitalasset/canton/synchronizer/sequencer/EventSignaller.scala +++ b/sdk/canton/community/synchronizer/src/main/scala/com/digitalasset/canton/synchronizer/sequencer/EventSignaller.scala @@ -16,14 +16,14 @@ import scala.concurrent.Future /** Who gets notified that an event has been written */ sealed trait WriteNotification { def union(notification: WriteNotification): WriteNotification - def includes(memberId: SequencerMemberId): Boolean + def isBroadcastOrIncludes(memberId: SequencerMemberId): Boolean } object
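// [Editorial example, not part of the patch] The extra `|| submissionRequest.batch.isBroadcast`
// clause above means a broadcast batch addresses this sequencer even when the resolved
// group-recipient map is empty (AllMembersOfSynchronizer is no longer expanded into a member
// set). Minimal sketch with stand-in types; all names here are illustrative:
object AddressingSketch {
  sealed trait GroupRecipient
  case object AllMembersOfSynchronizer extends GroupRecipient
  case object SequencersOfSynchronizer extends GroupRecipient

  def isSequencerAddressed(
      groupToMembers: Map[GroupRecipient, Set[String]],
      batchIsBroadcast: Boolean,
      sequencerId: String,
  ): Boolean =
    groupToMembers.get(AllMembersOfSynchronizer).exists(_.contains(sequencerId)) ||
      groupToMembers.get(SequencersOfSynchronizer).exists(_.contains(sequencerId)) ||
      batchIsBroadcast

  // With broadcasts left unresolved, the map can be empty yet the sequencer is addressed:
  // isSequencerAddressed(Map.empty, batchIsBroadcast = true, "seq1") == true
}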
WriteNotification { case object None extends WriteNotification { override def union(notification: WriteNotification): WriteNotification = notification - override def includes(memberId: SequencerMemberId): Boolean = false + override def isBroadcastOrIncludes(memberId: SequencerMemberId): Boolean = false } final case class Members(memberIds: SortedSet[SequencerMemberId]) extends WriteNotification { override def union(notification: WriteNotification): WriteNotification = @@ -32,7 +32,8 @@ object WriteNotification { case None => this } - override def includes(memberId: SequencerMemberId): Boolean = memberIds.contains(memberId) + override def isBroadcastOrIncludes(memberId: SequencerMemberId): Boolean = + memberIds.contains(memberId) || memberIds.contains(SequencerMemberId.Broadcast) override def toString: String = s"Members(${memberIds.map(_.unwrap).mkString(",")})" } diff --git a/sdk/canton/community/synchronizer/src/main/scala/com/digitalasset/canton/synchronizer/sequencer/LocalSequencerStateEventSignaller.scala b/sdk/canton/community/synchronizer/src/main/scala/com/digitalasset/canton/synchronizer/sequencer/LocalSequencerStateEventSignaller.scala index a29a91ac8491..cfd2205617ab 100644 --- a/sdk/canton/community/synchronizer/src/main/scala/com/digitalasset/canton/synchronizer/sequencer/LocalSequencerStateEventSignaller.scala +++ b/sdk/canton/community/synchronizer/src/main/scala/com/digitalasset/canton/synchronizer/sequencer/LocalSequencerStateEventSignaller.scala @@ -68,9 +68,9 @@ class LocalSequencerStateEventSignaller( member: Member, memberId: SequencerMemberId, )(implicit traceContext: TraceContext): Source[ReadSignal, NotUsed] = { - logger.debug(s"Creating signal source for $member") + logger.info(s"Creating signal source for $member") notificationsHubSource - .filter(_.includes(memberId)) + .filter(_.isBroadcastOrIncludes(memberId)) .map(_ => ReadSignal) // this conflate ensures that a slow consumer doesn't cause backpressure and therefore // block the stream of signals for other consumers diff --git a/sdk/canton/community/synchronizer/src/main/scala/com/digitalasset/canton/synchronizer/sequencer/SequencerConfig.scala b/sdk/canton/community/synchronizer/src/main/scala/com/digitalasset/canton/synchronizer/sequencer/SequencerConfig.scala index eb026f140499..4c735601c94f 100644 --- a/sdk/canton/community/synchronizer/src/main/scala/com/digitalasset/canton/synchronizer/sequencer/SequencerConfig.scala +++ b/sdk/canton/community/synchronizer/src/main/scala/com/digitalasset/canton/synchronizer/sequencer/SequencerConfig.scala @@ -287,6 +287,7 @@ object BlockSequencerConfig { confirmationResponse: IndividualCircuitBreakerConfig = default3, verdict: IndividualCircuitBreakerConfig = default3, acknowledgement: IndividualCircuitBreakerConfig = default1, + unexpected: IndividualCircuitBreakerConfig = default1, ) extends UniformCantonConfigValidation object CircuitBreakerByMessageTypeConfig { implicit val circuitBreakerByMessageTypeConfigValidator diff --git a/sdk/canton/community/synchronizer/src/main/scala/com/digitalasset/canton/synchronizer/sequencer/SequencerReader.scala b/sdk/canton/community/synchronizer/src/main/scala/com/digitalasset/canton/synchronizer/sequencer/SequencerReader.scala index bbcf18115b58..1b010fcdad52 100644 --- a/sdk/canton/community/synchronizer/src/main/scala/com/digitalasset/canton/synchronizer/sequencer/SequencerReader.scala +++ b/sdk/canton/community/synchronizer/src/main/scala/com/digitalasset/canton/synchronizer/sequencer/SequencerReader.scala @@ -188,6 +188,9 @@ 
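// [Editorial example, not part of the patch] With the Broadcast sentinel stored like an
// ordinary recipient id, a single filter covers both direct and broadcast notifications.
// Minimal sketch; SequencerMemberId is modelled as a bare Int wrapper (an assumption):
import scala.collection.immutable.SortedSet

object WriteNotificationSketch {
  final case class SequencerMemberId(id: Int)
  implicit val ord: Ordering[SequencerMemberId] = Ordering.by[SequencerMemberId, Int](_.id)
  val Broadcast: SequencerMemberId = SequencerMemberId(-1)

  final case class Members(memberIds: SortedSet[SequencerMemberId]) {
    def isBroadcastOrIncludes(memberId: SequencerMemberId): Boolean =
      memberIds.contains(memberId) || memberIds.contains(Broadcast)
  }

  // Members(SortedSet(Broadcast)).isBroadcastOrIncludes(SequencerMemberId(42)) == true:
  // a single broadcast row wakes up every member's read stream.
}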
class SequencerReader( _ = logger.debug( s"Current safe watermark is $safeWatermarkTimestampO" ) + _ = logger.debug( + s"Member $member was registered at ${registeredMember.registeredFrom}" + ) // It can happen that a member switching between sequencers runs into a sequencer that is catching up. // In this situation, the sequencer has to wait for the watermark to catch up to the requested timestamp. @@ -292,10 +295,10 @@ class SequencerReader( // This is a "reading watermark" meaning that "we have read up to and including this timestamp", // so if we want to grab the event exactly at timestampInclusive, we do -1 here nextReadTimestamp = readFromTimestampInclusive + .map(_.immediatePredecessor) .getOrElse( registeredMember.registeredFrom - ) - .immediatePredecessor, + ), nextPreviousEventTimestamp = previousEventTimestamp, latestTopologyClientRecipientTimestamp = latestTopologyClientRecipientTimestampO, ) diff --git a/sdk/canton/community/synchronizer/src/main/scala/com/digitalasset/canton/synchronizer/sequencer/SequencerRuntime.scala b/sdk/canton/community/synchronizer/src/main/scala/com/digitalasset/canton/synchronizer/sequencer/SequencerRuntime.scala index 8fad460f03cf..386afe40dcc4 100644 --- a/sdk/canton/community/synchronizer/src/main/scala/com/digitalasset/canton/synchronizer/sequencer/SequencerRuntime.scala +++ b/sdk/canton/community/synchronizer/src/main/scala/com/digitalasset/canton/synchronizer/sequencer/SequencerRuntime.scala @@ -431,7 +431,7 @@ class SequencerRuntime( .getOrElse(EitherT.rightT[FutureUnlessShutdown, String](())) // Note: we use head snapshot as we want the latest announced upgrade anyway, an overlapping update is idempotent synchronizerUpgradeO <- EitherT.right( - topologyClient.headSnapshot.isSynchronizerUpgradeOngoing() + topologyClient.headSnapshot.synchronizerUpgradeOngoing() ) } yield { synchronizerUpgradeO.foreach { case (successor, effectiveTime) => diff --git a/sdk/canton/community/synchronizer/src/main/scala/com/digitalasset/canton/synchronizer/sequencer/SequencerWriterSource.scala b/sdk/canton/community/synchronizer/src/main/scala/com/digitalasset/canton/synchronizer/sequencer/SequencerWriterSource.scala index d08bdd642c4e..e6352e4ae9e3 100644 --- a/sdk/canton/community/synchronizer/src/main/scala/com/digitalasset/canton/synchronizer/sequencer/SequencerWriterSource.scala +++ b/sdk/canton/community/synchronizer/src/main/scala/com/digitalasset/canton/synchronizer/sequencer/SequencerWriterSource.scala @@ -388,6 +388,11 @@ class SendEventGenerator( } def deliver(recipientIds: Set[SequencerMemberId]): StoreEvent[BytesPayload] = { + val finalRecipientIds = if (submission.batch.isBroadcast) { + Set(SequencerMemberId.Broadcast) + } else { + recipientIds + } val payload = BytesPayload( submissionOrOutcome.fold( @@ -400,7 +405,7 @@ class SendEventGenerator( DeliverStoreEvent.ensureSenderReceivesEvent( senderId, submission.messageId, - recipientIds, + finalRecipientIds, payload, submission.topologyTimestamp, trafficReceiptO, diff --git a/sdk/canton/community/synchronizer/src/main/scala/com/digitalasset/canton/synchronizer/sequencer/block/BlockSequencer.scala b/sdk/canton/community/synchronizer/src/main/scala/com/digitalasset/canton/synchronizer/sequencer/block/BlockSequencer.scala index e43b77404aa6..da2d41ca807d 100644 --- a/sdk/canton/community/synchronizer/src/main/scala/com/digitalasset/canton/synchronizer/sequencer/block/BlockSequencer.scala +++ 
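// [Editorial example, not part of the patch] The nextReadTimestamp hunk above fixes an
// off-by-one: the old code applied `.immediatePredecessor` even to the registration-timestamp
// fallback, so a newly onboarded member could be served the event at exactly its registration
// time, which it already knows from its onboarding topology snapshot (see the
// SequencerReaderTest change further down, ts(2) -> ts(3)). Sketch on plain Long timestamps
// (an assumption; Canton uses CantonTimestamp):
object ReadCursorSketch {
  // "Read up to and including" watermark for the next fetch.
  def nextReadTimestamp(readFromInclusive: Option[Long], registeredFrom: Long): Long =
    readFromInclusive.map(_ - 1).getOrElse(registeredFrom)

  // nextReadTimestamp(Some(10L), 2L) == 9L  // so the event at ts 10 is still delivered
  // nextReadTimestamp(None, 2L)     == 2L  // first delivered event is the one after ts 2
  // The old code computed None -> (2L - 1) == 1L, wrongly delivering the event at ts 2 as well.
}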
b/sdk/canton/community/synchronizer/src/main/scala/com/digitalasset/canton/synchronizer/sequencer/block/BlockSequencer.scala @@ -358,11 +358,8 @@ class BlockSequencer( ) for { - _ <- rejectSubmissionsBeforeOrAtSequencingTimeLowerBound() - _ <- - if (submission.isConfirmationRequest) rejectSubmissionsIfOverloaded(submission) - else EitherT.rightT[FutureUnlessShutdown, SequencerDeliverError](()) + _ <- rejectSubmissionsIfOverloaded(submission) // TODO(i17584): revisit the consequences of no longer enforcing that // aggregated submissions with signed envelopes define a topology snapshot _ <- validateMaxSequencingTime(submission) @@ -619,14 +616,11 @@ class BlockSequencer( _ = logger.trace(s"Storage active: ${storage.isActive}") } yield { if (!ledgerStatus.isActive) SequencerHealthStatus(isActive = false, ledgerStatus.description) - else if (!isStorageActive) - SequencerHealthStatus(isActive = false, Some("Can't connect to database")) - else if (circuitBreaker.shouldRejectRequests(SubmissionRequestType.ConfirmationRequest)) + else SequencerHealthStatus( - isActive = false, - Some("Overloaded. Can't receive requests at the moment"), + isStorageActive, + if (isStorageActive) None else Some("Can't connect to database"), ) - else SequencerHealthStatus(isActive = true, None) } override protected def closeAsync(): Seq[AsyncOrSyncCloseable] = { diff --git a/sdk/canton/community/synchronizer/src/main/scala/com/digitalasset/canton/synchronizer/sequencer/block/BlockSequencerCircuitBreaker.scala b/sdk/canton/community/synchronizer/src/main/scala/com/digitalasset/canton/synchronizer/sequencer/block/BlockSequencerCircuitBreaker.scala index f5472572530c..3cc6b398c5e1 100644 --- a/sdk/canton/community/synchronizer/src/main/scala/com/digitalasset/canton/synchronizer/sequencer/block/BlockSequencerCircuitBreaker.scala +++ b/sdk/canton/community/synchronizer/src/main/scala/com/digitalasset/canton/synchronizer/sequencer/block/BlockSequencerCircuitBreaker.scala @@ -69,8 +69,14 @@ class BlockSequencerCircuitBreaker( previousTimestamp.set(timestamp) } - def shouldRejectRequests(submissionRequestType: SubmissionRequestType): Boolean = - enabled.get() && pekkoCircuitBreakers.get(submissionRequestType).forall(_.shouldRejectRequests) + def shouldRejectRequests(submissionRequestType: SubmissionRequestType): Boolean = { + val subTypeKey = submissionRequestType match { + case SubmissionRequestType.Unexpected(_) => + BlockSequencerCircuitBreaker.unexpectedSubmissionRequestTypeKey + case x => x + } + enabled.get() && pekkoCircuitBreakers.get(subTypeKey).forall(_.shouldRejectRequests) + } def shouldRejectRequests(submissionRequest: SubmissionRequest): Boolean = shouldRejectRequests(submissionRequest.requestType) @@ -94,6 +100,7 @@ class BlockSequencerCircuitBreaker( messages.topUp -> "top up", messages.topology -> "topology", messages.timeProof -> "time proof", + messages.unexpected -> "unexpected", messages.acknowledgement -> "acknowledgment", ).groupBy(_._1).map { case (config, group) => val messageNames = group.map(_._2) @@ -115,6 +122,7 @@ class BlockSequencerCircuitBreaker( SubmissionRequestType.TopUpMed -> messages.topUp, SubmissionRequestType.TopologyTransaction -> messages.topology, SubmissionRequestType.TimeProof -> messages.timeProof, + BlockSequencerCircuitBreaker.unexpectedSubmissionRequestTypeKey -> messages.unexpected, ).fmap(configToCircuitBreaker(_)), ) } @@ -122,6 +130,7 @@ class BlockSequencerCircuitBreaker( } object BlockSequencerCircuitBreaker { + private val unexpectedSubmissionRequestTypeKey = 
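// [Editorial example, not part of the patch] SubmissionRequestType.Unexpected carries a
// free-form string, so two unexpected submissions rarely compare equal and would each miss
// the breaker map. Folding every Unexpected(_) onto one canonical key (as above) gives them
// a single shared circuit breaker. Sketch with stand-in types, names illustrative:
object BreakerKeySketch {
  sealed trait SubmissionRequestType
  case object TimeProof extends SubmissionRequestType
  final case class Unexpected(detail: String) extends SubmissionRequestType

  private val unexpectedKey: SubmissionRequestType = Unexpected("unexpected")

  def breakerKey(t: SubmissionRequestType): SubmissionRequestType = t match {
    case Unexpected(_) => unexpectedKey
    case other => other
  }

  // breakerKey(Unexpected("foo")) == breakerKey(Unexpected("bar")) -- one breaker for all.
}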
SubmissionRequestType.Unexpected("unexpected") class IndividualCircuitBreaker( config: IndividualCircuitBreakerConfig, diff --git a/sdk/canton/community/synchronizer/src/main/scala/com/digitalasset/canton/synchronizer/sequencer/store/DbSequencerStore.scala b/sdk/canton/community/synchronizer/src/main/scala/com/digitalasset/canton/synchronizer/sequencer/store/DbSequencerStore.scala index 66d820f3a3a7..b2df5dc5b6ae 100644 --- a/sdk/canton/community/synchronizer/src/main/scala/com/digitalasset/canton/synchronizer/sequencer/store/DbSequencerStore.scala +++ b/sdk/canton/community/synchronizer/src/main/scala/com/digitalasset/canton/synchronizer/sequencer/store/DbSequencerStore.scala @@ -669,7 +669,9 @@ class DbSequencerStore( recipientRows = eventRows.forgetNE.flatMap { row => row.recipientsO.toList.flatMap { members => val isTopologyEvent = - members.contains(sequencerMemberId) && members.sizeIs > 1 + (members.contains(sequencerMemberId) && members.sizeIs > 1) || members.contains( + SequencerMemberId.Broadcast + ) members.map(m => (row.instanceIndex, m, row.timestamp, isTopologyEvent)) } } @@ -969,7 +971,7 @@ class DbSequencerStore( watermarks as (select * from sequencer_watermarks) select events.ts, events.node_index, events.event_type, events.message_id, events.sender, case - when #$memberContainsBefore $topologyClientMemberId #$memberContainsAfter then true + when #$memberContainsBefore $topologyClientMemberId #$memberContainsAfter or #$memberContainsBefore ${SequencerMemberId.Broadcast} #$memberContainsAfter then true else false end as addressed_to_sequencer, events.payload_id, events.topology_timestamp, @@ -982,25 +984,39 @@ class DbSequencerStore( -- (scanning a wrong index or the table itself). select * from sequencer_events where ts in ( - select ts + (select ts + from sequencer_event_recipients recipients + where + recipients.node_index = watermarks.node_index + -- if the sequencer that produced the event is offline, only consider up until its offline watermark + and (watermarks.sequencer_online = true or recipients.ts <= watermarks.watermark_ts) + and (recipients.recipient_id = $memberId) + -- inclusive timestamp bound that defaults to MinValue if unset + and recipients.ts >= $fromTimestampInclusive + -- only consider events within the safe watermark + and recipients.ts <= $safeWatermark + order by recipients.ts asc + limit $limit) + union + (select ts from sequencer_event_recipients recipients where recipients.node_index = watermarks.node_index -- if the sequencer that produced the event is offline, only consider up until its offline watermark and (watermarks.sequencer_online = true or recipients.ts <= watermarks.watermark_ts) - and recipients.recipient_id = $memberId + and (recipients.recipient_id = ${SequencerMemberId.Broadcast}) -- inclusive timestamp bound that defaults to MinValue if unset and recipients.ts >= $fromTimestampInclusive -- only consider events within the safe watermark and recipients.ts <= $safeWatermark order by recipients.ts asc - -- We only have limit on the inner query. We can add an extra limit outside (for DB sequencer case), - -- but it doesn't make sense to drop already read events and it seems reasonable to just return them. 
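// [Editorial example, not part of the patch] Why the outer `limit` in the query below matters:
// each UNION branch (direct recipients vs. broadcast rows) is capped at `limit` on its own
// timeline, so the merged, time-ordered result can hold up to 2 * limit rows. Re-applying the
// limit outside keeps the returned page a contiguous prefix of the merged timeline, so the
// reader's cursor never advances past rows of the longer branch. Sketch of the property on
// plain Long timestamps (an assumption):
object PagedUnionSketch {
  def mergedPage(direct: Seq[Long], broadcast: Seq[Long], limit: Int): Seq[Long] =
    (direct.sorted.take(limit) ++ broadcast.sorted.take(limit)).sorted.take(limit)

  // mergedPage(Seq(1L, 2L, 3L), Seq(30L), limit = 2) == Seq(1L, 2L)
  // The next page then starts after ts 2 and still sees 3 and 30 -- no gap.
}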
- limit $limit + limit $limit) ) ) events on (true) - order by events.ts asc""".as[Sequenced[PayloadId]]( + order by events.ts asc + -- NB: outer limit is crucial to ensure no event gaps between 2 sub-queries above + limit $limit""".as[Sequenced[PayloadId]]( getResultFixedRecipients(topologyClientMemberId) ) case _: H2 => @@ -1019,7 +1035,7 @@ class DbSequencerStore( on events.node_index = recipients.node_index and events.ts = recipients.ts inner join sequencer_watermarks watermarks on recipients.node_index = watermarks.node_index - where recipients.recipient_id = $memberId + where (recipients.recipient_id = $memberId or recipients.recipient_id = ${SequencerMemberId.Broadcast}) and ( -- inclusive timestamp bound that defaults to MinValue if unset recipients.ts >= $fromTimestampInclusive @@ -1239,7 +1255,7 @@ class DbSequencerStore( ) select m.member, - coalesce( + coalesce(greatest( ( select ( @@ -1247,12 +1263,13 @@ class DbSequencerStore( from sequencer_event_recipients member_recipient where member_recipient.node_index = watermarks.node_index - and m.id = member_recipient.recipient_id - """ ++ topologyClientMemberFilter ++ sql""" + and (${SequencerMemberId.Broadcast} = member_recipient.recipient_id) + """ ++ topologyClientMemberFilter // keeping this filter for consistency with the other subquery + ++ sql""" and member_recipient.ts <= watermarks.watermark_ts and member_recipient.ts <= $beforeInclusive and member_recipient.ts <= $safeWatermark - and member_recipient.ts >= m.registered_ts + and member_recipient.ts > m.registered_ts order by member_recipient.node_index, member_recipient.recipient_id, member_recipient.ts desc limit 1 ) as ts @@ -1260,6 +1277,26 @@ class DbSequencerStore( order by ts desc limit 1 ), + ( + select + ( + select member_recipient.ts + from sequencer_event_recipients member_recipient + where + member_recipient.node_index = watermarks.node_index + and (m.id = member_recipient.recipient_id) + """ ++ topologyClientMemberFilter ++ sql""" + and member_recipient.ts <= watermarks.watermark_ts + and member_recipient.ts <= $beforeInclusive + and member_recipient.ts <= $safeWatermark + and member_recipient.ts > m.registered_ts + order by member_recipient.node_index, member_recipient.recipient_id, member_recipient.ts desc + limit 1 + ) as ts + from watermarks + order by ts desc + limit 1 + )), -- end of greatest m.pruned_previous_event_timestamp ) previous_ts from enabled_members m""").as[(Member, Option[CantonTimestamp])].map(_.toMap) @@ -1316,7 +1353,7 @@ class DbSequencerStore( watermarks.watermark_ts is not null and (watermarks.sequencer_online = true or events.ts <= watermarks.watermark_ts) ) and ts <= $timestampInclusive - and (#$memberContainsBefore $memberId #$memberContainsAfter) + and ((#$memberContainsBefore $memberId #$memberContainsAfter) or (#$memberContainsBefore ${SequencerMemberId.Broadcast} #$memberContainsAfter)) and ts <= $safeWatermark order by ts desc limit 1 diff --git a/sdk/canton/community/synchronizer/src/main/scala/com/digitalasset/canton/synchronizer/sequencer/store/InMemorySequencerStore.scala b/sdk/canton/community/synchronizer/src/main/scala/com/digitalasset/canton/synchronizer/sequencer/store/InMemorySequencerStore.scala index e4ebcb09ce4b..5acdf49d2449 100644 --- a/sdk/canton/community/synchronizer/src/main/scala/com/digitalasset/canton/synchronizer/sequencer/store/InMemorySequencerStore.scala +++ b/sdk/canton/community/synchronizer/src/main/scala/com/digitalasset/canton/synchronizer/sequencer/store/InMemorySequencerStore.scala @@ -263,6 
+263,7 @@ class InMemorySequencerStore( private def isMemberRecipient(member: SequencerMemberId)(event: StoreEvent[_]): Boolean = event match { case deliver: DeliverStoreEvent[_] => + deliver.members.contains(SequencerMemberId.Broadcast) || deliver.members.contains( member ) // only if they're a recipient (sender should already be a recipient) diff --git a/sdk/canton/community/synchronizer/src/main/scala/com/digitalasset/canton/synchronizer/sequencer/store/SequencerStore.scala b/sdk/canton/community/synchronizer/src/main/scala/com/digitalasset/canton/synchronizer/sequencer/store/SequencerStore.scala index 6748169e042d..6b55dffc291c 100644 --- a/sdk/canton/community/synchronizer/src/main/scala/com/digitalasset/canton/synchronizer/sequencer/store/SequencerStore.scala +++ b/sdk/canton/community/synchronizer/src/main/scala/com/digitalasset/canton/synchronizer/sequencer/store/SequencerStore.scala @@ -58,6 +58,8 @@ final case class SequencerMemberId(private val id: Int) extends PrettyPrinting { } object SequencerMemberId { + val Broadcast: SequencerMemberId = SequencerMemberId(-1) + implicit val sequencerMemberIdOrdering: Ordering[SequencerMemberId] = Ordering.by[SequencerMemberId, Int](_.id) implicit val sequencerMemberIdOrder: Order[SequencerMemberId] = fromOrdering( @@ -720,7 +722,10 @@ trait SequencerStore extends SequencerMemberValidator with NamedLogging with Aut val events = cache .slice(start, cache.size) .view - .filter(_.event.members.contains(memberId)) + .filter(event => + event.event.members.contains(memberId) || event.event.members + .contains(SequencerMemberId.Broadcast) + ) .take(limit) .toSeq diff --git a/sdk/canton/community/synchronizer/src/main/scala/com/digitalasset/canton/synchronizer/service/GrpcSequencerConnectionService.scala b/sdk/canton/community/synchronizer/src/main/scala/com/digitalasset/canton/synchronizer/service/GrpcSequencerConnectionService.scala index 4ff0d6b7bdf1..10e1fe800045 100644 --- a/sdk/canton/community/synchronizer/src/main/scala/com/digitalasset/canton/synchronizer/service/GrpcSequencerConnectionService.scala +++ b/sdk/canton/community/synchronizer/src/main/scala/com/digitalasset/canton/synchronizer/service/GrpcSequencerConnectionService.scala @@ -20,6 +20,7 @@ import com.digitalasset.canton.mediator.admin.v30 import com.digitalasset.canton.mediator.admin.v30.SequencerConnectionServiceGrpc.SequencerConnectionService import com.digitalasset.canton.networking.grpc.CantonGrpcUtil.GrpcErrors.AbortedDueToShutdown import com.digitalasset.canton.networking.grpc.{CantonGrpcUtil, CantonMutableHandlerRegistry} +import com.digitalasset.canton.sequencing.SequencerConnectionXPool.SequencerConnectionXPoolError import com.digitalasset.canton.sequencing.client.SequencerClient.SequencerTransports import com.digitalasset.canton.sequencing.client.{ RequestSigner, @@ -162,9 +163,11 @@ object GrpcSequencerConnectionService extends HasLoggerName { requestSigner: RequestSigner, transportFactory: SequencerClientTransportFactory, sequencerInfoLoader: SequencerInfoLoader, + connectionPoolFactory: SequencerConnectionXPoolFactory, synchronizerAlias: SynchronizerAlias, synchronizerId: PhysicalSynchronizerId, sequencerClient: SequencerClient, + tracingConfig: TracingConfig, loggerFactory: NamedLoggerFactory, )(implicit executionContext: ExecutionContextExecutor, @@ -174,6 +177,8 @@ object GrpcSequencerConnectionService extends HasLoggerName { closeContext: CloseContext, ): UpdateSequencerClient = { val clientO = new AtomicReference[Option[RichSequencerClient]](None) + implicit 
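// [Editorial example, not part of the patch] The membership test that now recurs across
// DbSequencerStore, InMemorySequencerStore and the events buffer, factored out once. The
// sentinel id -1 cannot collide with real member ids, assuming the store only allocates
// non-negative ids. Stand-in types, names illustrative:
object BroadcastSketch {
  final case class SequencerMemberId(id: Int)
  val Broadcast: SequencerMemberId = SequencerMemberId(-1)

  def isAddressedTo(recipients: Set[SequencerMemberId], member: SequencerMemberId): Boolean =
    recipients.contains(member) || recipients.contains(Broadcast)
}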
val namedLoggingContext: NamedLoggingContext = + NamedLoggingContext(loggerFactory, traceContext) registry.addServiceU( SequencerConnectionService.bindService( new GrpcSequencerConnectionService( @@ -186,14 +191,52 @@ object GrpcSequencerConnectionService extends HasLoggerName { newConfig = sequencerConnectionLens.replace(newSequencerConnection)(currentConfig) // load and potentially validate the new connection - newEndpointsInfo <- sequencerInfoLoader - .loadAndAggregateSequencerEndpoints( - synchronizerAlias, - Some(synchronizerId), - newSequencerConnection, - sequencerConnectionValidation, - ) - .leftMap(_.cause) + // + // Retries are not strictly necessary for the "change sequencer connection" scenario because the API is + // idempotent and retries on the client side are cheap. So by not retrying internally, the application + // gets some error feedback more quickly and can in theory react to it. Whether callers will reasonably + // inspect the returned error is debatable though. + // In principle, mediator node start-up could also fail without retrying and rely on the container + // framework to restart the pod. But that's a much more expensive operation, so it makes sense to + // retry there (see `waitUntilSequencerConnectionIsValidWithPool`). + newEndpointsInfoAndPoolConfigO <- + if (useNewConnectionPool) for { + // The following implementation strives to keep the same behavior as with the transport mechanisms, + // which is to ensure the new config is valid before replacing the old one. + // The transport mechanism supports a variety of validation modes, whereas here we support only the + // equivalent of `THRESHOLD_ACTIVE`, i.e. the config is considered valid if at least trust-threshold-many + // connections are successful. + // + // Performing this validation here protects the node operator from typos in the connection config that + // would render their node dysfunctional because it cannot connect to the sequencer. + // On the other hand, the operator should be able to set a new configuration in case of a substantial + // change to sequencer endpoints, and that should be doable concurrently with those sequencer endpoint + // changes taking place, so one could argue that we should not validate the config here and rely on the + // pool to report through health status. + // + // As we cannot satisfy both needs here, this will likely be discussed and revisited.
+ connectionPoolAndInfo <- validateConfig( + connectionPoolFactory = connectionPoolFactory, + sequencerConnections = newSequencerConnection, + poolName = "temp", + tracingConfig = tracingConfig, + ) + } yield { + val (pool, info) = connectionPoolAndInfo + pool.close() + (info, Some(pool.config)) + } + else + sequencerInfoLoader + .loadAndAggregateSequencerEndpoints( + synchronizerAlias, + Some(synchronizerId), + newSequencerConnection, + sequencerConnectionValidation, + ) + .leftMap(_.cause) + .map((_, None)) + (newEndpointsInfo, newPoolConfigO) = newEndpointsInfoAndPoolConfigO sequencerTransportsMapO = Option.when(!useNewConnectionPool)( transportFactory @@ -218,17 +261,14 @@ object GrpcSequencerConnectionService extends HasLoggerName { ) // important to only save the config and change the transport after the `makeTransport` has run and done the handshake + _ <- clientO.get.fold { + // need to close here + sequencerTransportsMapO.foreach(_.values.foreach(_.close())) + EitherT.pure[FutureUnlessShutdown, String](()) + }( + _.changeTransport(sequencerTransports, newPoolConfigO) + ) _ <- EitherT.right(saveConfig(newConfig)) - _ <- EitherT - .right( - clientO - .get() - .fold { - // need to close here - sequencerTransportsMapO.foreach(_.values.foreach(_.close())) - FutureUnlessShutdown.unit - }(_.changeTransport(sequencerTransports)) - ) } yield (), sequencerClient.logout _, loggerFactory, @@ -307,44 +347,15 @@ object GrpcSequencerConnectionService extends HasLoggerName { ] = for { sequencerConnections <- OptionT(loadConfig).toRight("No sequencer connection config") - connectionPool <- EitherT.fromEither[FutureUnlessShutdown]( - connectionPoolFactory - .createFromOldConfig( - sequencerConnections, - expectedPSIdO = None, - tracingConfig = tracingConfig, - ) - .leftMap(_.toString) - ) - _ <- connectionPool.start().leftMap { error => - namedLoggingContext.warn(s"Waiting for valid sequencer connection: $error") - error.toString - } - } yield { - val psid = connectionPool.physicalSynchronizerIdO.getOrElse( - ErrorUtil.invalidState( - "a successfully started connection pool must have the synchronizer ID defined" - ) - ) - val staticParameters = connectionPool.staticSynchronizerParametersO.getOrElse( - ErrorUtil.invalidState( - "a successfully started connection pool must have the static parameters defined" - ) - ) - - // `sequencerConnections.aliasToConnections` built with the transport mechanism depends on the validation mode - // (all, active only, etc.), whereas with the connection pool we provide the original configuration. - // It seems this parameter is however only used later on for building the transports, so it does not matter - // when using the connection pool. 
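// [Editorial example, not part of the patch] Shape of the validate-then-discard flow used
// above: a throwaway pool is started purely to prove the new connection config can reach
// trust-threshold-many sequencers, then closed again, keeping only the aggregated info and
// the translated pool config. A generic sketch (closing on failure too is this sketch's
// choice; the patch does not show that path):
object ProbeValidationSketch {
  def validateByProbe[Pool <: AutoCloseable, Info](
      createPool: () => Either[String, Pool],
      start: Pool => Either[String, Unit],
      describe: Pool => Info,
  ): Either[String, Info] =
    for {
      pool <- createPool()
      _ <- start(pool).left.map { err => pool.close(); err }
      info = describe(pool)
      _ = pool.close()
    } yield info
}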
- val info = SequencerAggregatedInfo( - psid = psid, - staticSynchronizerParameters = staticParameters, - expectedSequencersO = None, + connectionPoolAndInfo <- validateConfig( + connectionPoolFactory = connectionPoolFactory, sequencerConnections = sequencerConnections, + poolName = "main", + tracingConfig = tracingConfig, + logErrorFn = + error => namedLoggingContext.warn(s"Waiting for valid sequencer connection: $error"), ) - - (connectionPool, info) - } + } yield connectionPoolAndInfo import scala.concurrent.duration.* EitherT( @@ -362,4 +373,66 @@ object GrpcSequencerConnectionService extends HasLoggerName { ) ) } + + private def validateConfig( + connectionPoolFactory: SequencerConnectionXPoolFactory, + sequencerConnections: SequencerConnections, + poolName: String, + tracingConfig: TracingConfig, + logErrorFn: SequencerConnectionXPoolError => Unit = _ => (), + )(implicit + namedLoggingContext: NamedLoggingContext, + executionContext: ExecutionContextExecutor, + executionSequencerFactory: ExecutionSequencerFactory, + materializer: Materializer, + ): EitherT[FutureUnlessShutdown, String, (SequencerConnectionXPool, SequencerAggregatedInfo)] = { + implicit val traceContext: TraceContext = namedLoggingContext.traceContext + + for { + connectionPool <- EitherT.fromEither[FutureUnlessShutdown]( + connectionPoolFactory + .createFromOldConfig( + sequencerConnections, + expectedPSIdO = None, + tracingConfig = tracingConfig, + name = poolName, + ) + .leftMap { error => + logErrorFn(error) + error.toString + } + ) + _ <- connectionPool.start().leftMap { error => + logErrorFn(error) + error.toString + } + } yield { + val psid = connectionPool.physicalSynchronizerIdO.getOrElse( + ErrorUtil.invalidState( + "a successfully started connection pool must have the synchronizer ID defined" + ) + ) + val staticParameters = connectionPool.staticSynchronizerParametersO.getOrElse( + ErrorUtil.invalidState( + "a successfully started connection pool must have the static parameters defined" + ) + ) + + // The `sequencerConnections.aliasToConnections` field that we place into `SequencerAggregatedInfo` is different + // with the connection pool compared to what the transport mechanism produces via the `SequencerInfoLoader`: + // with the connection pool, we provide the original configuration, whereas `SequencerInfoLoader.loadAndAggregateSequencerEndpoints` + // produces a map that depends on the validation mode (all, active only, etc.). + // However, this parameter appears to be used only later on, for building the transports, so it does not matter + // when using the connection pool (where no transports are built)
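// [Editorial example, not part of the patch] The two call sites of validateConfig differ
// only in pool name and error handling: start-up ("main") warns and lets an outer retry
// loop keep probing, while reconfiguration ("temp") fails fast and returns the error to
// the admin caller. Simplified illustration; this signature is a stand-in:
object ValidateConfigCallsSketch {
  def validateConfig(poolName: String, logErrorFn: String => Unit = _ => ()): Either[String, Unit] =
    Right(()) // stub standing in for the pool probe

  // Start-up: surface errors into the log, retry externally.
  val atStartup = validateConfig("main", err => println(s"Waiting for valid sequencer connection: $err"))
  // Reconfiguration: no internal logging, the caller inspects the result directly.
  val atReconfig = validateConfig("temp")
}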
+ val info = SequencerAggregatedInfo( + psid = psid, + staticSynchronizerParameters = staticParameters, + expectedSequencersO = None, + sequencerConnections = sequencerConnections, + ) + + (connectionPool, info) + } + } + } diff --git a/sdk/canton/community/synchronizer/src/test/scala/com/digitalasset/canton/synchronizer/sequencer/SequencerReaderTest.scala b/sdk/canton/community/synchronizer/src/test/scala/com/digitalasset/canton/synchronizer/sequencer/SequencerReaderTest.scala index 5b88263fb530..3a109316d64c 100644 --- a/sdk/canton/community/synchronizer/src/test/scala/com/digitalasset/canton/synchronizer/sequencer/SequencerReaderTest.scala +++ b/sdk/canton/community/synchronizer/src/test/scala/com/digitalasset/canton/synchronizer/sequencer/SequencerReaderTest.scala @@ -472,8 +472,10 @@ class SequencerReaderTest event <- pullFromQueue(queue) _ = queue.cancel() // cancel the queue now we're done with it } yield { - // the first event expected to reach alice is at its registration timestamp (its topo mapping effective time) - event.value.timestamp shouldBe ts(2) + // the first event expected to reach alice is the next event after its registration time + // (onboarding topology effective time). Alice must not receive the event at ts(2), + // since it would already have seen its onboarding tx in the topology snapshot at ts(2). + event.value.timestamp shouldBe ts(3) } } diff --git a/sdk/canton/community/synchronizer/src/test/scala/com/digitalasset/canton/synchronizer/sequencer/SequencerWriterSourceTest.scala b/sdk/canton/community/synchronizer/src/test/scala/com/digitalasset/canton/synchronizer/sequencer/SequencerWriterSourceTest.scala index 5fcfa267c9b2..7ddfc2426b6e 100644 --- a/sdk/canton/community/synchronizer/src/test/scala/com/digitalasset/canton/synchronizer/sequencer/SequencerWriterSourceTest.scala +++ b/sdk/canton/community/synchronizer/src/test/scala/com/digitalasset/canton/synchronizer/sequencer/SequencerWriterSourceTest.scala @@ -640,7 +640,7 @@ class SequencerWriterSourceTest combinedNotificationsF map { notification => forAll(members) { member => withClue(s"expecting notification for $member") { - notification.includes(member) shouldBe true + notification.isBroadcastOrIncludes(member) shouldBe true } } diff --git a/sdk/canton/community/synchronizer/src/test/scala/com/digitalasset/canton/synchronizer/sequencer/store/SequencerStoreTest.scala b/sdk/canton/community/synchronizer/src/test/scala/com/digitalasset/canton/synchronizer/sequencer/store/SequencerStoreTest.scala index 473f0e652005..c61be8ab3e38 100644 --- a/sdk/canton/community/synchronizer/src/test/scala/com/digitalasset/canton/synchronizer/sequencer/store/SequencerStoreTest.scala +++ b/sdk/canton/community/synchronizer/src/test/scala/com/digitalasset/canton/synchronizer/sequencer/store/SequencerStoreTest.scala @@ -492,7 +492,7 @@ trait SequencerStoreTest } } - "support paging results" in { + "support paging results and interleave broadcasts correctly" in { val env = Env() for { @@ -501,7 +501,12 @@ trait SequencerStoreTest events = NonEmptyUtil.fromUnsafe( (0L until 20L).map { n => env.deliverEventWithDefaults(ts1.plusSeconds(n), sender = registeredAlice.memberId)() - }.toSeq + } ++ + List( + env.deliverEventWithDefaults(ts(30), sender = registeredAlice.memberId)( + NonEmpty(SortedSet, SequencerMemberId.Broadcast) + ) + ) ) _ <- env.saveEventsAndBuffer(instanceIndex, events) _ <- env.saveWatermark(events.last1.timestamp).valueOrFail("saveWatermark") @@ -516,7 +521,7 @@ trait SequencerStoreTest seconds(firstPage) shouldBe 
(1L to 10L).toList seconds(secondPage) shouldBe (11L to 20L).toList - seconds(partialPage) shouldBe (16L to 20L).toList + seconds(partialPage) shouldBe (16L to 20L).toList :+ 30L } } diff --git a/sdk/canton/community/synchronizer/src/test/scala/com/digitalasset/canton/synchronizer/sequencing/service/GrpcSequencerIntegrationTest.scala b/sdk/canton/community/synchronizer/src/test/scala/com/digitalasset/canton/synchronizer/sequencing/service/GrpcSequencerIntegrationTest.scala index 5adf09831bc4..6565eee08b71 100644 --- a/sdk/canton/community/synchronizer/src/test/scala/com/digitalasset/canton/synchronizer/sequencing/service/GrpcSequencerIntegrationTest.scala +++ b/sdk/canton/community/synchronizer/src/test/scala/com/digitalasset/canton/synchronizer/sequencing/service/GrpcSequencerIntegrationTest.scala @@ -355,7 +355,7 @@ class Env(override val loggerFactory: SuppressingLogger)(implicit for { connectionPool <- EitherT.fromEither[FutureUnlessShutdown]( - connectionPoolFactory.create(poolConfig).leftMap(error => error.toString) + connectionPoolFactory.create(poolConfig, name = "test").leftMap(error => error.toString) ) _ <- if (useNewConnectionPool) diff --git a/sdk/canton/community/testing/src/main/scala/com/digitalasset/canton/interactive/ExternalPartyUtils.scala b/sdk/canton/community/testing/src/main/scala/com/digitalasset/canton/interactive/ExternalPartyUtils.scala deleted file mode 100644 index 30f087299ec3..000000000000 --- a/sdk/canton/community/testing/src/main/scala/com/digitalasset/canton/interactive/ExternalPartyUtils.scala +++ /dev/null @@ -1,263 +0,0 @@ -// Copyright (c) 2025 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. -// SPDX-License-Identifier: Apache-2.0 - -package com.digitalasset.canton.interactive - -import com.daml.ledger.api.v2.admin.party_management_service.AllocateExternalPartyRequest -import com.daml.ledger.api.v2.interactive.interactive_submission_service as iss -import com.daml.nonempty.NonEmpty -import com.digitalasset.canton.BaseTest.{testedProtocolVersion, testedReleaseProtocolVersion} -import com.digitalasset.canton.FutureHelpers -import com.digitalasset.canton.concurrent.FutureSupervisor -import com.digitalasset.canton.config.RequireTypes.PositiveInt -import com.digitalasset.canton.config.{CachingConfigs, CryptoConfig, ProcessingTimeout} -import com.digitalasset.canton.crypto.* -import com.digitalasset.canton.crypto.store.CryptoPrivateStoreFactory -import com.digitalasset.canton.data.OnboardingTransactions -import com.digitalasset.canton.logging.SuppressingLogger -import com.digitalasset.canton.resource.MemoryStorage -import com.digitalasset.canton.time.WallClock -import com.digitalasset.canton.topology.transaction.* -import com.digitalasset.canton.topology.transaction.DelegationRestriction.CanSignAllMappings -import com.digitalasset.canton.topology.transaction.TopologyTransaction.GenericTopologyTransaction -import com.digitalasset.canton.topology.{ExternalParty, ParticipantId, PartyId, SynchronizerId} -import com.digitalasset.canton.tracing.{NoReportingTracerProvider, TraceContext} -import com.google.protobuf.ByteString -import io.scalaland.chimney.dsl.* -import org.scalatest.EitherValues - -import scala.concurrent.ExecutionContext - -object ExternalPartyUtils { - final case class ExternalParty( - partyId: PartyId, - signingFingerprints: NonEmpty[Seq[Fingerprint]], - ) { - def primitiveId: String = partyId.toProtoPrimitive - } - final case class OnboardingTransactions( - namespaceDelegation: 
TopologyTransaction[TopologyChangeOp.Replace, NamespaceDelegation], - partyToParticipant: TopologyTransaction[TopologyChangeOp.Replace, PartyToParticipant], - partyToKeyMapping: TopologyTransaction[TopologyChangeOp.Replace, PartyToKeyMapping], - multiHashSignatures: Seq[Signature], - singleTransactionSignatures: Seq[(GenericTopologyTransaction, Seq[Signature])], - ) { - def toAllocateExternalPartyRequest( - synchronizerId: SynchronizerId, - identityProviderId: String = "", - ): AllocateExternalPartyRequest = - AllocateExternalPartyRequest( - synchronizer = synchronizerId.toProtoPrimitive, - onboardingTransactions = singleTransactionSignatures.map { case (transaction, signatures) => - AllocateExternalPartyRequest.SignedTransaction( - transaction.getCryptographicEvidence, - signatures.map(_.toProtoV30.transformInto[iss.Signature]), - ) - }, - multiHashSignatures = multiHashSignatures.map( - _.toProtoV30.transformInto[iss.Signature] - ), - identityProviderId = identityProviderId, - ) - } -} - -trait ExternalPartyUtils extends FutureHelpers with EitherValues { - - def loggerFactory: SuppressingLogger - def futureSupervisor: FutureSupervisor - protected def timeouts: ProcessingTimeout - def wallClock: WallClock - - implicit def externalPartyExecutionContext: ExecutionContext - implicit protected def traceContext: TraceContext - - private val storage = new MemoryStorage(loggerFactory, timeouts) - - private lazy val crypto: Crypto = Crypto - .create( - CryptoConfig(), - CachingConfigs.defaultSessionEncryptionKeyCacheConfig, - CachingConfigs.defaultPublicKeyConversionCache, - storage, - CryptoPrivateStoreFactory.withoutKms(wallClock, externalPartyExecutionContext), - testedReleaseProtocolVersion, - futureSupervisor, - wallClock, - externalPartyExecutionContext, - timeouts, - loggerFactory, - NoReportingTracerProvider, - ) - .valueOrFailShutdown("Failed to create crypto object") - .futureValue - - private def generateProtocolSigningKeys( - numberOfKeys: Int - ): NonEmpty[Seq[SigningPublicKey]] = - NonEmpty - .from( - Seq.fill(numberOfKeys)( - crypto.generateSigningKey(usage = SigningKeyUsage.ProtocolOnly).futureValueUS.value - ) - ) - .getOrElse( - fail("Expected at least one protocol signing key") - ) - - protected def generateExternalPartyOnboardingTransactions( - name: String, - confirming: Seq[ParticipantId] = Seq.empty, - observing: Seq[ParticipantId] = Seq.empty, - confirmationThreshold: PositiveInt = PositiveInt.one, - numberOfKeys: PositiveInt = PositiveInt.one, - keyThreshold: PositiveInt = PositiveInt.one, - shareNamespaceAndSigningKey: Boolean = false, - ): (OnboardingTransactions, ExternalParty) = { - - val namespaceKey: SigningPublicKey = - crypto - .generateSigningKey(usage = - if (shareNamespaceAndSigningKey) SigningKeyUsage.All else SigningKeyUsage.NamespaceOnly - ) - .futureValueUS - .value - val partyId: PartyId = PartyId.tryCreate(name, namespaceKey.fingerprint) - val protocolSigningKeys: NonEmpty[Seq[SigningPublicKey]] = - if (shareNamespaceAndSigningKey && numberOfKeys == PositiveInt.one) { - NonEmpty.mk(Seq, namespaceKey) - } else if (shareNamespaceAndSigningKey) { - NonEmpty.mk(Seq, namespaceKey, generateProtocolSigningKeys(numberOfKeys.value - 1)*) - } else generateProtocolSigningKeys(numberOfKeys.value) - - val namespaceDelegationTx = - TopologyTransaction( - TopologyChangeOp.Replace, - serial = PositiveInt.one, - NamespaceDelegation.tryCreate( - namespace = partyId.uid.namespace, - target = namespaceKey, - CanSignAllMappings, - ), - testedProtocolVersion, - ) - - val 
confirmingHostingParticipants = confirming.map { cp => - HostingParticipant( - cp, - ParticipantPermission.Confirmation, - ) - } - val observingHostingParticipants = observing.map { op => - HostingParticipant( - op, - ParticipantPermission.Observation, - ) - } - val partyToParticipantTx = - TopologyTransaction( - TopologyChangeOp.Replace, - serial = PositiveInt.one, - PartyToParticipant.tryCreate( - partyId = partyId, - threshold = confirmationThreshold, - participants = confirmingHostingParticipants ++ observingHostingParticipants, - ), - testedProtocolVersion, - ) - - val partyToKeyTx = - TopologyTransaction( - TopologyChangeOp.Replace, - serial = PositiveInt.one, - PartyToKeyMapping.tryCreate( - partyId = partyId, - threshold = keyThreshold, - signingKeys = protocolSigningKeys, - ), - testedProtocolVersion, - ) - - val transactionHashes = - NonEmpty.mk(Set, namespaceDelegationTx.hash, partyToParticipantTx.hash, partyToKeyTx.hash) - val combinedMultiTxHash = - MultiTransactionSignature.computeCombinedHash(transactionHashes, crypto.pureCrypto) - - // Sign the multi hash with the namespace key, as it is needed to authorize all 3 transactions - val namespaceSignature = - crypto.privateCrypto - .sign( - combinedMultiTxHash, - namespaceKey.fingerprint, - NonEmpty.mk(Set, SigningKeyUsage.Namespace), - ) - .futureValueUS - .value - - // The protocol key signature is only needed on the party to key mapping, so we can sign only that, and combine it with the - // namespace signature - val protocolSignatures = protocolSigningKeys.map { key => - crypto.privateCrypto - .sign( - partyToKeyTx.hash.hash, - key.fingerprint, - NonEmpty.mk(Set, SigningKeyUsage.Protocol), - ) - .futureValueUS - .value - } - - val multiTxSignatures = - NonEmpty.mk(Seq, MultiTransactionSignature(transactionHashes, namespaceSignature)) - - val signedNamespaceDelegation = SignedTopologyTransaction - .withTopologySignatures( - namespaceDelegationTx, - multiTxSignatures, - isProposal = false, - testedProtocolVersion, - ) - - val signedPartyToParticipant = SignedTopologyTransaction - .withTopologySignatures( - partyToParticipantTx, - multiTxSignatures, - isProposal = true, - testedProtocolVersion, - ) - - val signedPartyToKey = SignedTopologyTransaction - .withTopologySignatures( - partyToKeyTx, - multiTxSignatures, - isProposal = false, - testedProtocolVersion, - ) - // Merge the signature from the protocol key - .addSingleSignatures(protocolSignatures.toSet) - - ( - OnboardingTransactions(signedNamespaceDelegation, signedPartyToParticipant, signedPartyToKey), - ExternalParty(partyId, protocolSigningKeys.map(_.fingerprint)), - ) - } - - protected def signTxAs( - hash: ByteString, - p: ExternalParty, - ): Map[PartyId, Seq[Signature]] = { - val signatures = - p.signingFingerprints.map { fingerprint => - crypto.privateCrypto - .signBytes( - hash, - fingerprint, - SigningKeyUsage.ProtocolOnly, - ) - .valueOrFailShutdown("Failed to sign transaction hash") - .futureValue - } - - Map(p.partyId -> signatures.forgetNE) - } -} diff --git a/sdk/canton/community/testing/src/main/scala/com/digitalasset/canton/logging/SuppressingLogger.scala b/sdk/canton/community/testing/src/main/scala/com/digitalasset/canton/logging/SuppressingLogger.scala index 72e785fd61f5..ed7b99ab1373 100644 --- a/sdk/canton/community/testing/src/main/scala/com/digitalasset/canton/logging/SuppressingLogger.scala +++ b/sdk/canton/community/testing/src/main/scala/com/digitalasset/canton/logging/SuppressingLogger.scala @@ -127,6 +127,20 @@ class SuppressingLogger 
private[logging] ( )(implicit c: ClassTag[T], pos: source.Position): Assertion = assertLogs(rule)(checkThrowable[T](the[Throwable] thrownBy within), assertions*) + def assertThrowsAndLogsSuppressingAsync[T <: Throwable](rule: SuppressionRule)( + within: => Future[_], + assertions: (LogEntry => Assertion)* + )(implicit c: ClassTag[T], pos: source.Position): Future[Assertion] = + assertLogs(rule)( + within.transform { + case Success(_) => + fail(s"An exception of type $c was expected, but no exception was thrown.") + case Failure(c(_)) => Success(succeed) + case Failure(t) => fail(s"Exception has wrong type. Expected type: $c. Got: $t.", t) + }(directExecutionContext), + assertions *, + ) + def assertThrowsAndLogsAsync[T <: Throwable]( within: => Future[_], assertion: T => Assertion, diff --git a/sdk/canton/community/testing/src/main/scala/com/digitalasset/canton/topology/TestingIdentityFactory.scala b/sdk/canton/community/testing/src/main/scala/com/digitalasset/canton/topology/TestingIdentityFactory.scala index 29e2ac9b9457..1af2759e49c5 100644 --- a/sdk/canton/community/testing/src/main/scala/com/digitalasset/canton/topology/TestingIdentityFactory.scala +++ b/sdk/canton/community/testing/src/main/scala/com/digitalasset/canton/topology/TestingIdentityFactory.scala @@ -887,9 +887,15 @@ class TestingOwnerWithKeys( ) ) - val p1_dtc = mkAdd(SynchronizerTrustCertificate(participant1, synchronizerId)) - val p2_dtc = mkAdd(SynchronizerTrustCertificate(participant2, synchronizerId)) - val p3_dtc = mkAdd(SynchronizerTrustCertificate(participant3, synchronizerId)) + val p1_dtc = mkAdd( + SynchronizerTrustCertificate(participant1, synchronizerId) + ) + val p2_dtc = mkAdd( + SynchronizerTrustCertificate(participant2, synchronizerId) + ) + val p3_dtc = mkAdd( + SynchronizerTrustCertificate(participant3, synchronizerId) + ) val p1_otk = mkAddMultiKey( OwnerToKeyMapping .tryCreate(participant1, NonEmpty(Seq, EncryptionKeys.key1, SigningKeys.key1)), diff --git a/sdk/canton/community/upgrading-integration-tests/src/test/daml/AppUpgrade/V1/daml.yaml b/sdk/canton/community/upgrading-integration-tests/src/test/daml/AppUpgrade/V1/daml.yaml index 2fd6e7d53bd8..5b9c825f559d 100644 --- a/sdk/canton/community/upgrading-integration-tests/src/test/daml/AppUpgrade/V1/daml.yaml +++ b/sdk/canton/community/upgrading-integration-tests/src/test/daml/AppUpgrade/V1/daml.yaml @@ -1,4 +1,4 @@ -sdk-version: 3.4.0-snapshot.20251002.14252.0.v9bd7e7ad +sdk-version: 3.4.0-snapshot.20251007.14274.0.ve2024cd6 build-options: - --target=2.1 name: AppUpgrade diff --git a/sdk/canton/community/upgrading-integration-tests/src/test/daml/AppUpgrade/V2/daml.yaml b/sdk/canton/community/upgrading-integration-tests/src/test/daml/AppUpgrade/V2/daml.yaml index 1f8812b334c2..eea7e2f82086 100644 --- a/sdk/canton/community/upgrading-integration-tests/src/test/daml/AppUpgrade/V2/daml.yaml +++ b/sdk/canton/community/upgrading-integration-tests/src/test/daml/AppUpgrade/V2/daml.yaml @@ -1,4 +1,4 @@ -sdk-version: 3.4.0-snapshot.20251002.14252.0.v9bd7e7ad +sdk-version: 3.4.0-snapshot.20251007.14274.0.ve2024cd6 build-options: - --target=2.1 name: AppUpgrade diff --git a/sdk/canton/community/upgrading-integration-tests/src/test/daml/CantonUpgrade/If/daml.yaml b/sdk/canton/community/upgrading-integration-tests/src/test/daml/CantonUpgrade/If/daml.yaml index a35c40cc4913..3e6f57f14499 100644 --- a/sdk/canton/community/upgrading-integration-tests/src/test/daml/CantonUpgrade/If/daml.yaml +++ 
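// [Editorial example, not part of the patch] The `case Failure(c(_))` pattern in
// assertThrowsAndLogsSuppressingAsync above uses the ClassTag's unapply to test the
// throwable's runtime class. Stand-alone sketch of the same transform, with illustrative
// names:
import scala.concurrent.{ExecutionContext, Future}
import scala.reflect.ClassTag
import scala.util.{Failure, Success}

object ExpectFailureSketch {
  def expectFailure[T <: Throwable](f: Future[Any])(implicit
      c: ClassTag[T],
      ec: ExecutionContext,
  ): Future[String] =
    f.transform {
      case Success(_) => Success(s"expected $c, but no exception was thrown")
      case Failure(c(_)) => Success("ok: failed with the expected type")
      case Failure(t) => Success(s"wrong exception type, got: $t")
    }
}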
b/sdk/canton/community/upgrading-integration-tests/src/test/daml/CantonUpgrade/If/daml.yaml @@ -1,4 +1,4 @@ -sdk-version: 3.4.0-snapshot.20251002.14252.0.v9bd7e7ad +sdk-version: 3.4.0-snapshot.20251007.14274.0.ve2024cd6 build-options: - --target=2.1 - --enable-interfaces=yes diff --git a/sdk/canton/community/upgrading-integration-tests/src/test/daml/CantonUpgrade/V1/daml.yaml b/sdk/canton/community/upgrading-integration-tests/src/test/daml/CantonUpgrade/V1/daml.yaml index ea9572d2d66b..c358468179aa 100644 --- a/sdk/canton/community/upgrading-integration-tests/src/test/daml/CantonUpgrade/V1/daml.yaml +++ b/sdk/canton/community/upgrading-integration-tests/src/test/daml/CantonUpgrade/V1/daml.yaml @@ -1,4 +1,4 @@ -sdk-version: 3.4.0-snapshot.20251002.14252.0.v9bd7e7ad +sdk-version: 3.4.0-snapshot.20251007.14274.0.ve2024cd6 build-options: - --target=2.1 - --enable-interfaces=yes diff --git a/sdk/canton/community/upgrading-integration-tests/src/test/daml/CantonUpgrade/V2/daml.yaml b/sdk/canton/community/upgrading-integration-tests/src/test/daml/CantonUpgrade/V2/daml.yaml index 4337c9c301ef..9253970c41a7 100644 --- a/sdk/canton/community/upgrading-integration-tests/src/test/daml/CantonUpgrade/V2/daml.yaml +++ b/sdk/canton/community/upgrading-integration-tests/src/test/daml/CantonUpgrade/V2/daml.yaml @@ -1,4 +1,4 @@ -sdk-version: 3.4.0-snapshot.20251002.14252.0.v9bd7e7ad +sdk-version: 3.4.0-snapshot.20251007.14274.0.ve2024cd6 build-options: - --target=2.1 - --enable-interfaces=yes diff --git a/sdk/canton/community/upgrading-integration-tests/src/test/daml/DvP/AssetFactory/V1/daml.yaml b/sdk/canton/community/upgrading-integration-tests/src/test/daml/DvP/AssetFactory/V1/daml.yaml index 7cb402cfba6a..98f605806ecb 100644 --- a/sdk/canton/community/upgrading-integration-tests/src/test/daml/DvP/AssetFactory/V1/daml.yaml +++ b/sdk/canton/community/upgrading-integration-tests/src/test/daml/DvP/AssetFactory/V1/daml.yaml @@ -1,4 +1,4 @@ -sdk-version: 3.4.0-snapshot.20251002.14252.0.v9bd7e7ad +sdk-version: 3.4.0-snapshot.20251007.14274.0.ve2024cd6 module-prefixes: !java.util.LinkedHashMap dvp-assets-1.0.0: DvpAssetsV1 build-options: diff --git a/sdk/canton/community/upgrading-integration-tests/src/test/daml/DvP/AssetFactory/V2/daml.yaml b/sdk/canton/community/upgrading-integration-tests/src/test/daml/DvP/AssetFactory/V2/daml.yaml index 38078eb14ced..88ca3e48fd75 100644 --- a/sdk/canton/community/upgrading-integration-tests/src/test/daml/DvP/AssetFactory/V2/daml.yaml +++ b/sdk/canton/community/upgrading-integration-tests/src/test/daml/DvP/AssetFactory/V2/daml.yaml @@ -1,4 +1,4 @@ -sdk-version: 3.4.0-snapshot.20251002.14252.0.v9bd7e7ad +sdk-version: 3.4.0-snapshot.20251007.14274.0.ve2024cd6 module-prefixes: !java.util.LinkedHashMap dvp-assets-1.0.0: DvpAssetsV1 dvp-assets-2.0.0: DvpAssetsV2 diff --git a/sdk/canton/community/upgrading-integration-tests/src/test/daml/DvP/Assets/V1/daml.yaml b/sdk/canton/community/upgrading-integration-tests/src/test/daml/DvP/Assets/V1/daml.yaml index 8337ff89f5d0..0e42319a1ce7 100644 --- a/sdk/canton/community/upgrading-integration-tests/src/test/daml/DvP/Assets/V1/daml.yaml +++ b/sdk/canton/community/upgrading-integration-tests/src/test/daml/DvP/Assets/V1/daml.yaml @@ -1,4 +1,4 @@ -sdk-version: 3.4.0-snapshot.20251002.14252.0.v9bd7e7ad +sdk-version: 3.4.0-snapshot.20251007.14274.0.ve2024cd6 build-options: - --target=2.1 name: dvp-assets diff --git a/sdk/canton/community/upgrading-integration-tests/src/test/daml/DvP/Assets/V2/daml.yaml 
b/sdk/canton/community/upgrading-integration-tests/src/test/daml/DvP/Assets/V2/daml.yaml
index 8e2e42220236..250960efb23a 100644
--- a/sdk/canton/community/upgrading-integration-tests/src/test/daml/DvP/Assets/V2/daml.yaml
+++ b/sdk/canton/community/upgrading-integration-tests/src/test/daml/DvP/Assets/V2/daml.yaml
@@ -1,4 +1,4 @@
-sdk-version: 3.4.0-snapshot.20251002.14252.0.v9bd7e7ad
+sdk-version: 3.4.0-snapshot.20251007.14274.0.ve2024cd6
 build-options:
 - --target=2.1
 name: dvp-assets
diff --git a/sdk/canton/community/upgrading-integration-tests/src/test/daml/DvP/Offer/V1/daml.yaml b/sdk/canton/community/upgrading-integration-tests/src/test/daml/DvP/Offer/V1/daml.yaml
index 7c47e1e46517..6590bdb7212b 100644
--- a/sdk/canton/community/upgrading-integration-tests/src/test/daml/DvP/Offer/V1/daml.yaml
+++ b/sdk/canton/community/upgrading-integration-tests/src/test/daml/DvP/Offer/V1/daml.yaml
@@ -1,4 +1,4 @@
-sdk-version: 3.4.0-snapshot.20251002.14252.0.v9bd7e7ad
+sdk-version: 3.4.0-snapshot.20251007.14274.0.ve2024cd6
 build-options:
 - --target=2.1
 name: dvp-offer
diff --git a/sdk/canton/community/upgrading-integration-tests/src/test/daml/DvP/Offer/V2/daml.yaml b/sdk/canton/community/upgrading-integration-tests/src/test/daml/DvP/Offer/V2/daml.yaml
index da454301459d..e243795de807 100644
--- a/sdk/canton/community/upgrading-integration-tests/src/test/daml/DvP/Offer/V2/daml.yaml
+++ b/sdk/canton/community/upgrading-integration-tests/src/test/daml/DvP/Offer/V2/daml.yaml
@@ -1,4 +1,4 @@
-sdk-version: 3.4.0-snapshot.20251002.14252.0.v9bd7e7ad
+sdk-version: 3.4.0-snapshot.20251007.14274.0.ve2024cd6
 build-options:
 - --target=2.1
 name: dvp-offer
diff --git a/sdk/canton/community/upgrading-integration-tests/src/test/daml/NonConforming/V1/daml.yaml b/sdk/canton/community/upgrading-integration-tests/src/test/daml/NonConforming/V1/daml.yaml
index ee7e30227072..09da1500b1e4 100644
--- a/sdk/canton/community/upgrading-integration-tests/src/test/daml/NonConforming/V1/daml.yaml
+++ b/sdk/canton/community/upgrading-integration-tests/src/test/daml/NonConforming/V1/daml.yaml
@@ -1,4 +1,4 @@
-sdk-version: 3.4.0-snapshot.20251002.14252.0.v9bd7e7ad
+sdk-version: 3.4.0-snapshot.20251007.14274.0.ve2024cd6
 build-options:
 - --target=2.1
 - --enable-interfaces=yes
diff --git a/sdk/canton/community/upgrading-integration-tests/src/test/daml/NonConforming/V2/daml.yaml b/sdk/canton/community/upgrading-integration-tests/src/test/daml/NonConforming/V2/daml.yaml
index 3f05a8bc1425..893e8e11f423 100644
--- a/sdk/canton/community/upgrading-integration-tests/src/test/daml/NonConforming/V2/daml.yaml
+++ b/sdk/canton/community/upgrading-integration-tests/src/test/daml/NonConforming/V2/daml.yaml
@@ -1,4 +1,4 @@
-sdk-version: 3.4.0-snapshot.20251002.14252.0.v9bd7e7ad
+sdk-version: 3.4.0-snapshot.20251007.14274.0.ve2024cd6
 build-options:
 - --target=2.1
 - --enable-interfaces=yes
diff --git a/sdk/canton/community/upgrading-integration-tests/src/test/daml/Systematic/Bar/V1/daml.yaml b/sdk/canton/community/upgrading-integration-tests/src/test/daml/Systematic/Bar/V1/daml.yaml
index 9967ab6582f6..0139db7fb68d 100644
--- a/sdk/canton/community/upgrading-integration-tests/src/test/daml/Systematic/Bar/V1/daml.yaml
+++ b/sdk/canton/community/upgrading-integration-tests/src/test/daml/Systematic/Bar/V1/daml.yaml
@@ -1,4 +1,4 @@
-sdk-version: 3.4.0-snapshot.20251002.14252.0.v9bd7e7ad
+sdk-version: 3.4.0-snapshot.20251007.14274.0.ve2024cd6
 build-options:
 - --target=2.1
 name: bar
diff --git a/sdk/canton/community/upgrading-integration-tests/src/test/daml/Systematic/Bar/V2/daml.yaml b/sdk/canton/community/upgrading-integration-tests/src/test/daml/Systematic/Bar/V2/daml.yaml
index 105f809b4d29..c9c103d91f6d 100644
--- a/sdk/canton/community/upgrading-integration-tests/src/test/daml/Systematic/Bar/V2/daml.yaml
+++ b/sdk/canton/community/upgrading-integration-tests/src/test/daml/Systematic/Bar/V2/daml.yaml
@@ -1,4 +1,4 @@
-sdk-version: 3.4.0-snapshot.20251002.14252.0.v9bd7e7ad
+sdk-version: 3.4.0-snapshot.20251007.14274.0.ve2024cd6
 build-options:
 - --target=2.1
 name: bar
diff --git a/sdk/canton/community/upgrading-integration-tests/src/test/daml/Systematic/Baz/V1/daml.yaml b/sdk/canton/community/upgrading-integration-tests/src/test/daml/Systematic/Baz/V1/daml.yaml
index 2d9ee4087415..3479f4e89e4d 100644
--- a/sdk/canton/community/upgrading-integration-tests/src/test/daml/Systematic/Baz/V1/daml.yaml
+++ b/sdk/canton/community/upgrading-integration-tests/src/test/daml/Systematic/Baz/V1/daml.yaml
@@ -1,4 +1,4 @@
-sdk-version: 3.4.0-snapshot.20251002.14252.0.v9bd7e7ad
+sdk-version: 3.4.0-snapshot.20251007.14274.0.ve2024cd6
 build-options:
 - --target=2.1
 name: baz
diff --git a/sdk/canton/community/upgrading-integration-tests/src/test/daml/Systematic/Baz/V2/daml.yaml b/sdk/canton/community/upgrading-integration-tests/src/test/daml/Systematic/Baz/V2/daml.yaml
index 356e7f00a7e0..7572817152e9 100644
--- a/sdk/canton/community/upgrading-integration-tests/src/test/daml/Systematic/Baz/V2/daml.yaml
+++ b/sdk/canton/community/upgrading-integration-tests/src/test/daml/Systematic/Baz/V2/daml.yaml
@@ -1,4 +1,4 @@
-sdk-version: 3.4.0-snapshot.20251002.14252.0.v9bd7e7ad
+sdk-version: 3.4.0-snapshot.20251007.14274.0.ve2024cd6
 build-options:
 - --target=2.1
 name: baz
diff --git a/sdk/canton/community/upgrading-integration-tests/src/test/daml/Systematic/Foo/V1/daml.yaml b/sdk/canton/community/upgrading-integration-tests/src/test/daml/Systematic/Foo/V1/daml.yaml
index e4d50d5fb0bf..c180ef19feca 100644
--- a/sdk/canton/community/upgrading-integration-tests/src/test/daml/Systematic/Foo/V1/daml.yaml
+++ b/sdk/canton/community/upgrading-integration-tests/src/test/daml/Systematic/Foo/V1/daml.yaml
@@ -1,4 +1,4 @@
-sdk-version: 3.4.0-snapshot.20251002.14252.0.v9bd7e7ad
+sdk-version: 3.4.0-snapshot.20251007.14274.0.ve2024cd6
 build-options:
 - --target=2.1
 name: foo
diff --git a/sdk/canton/community/upgrading-integration-tests/src/test/daml/Systematic/Foo/V2/daml.yaml b/sdk/canton/community/upgrading-integration-tests/src/test/daml/Systematic/Foo/V2/daml.yaml
index 24d2a50327a8..9a41c6ff5a72 100644
--- a/sdk/canton/community/upgrading-integration-tests/src/test/daml/Systematic/Foo/V2/daml.yaml
+++ b/sdk/canton/community/upgrading-integration-tests/src/test/daml/Systematic/Foo/V2/daml.yaml
@@ -1,4 +1,4 @@
-sdk-version: 3.4.0-snapshot.20251002.14252.0.v9bd7e7ad
+sdk-version: 3.4.0-snapshot.20251007.14274.0.ve2024cd6
 build-options:
 - --target=2.1
 name: foo
diff --git a/sdk/canton/community/upgrading-integration-tests/src/test/daml/Systematic/Foo/V3/daml.yaml b/sdk/canton/community/upgrading-integration-tests/src/test/daml/Systematic/Foo/V3/daml.yaml
index 325cfda09c0f..c69034ae81e0 100644
--- a/sdk/canton/community/upgrading-integration-tests/src/test/daml/Systematic/Foo/V3/daml.yaml
+++ b/sdk/canton/community/upgrading-integration-tests/src/test/daml/Systematic/Foo/V3/daml.yaml
@@ -1,4 +1,4 @@
-sdk-version: 3.4.0-snapshot.20251002.14252.0.v9bd7e7ad
+sdk-version: 3.4.0-snapshot.20251007.14274.0.ve2024cd6
 build-options:
 - --target=2.1
 name: foo
diff --git a/sdk/canton/community/upgrading-integration-tests/src/test/daml/Systematic/Foo/V4/daml.yaml b/sdk/canton/community/upgrading-integration-tests/src/test/daml/Systematic/Foo/V4/daml.yaml
index fbc9eaa6d464..00041981a422 100644
--- a/sdk/canton/community/upgrading-integration-tests/src/test/daml/Systematic/Foo/V4/daml.yaml
+++ b/sdk/canton/community/upgrading-integration-tests/src/test/daml/Systematic/Foo/V4/daml.yaml
@@ -1,4 +1,4 @@
-sdk-version: 3.4.0-snapshot.20251002.14252.0.v9bd7e7ad
+sdk-version: 3.4.0-snapshot.20251007.14274.0.ve2024cd6
 build-options:
 - --target=2.1
 name: foo
diff --git a/sdk/canton/community/upgrading-integration-tests/src/test/daml/Systematic/IBar/daml.yaml b/sdk/canton/community/upgrading-integration-tests/src/test/daml/Systematic/IBar/daml.yaml
index 7ca7fc715aaa..2a8718f1f5ec 100644
--- a/sdk/canton/community/upgrading-integration-tests/src/test/daml/Systematic/IBar/daml.yaml
+++ b/sdk/canton/community/upgrading-integration-tests/src/test/daml/Systematic/IBar/daml.yaml
@@ -1,4 +1,4 @@
-sdk-version: 3.4.0-snapshot.20251002.14252.0.v9bd7e7ad
+sdk-version: 3.4.0-snapshot.20251007.14274.0.ve2024cd6
 build-options:
 - --target=2.1
 name: ibar
diff --git a/sdk/canton/community/upgrading-integration-tests/src/test/daml/Systematic/IBaz/daml.yaml b/sdk/canton/community/upgrading-integration-tests/src/test/daml/Systematic/IBaz/daml.yaml
index b551f3d94513..c8db4baf668f 100644
--- a/sdk/canton/community/upgrading-integration-tests/src/test/daml/Systematic/IBaz/daml.yaml
+++ b/sdk/canton/community/upgrading-integration-tests/src/test/daml/Systematic/IBaz/daml.yaml
@@ -1,4 +1,4 @@
-sdk-version: 3.4.0-snapshot.20251002.14252.0.v9bd7e7ad
+sdk-version: 3.4.0-snapshot.20251007.14274.0.ve2024cd6
 build-options:
 - --target=2.1
 name: ibaz
diff --git a/sdk/canton/community/upgrading-integration-tests/src/test/daml/Systematic/Util/V1/daml.yaml b/sdk/canton/community/upgrading-integration-tests/src/test/daml/Systematic/Util/V1/daml.yaml
index d291b86ba35e..bc0d2bac2ed2 100644
--- a/sdk/canton/community/upgrading-integration-tests/src/test/daml/Systematic/Util/V1/daml.yaml
+++ b/sdk/canton/community/upgrading-integration-tests/src/test/daml/Systematic/Util/V1/daml.yaml
@@ -1,4 +1,4 @@
-sdk-version: 3.4.0-snapshot.20251002.14252.0.v9bd7e7ad
+sdk-version: 3.4.0-snapshot.20251007.14274.0.ve2024cd6
 build-options:
 - --target=2.1
 name: util
diff --git a/sdk/canton/community/upgrading-integration-tests/src/test/daml/Systematic/Util/V2/daml.yaml b/sdk/canton/community/upgrading-integration-tests/src/test/daml/Systematic/Util/V2/daml.yaml
index 2d1a4455492d..4bd7384bbe71 100644
--- a/sdk/canton/community/upgrading-integration-tests/src/test/daml/Systematic/Util/V2/daml.yaml
+++ b/sdk/canton/community/upgrading-integration-tests/src/test/daml/Systematic/Util/V2/daml.yaml
@@ -1,4 +1,4 @@
-sdk-version: 3.4.0-snapshot.20251002.14252.0.v9bd7e7ad
+sdk-version: 3.4.0-snapshot.20251007.14274.0.ve2024cd6
 build-options:
 - --target=2.1
 name: util
diff --git a/sdk/canton/community/upgrading-integration-tests/src/test/daml/TopologyAwarePackageSelection/FeaturedAppRight/If/daml.yaml b/sdk/canton/community/upgrading-integration-tests/src/test/daml/TopologyAwarePackageSelection/FeaturedAppRight/If/daml.yaml
index 8c475e514d93..03cf10a92fcd 100644
--- a/sdk/canton/community/upgrading-integration-tests/src/test/daml/TopologyAwarePackageSelection/FeaturedAppRight/If/daml.yaml
+++ b/sdk/canton/community/upgrading-integration-tests/src/test/daml/TopologyAwarePackageSelection/FeaturedAppRight/If/daml.yaml
@@ -1,4 +1,4 @@
-sdk-version: 3.4.0-snapshot.20251002.14252.0.v9bd7e7ad
+sdk-version: 3.4.0-snapshot.20251007.14274.0.ve2024cd6
 build-options:
 - --target=2.1
 - --enable-interfaces=yes
diff --git a/sdk/canton/community/upgrading-integration-tests/src/test/daml/TopologyAwarePackageSelection/FeaturedAppRight/V1/daml.yaml b/sdk/canton/community/upgrading-integration-tests/src/test/daml/TopologyAwarePackageSelection/FeaturedAppRight/V1/daml.yaml
index 37da8ec516f9..bf99b467fd99 100644
--- a/sdk/canton/community/upgrading-integration-tests/src/test/daml/TopologyAwarePackageSelection/FeaturedAppRight/V1/daml.yaml
+++ b/sdk/canton/community/upgrading-integration-tests/src/test/daml/TopologyAwarePackageSelection/FeaturedAppRight/V1/daml.yaml
@@ -1,4 +1,4 @@
-sdk-version: 3.4.0-snapshot.20251002.14252.0.v9bd7e7ad
+sdk-version: 3.4.0-snapshot.20251007.14274.0.ve2024cd6
 build-options:
 - --target=2.1
 - --enable-interfaces=yes
diff --git a/sdk/canton/community/upgrading-integration-tests/src/test/daml/TopologyAwarePackageSelection/FeaturedAppRight/V2/daml.yaml b/sdk/canton/community/upgrading-integration-tests/src/test/daml/TopologyAwarePackageSelection/FeaturedAppRight/V2/daml.yaml
index d5ebe13005ea..305e76692b50 100644
--- a/sdk/canton/community/upgrading-integration-tests/src/test/daml/TopologyAwarePackageSelection/FeaturedAppRight/V2/daml.yaml
+++ b/sdk/canton/community/upgrading-integration-tests/src/test/daml/TopologyAwarePackageSelection/FeaturedAppRight/V2/daml.yaml
@@ -1,4 +1,4 @@
-sdk-version: 3.4.0-snapshot.20251002.14252.0.v9bd7e7ad
+sdk-version: 3.4.0-snapshot.20251007.14274.0.ve2024cd6
 build-options:
 - --target=2.1
 - --enable-interfaces=yes
diff --git a/sdk/canton/community/upgrading-integration-tests/src/test/daml/TopologyAwarePackageSelection/ScenarioAppInstall/V1/daml.yaml b/sdk/canton/community/upgrading-integration-tests/src/test/daml/TopologyAwarePackageSelection/ScenarioAppInstall/V1/daml.yaml
index 27fc9c8c75d2..2f3ff5a562de 100644
--- a/sdk/canton/community/upgrading-integration-tests/src/test/daml/TopologyAwarePackageSelection/ScenarioAppInstall/V1/daml.yaml
+++ b/sdk/canton/community/upgrading-integration-tests/src/test/daml/TopologyAwarePackageSelection/ScenarioAppInstall/V1/daml.yaml
@@ -1,4 +1,4 @@
-sdk-version: 3.4.0-snapshot.20251002.14252.0.v9bd7e7ad
+sdk-version: 3.4.0-snapshot.20251007.14274.0.ve2024cd6
 build-options:
 - --target=2.1
 - --enable-interfaces=yes
diff --git a/sdk/canton/community/upgrading-integration-tests/src/test/daml/TopologyAwarePackageSelection/ScenarioAppInstall/V2/daml.yaml b/sdk/canton/community/upgrading-integration-tests/src/test/daml/TopologyAwarePackageSelection/ScenarioAppInstall/V2/daml.yaml
index 0c5697803c67..c17a15384fee 100644
--- a/sdk/canton/community/upgrading-integration-tests/src/test/daml/TopologyAwarePackageSelection/ScenarioAppInstall/V2/daml.yaml
+++ b/sdk/canton/community/upgrading-integration-tests/src/test/daml/TopologyAwarePackageSelection/ScenarioAppInstall/V2/daml.yaml
@@ -1,4 +1,4 @@
-sdk-version: 3.4.0-snapshot.20251002.14252.0.v9bd7e7ad
+sdk-version: 3.4.0-snapshot.20251007.14274.0.ve2024cd6
 build-options:
 - --target=2.1
 - --enable-interfaces=yes
diff --git a/sdk/canton/community/upgrading-integration-tests/src/test/daml/UpgradesWithInterfaces/HoldingV1/daml.yaml b/sdk/canton/community/upgrading-integration-tests/src/test/daml/UpgradesWithInterfaces/HoldingV1/daml.yaml
index 78ccd2a456b7..59071bc9ed7b 100644
--- a/sdk/canton/community/upgrading-integration-tests/src/test/daml/UpgradesWithInterfaces/HoldingV1/daml.yaml
+++ b/sdk/canton/community/upgrading-integration-tests/src/test/daml/UpgradesWithInterfaces/HoldingV1/daml.yaml
@@ -1,4 +1,4 @@
-sdk-version: 3.4.0-snapshot.20251002.14252.0.v9bd7e7ad
+sdk-version: 3.4.0-snapshot.20251007.14274.0.ve2024cd6
 build-options:
 - --target=2.1
 name: tests-Holding-v1
diff --git a/sdk/canton/community/upgrading-integration-tests/src/test/daml/UpgradesWithInterfaces/HoldingV2/daml.yaml b/sdk/canton/community/upgrading-integration-tests/src/test/daml/UpgradesWithInterfaces/HoldingV2/daml.yaml
index e78d19ba563c..727b31575be0 100644
--- a/sdk/canton/community/upgrading-integration-tests/src/test/daml/UpgradesWithInterfaces/HoldingV2/daml.yaml
+++ b/sdk/canton/community/upgrading-integration-tests/src/test/daml/UpgradesWithInterfaces/HoldingV2/daml.yaml
@@ -1,4 +1,4 @@
-sdk-version: 3.4.0-snapshot.20251002.14252.0.v9bd7e7ad
+sdk-version: 3.4.0-snapshot.20251007.14274.0.ve2024cd6
 build-options:
 - --target=2.1
 name: tests-Holding-v2
diff --git a/sdk/canton/community/upgrading-integration-tests/src/test/daml/UpgradesWithInterfaces/TokenV1/daml.yaml b/sdk/canton/community/upgrading-integration-tests/src/test/daml/UpgradesWithInterfaces/TokenV1/daml.yaml
index f875448f71a2..e7095f16539c 100644
--- a/sdk/canton/community/upgrading-integration-tests/src/test/daml/UpgradesWithInterfaces/TokenV1/daml.yaml
+++ b/sdk/canton/community/upgrading-integration-tests/src/test/daml/UpgradesWithInterfaces/TokenV1/daml.yaml
@@ -1,4 +1,4 @@
-sdk-version: 3.4.0-snapshot.20251002.14252.0.v9bd7e7ad
+sdk-version: 3.4.0-snapshot.20251007.14274.0.ve2024cd6
 build-options:
 - --target=2.1
 - --enable-interfaces=yes
diff --git a/sdk/canton/community/upgrading-integration-tests/src/test/daml/UpgradesWithInterfaces/TokenV2/daml.yaml b/sdk/canton/community/upgrading-integration-tests/src/test/daml/UpgradesWithInterfaces/TokenV2/daml.yaml
index 087395bfe7eb..3367a7e9e2d9 100644
--- a/sdk/canton/community/upgrading-integration-tests/src/test/daml/UpgradesWithInterfaces/TokenV2/daml.yaml
+++ b/sdk/canton/community/upgrading-integration-tests/src/test/daml/UpgradesWithInterfaces/TokenV2/daml.yaml
@@ -1,4 +1,4 @@
-sdk-version: 3.4.0-snapshot.20251002.14252.0.v9bd7e7ad
+sdk-version: 3.4.0-snapshot.20251007.14274.0.ve2024cd6
 build-options:
 - --target=2.1
 - --enable-interfaces=yes
diff --git a/sdk/canton/community/upgrading-integration-tests/src/test/daml/UpgradesWithInterfaces/TokenV3/daml.yaml b/sdk/canton/community/upgrading-integration-tests/src/test/daml/UpgradesWithInterfaces/TokenV3/daml.yaml
index 9e3fe5a72982..6282dbb76024 100644
--- a/sdk/canton/community/upgrading-integration-tests/src/test/daml/UpgradesWithInterfaces/TokenV3/daml.yaml
+++ b/sdk/canton/community/upgrading-integration-tests/src/test/daml/UpgradesWithInterfaces/TokenV3/daml.yaml
@@ -1,4 +1,4 @@
-sdk-version: 3.4.0-snapshot.20251002.14252.0.v9bd7e7ad
+sdk-version: 3.4.0-snapshot.20251007.14274.0.ve2024cd6
 name: tests-Token
 data-dependencies:
 - ../../../scala-2.13/resource_managed/test/tests-Holding-v1-1.0.0.dar
diff --git a/sdk/canton/community/upgrading-integration-tests/src/test/daml/UpgradesWithInterfaces/TokenV4/daml.yaml b/sdk/canton/community/upgrading-integration-tests/src/test/daml/UpgradesWithInterfaces/TokenV4/daml.yaml
index 4dfe6549465e..3a16dd4405c6 100644
--- a/sdk/canton/community/upgrading-integration-tests/src/test/daml/UpgradesWithInterfaces/TokenV4/daml.yaml
+++ b/sdk/canton/community/upgrading-integration-tests/src/test/daml/UpgradesWithInterfaces/TokenV4/daml.yaml
@@ -1,4 +1,4 @@
-sdk-version: 3.4.0-snapshot.20251002.14252.0.v9bd7e7ad
+sdk-version: 3.4.0-snapshot.20251007.14274.0.ve2024cd6
 name: tests-Token
 data-dependencies:
 - ../../../scala-2.13/resource_managed/test/tests-Holding-v1-1.0.0.dar
diff --git a/sdk/canton/community/upgrading-integration-tests/src/test/scala/com/digitalasset/canton/integration/tests/upgrading/InteractiveSubmissionUpgradingTest.scala b/sdk/canton/community/upgrading-integration-tests/src/test/scala/com/digitalasset/canton/integration/tests/upgrading/InteractiveSubmissionUpgradingTest.scala
index 5aecc011a354..353ef0f2fea7 100644
--- a/sdk/canton/community/upgrading-integration-tests/src/test/scala/com/digitalasset/canton/integration/tests/upgrading/InteractiveSubmissionUpgradingTest.scala
+++ b/sdk/canton/community/upgrading-integration-tests/src/test/scala/com/digitalasset/canton/integration/tests/upgrading/InteractiveSubmissionUpgradingTest.scala
@@ -3,77 +3,174 @@
 package com.digitalasset.canton.integration.tests.upgrading
 
-import com.daml.ledger.api.v2.commands.{Command, DisclosedContract}
-import com.daml.ledger.api.v2.event.CreatedEvent.toJavaProto
-import com.digitalasset.canton.LfPackageId
-import com.digitalasset.canton.damltests.upgrade.v1.java.upgrade.Upgrading as UpgradingV1
+import com.daml.ledger.api.v2.transaction.Transaction
+import com.daml.ledger.javaapi.data.DisclosedContract
+import com.digitalasset.canton.config.RequireTypes.PositiveInt
+import com.digitalasset.canton.console.LocalParticipantReference
+import com.digitalasset.canton.damltests.upgrade
+import com.digitalasset.canton.damltests.upgrade.v1.java.upgrade.{FetchQuote, Quote}
 import com.digitalasset.canton.integration.EnvironmentDefinition
 import com.digitalasset.canton.integration.tests.ledgerapi.submission.InteractiveSubmissionIntegrationTestSetup
+import com.digitalasset.canton.integration.util.PartyToParticipantDeclarative
+import com.digitalasset.canton.participant.ledger.api.client.JavaDecodeUtil
+import com.digitalasset.canton.topology.ExternalParty
+import com.digitalasset.canton.topology.transaction.ParticipantPermission
+import com.digitalasset.canton.{HasExecutionContext, LfPackageId}
+import org.scalatest.OptionValues
 
-class InteractiveSubmissionUpgradingTest extends InteractiveSubmissionIntegrationTestSetup {
+import scala.jdk.CollectionConverters.SeqHasAsJava
+
+class InteractiveSubmissionUpgradingTest
+    extends InteractiveSubmissionIntegrationTestSetup
+    with OptionValues
+    with HasExecutionContext {
+
+  private var lse: ExternalParty = _
+  private var alice: ExternalParty = _
+  private var bob: ExternalParty = _
+  private val v1PackageId = upgrade.v1.java.upgrade.Quote.PACKAGE_ID
+  private val v2PackageId = upgrade.v2.java.upgrade.Quote.PACKAGE_ID
 
   override def environmentDefinition: EnvironmentDefinition =
     super.environmentDefinition
       .withSetup { implicit env =>
         import env.*
+        participant1.dars.upload(UpgradingBaseTest.UpgradeV1)
         participant1.dars.upload(UpgradingBaseTest.UpgradeV2)
         participant2.dars.upload(UpgradingBaseTest.UpgradeV2)
+        participant3.dars.upload(UpgradingBaseTest.UpgradeV1)
+
+        lse = participant1.parties.external.enable("LSE")
+        alice = participant2.parties.external.enable("Alice")
+        bob = participant3.parties.external.enable("Bob")
       }
 
   "Interactive submission" should {
-    // TODO(#23876) - remove ignore
-    "prepare a transaction without the creating package of an input contract" ignore {
-      implicit env =>
-        import env.*
+    "use a v2 disclosed contract available on all participants" in { implicit env =>
+      import env.*
+      val (quoteCid, disclosedQuote) = discloseQuote(lse, participant1, v2PackageId)
+      val (fetchQuote, _) = createFetchQuote(participant2, alice)
+      exerciseFetch(participant2, quoteCid, disclosedQuote, fetchQuote, alice)
+    }
 
-        val bobE = participant1.parties.external.enable("BobE")
+    "use a v1 disclosed contract on a participant that only has v2 available" in { implicit env =>
+      import env.*
+      val (quoteCid, disclosedQuote) = discloseQuote(lse, participant1, v1PackageId)
+      val (fetchQuote, _) = createFetchQuote(participant2, alice)
+      exerciseFetch(participant2, quoteCid, disclosedQuote, fetchQuote, alice)
+    }
 
-        // Create a V1 contract with alice on P1
-        val commands = new UpgradingV1(
-          bobE.toProtoPrimitive,
-          bobE.toProtoPrimitive,
-          100,
-        )
-        val prepared = participant1.ledger_api.interactive_submission.prepare(
-          Seq(bobE.partyId),
-          Seq(Command.fromJavaProto(commands.create.commands.loneElement.toProtoCommand)),
-          userPackageSelectionPreference =
-            Seq(LfPackageId.assertFromString(UpgradingV1.PACKAGE_ID)), // Force V1
-        )
-        val execResponse = execAndWait(
-          prepared,
-          Map(bobE.partyId -> global_secret.sign(prepared.preparedTransactionHash, bobE)),
-        )
-        val event = findTransactionByUpdateId(
-          bobE,
-          execResponse.updateId,
-          verbose = true,
-        ).events.head.getCreated
-
-        val contract = UpgradingV1.Contract.fromCreatedEvent(
-          com.daml.ledger.javaapi.data.CreatedEvent.fromProto(toJavaProto(event))
-        )
-        val changeOwnerCommands =
-          contract.id.exerciseChangeOwner(bobE.toProtoPrimitive).commands()
-
-        // Exercise a choice on the contract with explicit disclosure, on P2, which only has V2 of the package
-        val disclosedContract = DisclosedContract(
-          event.templateId,
-          event.contractId,
-          event.createdEventBlob,
-          daId.logical.toProtoPrimitive,
-        )
-
-        participant2.ledger_api.javaapi.commands.submit(
-          Seq(bobE),
-          Seq(changeOwnerCommands.loneElement),
-          disclosedContracts = Seq(
-            com.daml.ledger.javaapi.data.DisclosedContract
-              .fromProto(DisclosedContract.toJavaProto(disclosedContract))
+    "use a v1 disclosed contract on a participant that only has v1 available" in { implicit env =>
+      import env.*
+      val (quoteCid, disclosedQuote) = discloseQuote(lse, participant1, v1PackageId)
+      val (fetchQuote, disclosedFetch) = createFetchQuote(participant3, bob)
+
+      def setBobConfirmer(
+          confirmingParticipant: LocalParticipantReference
+      ): Unit =
+        PartyToParticipantDeclarative(
+          Set(participant2, participant3),
+          Set(daId),
+        )(
+          owningParticipants = Map.empty,
+          targetTopology = Map(
+            bob.partyId -> Map(
+              daId -> (PositiveInt.one, Set(
+                (confirmingParticipant, ParticipantPermission.Confirmation)
+              ))
+            )
           ),
-        )
+          externalParties = Set(bob),
+        )(executorService, env)
+
+      // Set Bob confirmer to participant2 so that V2 gets used for the prepare step
+      setBobConfirmer(participant2)
+      val preparedExercise = participant1.ledger_api.javaapi.interactive_submission.prepare(
+        Seq(bob.partyId),
+        Seq(fetchQuote.id.exerciseFQ_ExFetch(quoteCid).commands().loneElement),
+        disclosedContracts = Seq(disclosedQuote, disclosedFetch),
+      )
+
+      // Set Bob confirmer to participant3 where V2 is not available
+      setBobConfirmer(participant3)
+      assertThrowsAndLogsCommandFailures(
+        participant1.ledger_api.commands.external.submit_prepared(bob, preparedExercise),
+        { le =>
+          le.errorMessage should include regex raw"(?s)FAILED_PRECONDITION/INVALID_PRESCRIBED_SYNCHRONIZER_ID"
+          le.errorMessage should include regex raw"(?s)because: Some packages are not known to all informees.*on synchronizer synchronizer1"
+          le.errorMessage should include regex raw"(?s)Participant PAR::participant3.*has not vetted ${v2PackageId
+              .take(10)}"
+        },
+      )
+    }
+
+  }
+
+  private def exerciseFetch(
+      participant: => LocalParticipantReference,
+      quoteCid: Quote.ContractId,
+      disclosedQuote: DisclosedContract,
+      fetchQuote: FetchQuote.Contract,
+      party: ExternalParty,
+  ): Transaction = {
+    val preparedExercise = participant.ledger_api.javaapi.interactive_submission.prepare(
+      Seq(party.partyId),
+      Seq(fetchQuote.id.exerciseFQ_ExFetch(quoteCid).commands().loneElement),
+      disclosedContracts = Seq(disclosedQuote),
+    )
+    participant.ledger_api.commands.external.submit_prepared(party, preparedExercise)
+  }
+
+  private def createFetchQuote(
+      participant1: => LocalParticipantReference,
+      party: ExternalParty,
+  ): (FetchQuote.Contract, DisclosedContract) = {
+    val txFetchQuote = participant1.ledger_api.javaapi.commands.submit(
+      Seq(party),
+      Seq(
+        new FetchQuote(
+          party.toProtoPrimitive,
+          party.toProtoPrimitive,
+          party.toProtoPrimitive,
+        ).create.commands.loneElement
+      ),
+      includeCreatedEventBlob = true,
+    )
+
+    val disclosed = JavaDecodeUtil.decodeDisclosedContracts(txFetchQuote).loneElement
+
+    val fetchQuote =
+      JavaDecodeUtil.decodeAllCreated(FetchQuote.COMPANION)(txFetchQuote).loneElement
+    (fetchQuote, disclosed)
   }
+
+  private def discloseQuote(
+      quoter: ExternalParty,
+      participant: LocalParticipantReference,
+      quotePackageId: LfPackageId,
+  ): (Quote.ContractId, DisclosedContract) = {
+    val quoteTx = participant.ledger_api.javaapi.commands.submit(
+      Seq(quoter),
+      Seq(
+        new Quote(
+          Seq(quoter.toProtoPrimitive).asJava,
+          Seq.empty.asJava,
+          "VOD",
+          100,
+        ).create.commands.loneElement
+      ),
+      includeCreatedEventBlob = true,
+      userPackageSelectionPreference = Seq(quotePackageId),
+    )
+
+    val disclosedQuote = JavaDecodeUtil.decodeDisclosedContracts(quoteTx).loneElement
+
+    val quoteCid = new Quote.ContractId(disclosedQuote.contractId.get())
+    (quoteCid, disclosedQuote)
+  }
+
 }
diff --git a/sdk/canton/community/upgrading-integration-tests/src/test/scala/com/digitalasset/canton/integration/tests/upgrading/UpgradePackageAvailabilityIntegrationTest.scala b/sdk/canton/community/upgrading-integration-tests/src/test/scala/com/digitalasset/canton/integration/tests/upgrading/UpgradePackageAvailabilityIntegrationTest.scala
index 30d35bfa69b6..9e2a5892399d 100644
--- a/sdk/canton/community/upgrading-integration-tests/src/test/scala/com/digitalasset/canton/integration/tests/upgrading/UpgradePackageAvailabilityIntegrationTest.scala
+++ b/sdk/canton/community/upgrading-integration-tests/src/test/scala/com/digitalasset/canton/integration/tests/upgrading/UpgradePackageAvailabilityIntegrationTest.scala
@@ -17,7 +17,7 @@ import com.digitalasset.canton.integration.{
   SharedEnvironment,
 }
 import com.digitalasset.canton.participant.ledger.api.client.JavaDecodeUtil
-import com.digitalasset.canton.topology.{ForceFlag, ForceFlags, PartyId}
+import com.digitalasset.canton.topology.PartyId
 import com.digitalasset.daml.lf.data.Ref
 
 import java.util.Optional
@@ -62,12 +62,10 @@
     participant3.topology.vetted_packages.propose_delta(
       participant3,
      removes = Seq(Ref.PackageId.assertFromString(v1.upgrade.Quote.PACKAGE_ID)),
-      force = ForceFlags(ForceFlag.AllowUnvetPackage),
     )
 
     // Participant 4 (dan) has only ever had V2 loaded
     participant4.dars.upload(UpgradingBaseTest.UpgradeV2)
-
   }
 
   private def discloseQuote(
diff --git a/sdk/canton/community/util-observability/src/main/scala/com/digitalasset/canton/logging/LoggingContextWithTrace.scala b/sdk/canton/community/util-observability/src/main/scala/com/digitalasset/canton/logging/LoggingContextWithTrace.scala
index 21eb1821daf4..50b4a9d214c4 100644
--- a/sdk/canton/community/util-observability/src/main/scala/com/digitalasset/canton/logging/LoggingContextWithTrace.scala
+++ b/sdk/canton/community/util-observability/src/main/scala/com/digitalasset/canton/logging/LoggingContextWithTrace.scala
@@ -9,7 +9,7 @@ import com.daml.tracing.Telemetry
 import com.digitalasset.canton.logging.LoggingContextUtil.createLoggingContext
 import com.digitalasset.canton.tracing.TraceContext
 
-/** Class to enrich [[com.digitalasset.canton.logging.ErrorLoggingContext]] with
+/** Class to enrich [[com.daml.logging.LoggingContext]] with
   * [[com.digitalasset.canton.tracing.TraceContext]]
   */
 class LoggingContextWithTrace(
diff --git a/sdk/canton/ref b/sdk/canton/ref
index b0631fe84549..3501b7b5b67b 100644
--- a/sdk/canton/ref
+++ b/sdk/canton/ref
@@ -1 +1 @@
-20251007.17096.v88d57bc4
+20251009.17126.v6d38818f
diff --git a/sdk/daml-script/runner/src/main/scala/com/digitalasset/daml/lf/engine/script/v2/ledgerinteraction/grpcLedgerClient/AdminLedgerClient.scala b/sdk/daml-script/runner/src/main/scala/com/digitalasset/daml/lf/engine/script/v2/ledgerinteraction/grpcLedgerClient/AdminLedgerClient.scala
index e4c967d3e902..00480a11564d 100644
--- a/sdk/daml-script/runner/src/main/scala/com/digitalasset/daml/lf/engine/script/v2/ledgerinteraction/grpcLedgerClient/AdminLedgerClient.scala
+++ b/sdk/daml-script/runner/src/main/scala/com/digitalasset/daml/lf/engine/script/v2/ledgerinteraction/grpcLedgerClient/AdminLedgerClient.scala
@@ -140,7 +140,6 @@ class AdminLedgerClient private[grpcLedgerClient] (
         ),
         mustFullyAuthorize = true,
         forceChanges = Seq(
-          ForceFlag.FORCE_FLAG_ALLOW_UNVET_PACKAGE,
           ForceFlag.FORCE_FLAG_ALLOW_UNVET_PACKAGE_WITH_ACTIVE_CONTRACTS,
           ForceFlag.FORCE_FLAG_ALLOW_UNVETTED_DEPENDENCIES,
         ),
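The rewritten InteractiveSubmissionUpgradingTest above revolves around one pattern: create a contract pinned to a specific package version, decode its disclosed form from the created-event blob, and attach that disclosed contract when preparing an interactive submission on a participant that is not a stakeholder and may have a different version vetted. The sketch below condenses that round trip. It is a sketch only, assuming the Canton test console environment from the diff (LocalParticipantReference values, enabled ExternalParty values, and scalatest's loneElement in scope via the test base class); the method name disclosureRoundTrip and the literal quote values ("VOD", 100) are illustrative, not part of the patch.

// Sketch only: condenses the disclosed-contract round trip exercised by the
// new tests; method name and literals are hypothetical, the API calls are the
// ones used in the diff.
import com.daml.ledger.javaapi.data.DisclosedContract
import com.digitalasset.canton.LfPackageId
import com.digitalasset.canton.console.LocalParticipantReference
import com.digitalasset.canton.damltests.upgrade.v1.java.upgrade.{FetchQuote, Quote}
import com.digitalasset.canton.participant.ledger.api.client.JavaDecodeUtil
import com.digitalasset.canton.topology.ExternalParty

import scala.jdk.CollectionConverters.SeqHasAsJava

def disclosureRoundTrip(
    creatingParticipant: LocalParticipantReference,
    exercisingParticipant: LocalParticipantReference,
    quoter: ExternalParty,
    reader: ExternalParty,
    quotePackageId: LfPackageId,
): Unit = {
  // 1. Create the Quote on the quoter's participant, pinning the package
  //    version and requesting the created-event blob that explicit
  //    disclosure needs.
  val quoteTx = creatingParticipant.ledger_api.javaapi.commands.submit(
    Seq(quoter),
    Seq(
      new Quote(Seq(quoter.toProtoPrimitive).asJava, Seq.empty.asJava, "VOD", 100)
        .create.commands.loneElement
    ),
    includeCreatedEventBlob = true,
    userPackageSelectionPreference = Seq(quotePackageId),
  )
  val disclosedQuote: DisclosedContract =
    JavaDecodeUtil.decodeDisclosedContracts(quoteTx).loneElement
  val quoteCid = new Quote.ContractId(disclosedQuote.contractId.get())

  // 2. Create a FetchQuote for the reader on the exercising participant,
  //    which is not a stakeholder of the Quote and may only have another
  //    package version vetted.
  val fetchTx = exercisingParticipant.ledger_api.javaapi.commands.submit(
    Seq(reader),
    Seq(
      new FetchQuote(
        reader.toProtoPrimitive,
        reader.toProtoPrimitive,
        reader.toProtoPrimitive,
      ).create.commands.loneElement
    ),
  )
  val fetchQuote =
    JavaDecodeUtil.decodeAllCreated(FetchQuote.COMPANION)(fetchTx).loneElement

  // 3. Prepare the exercise with the disclosed Quote attached, then sign and
  //    submit it as the external party.
  val prepared = exercisingParticipant.ledger_api.javaapi.interactive_submission.prepare(
    Seq(reader.partyId),
    Seq(fetchQuote.id.exerciseFQ_ExFetch(quoteCid).commands().loneElement),
    disclosedContracts = Seq(disclosedQuote),
  )
  exercisingParticipant.ledger_api.commands.external.submit_prepared(reader, prepared)
}

The disclosed blob carries the contract in its creating package's format, which is why the third test in the diff can succeed or fail depending on which participant confirms for Bob: the prepare step works wherever the disclosed contract can be interpreted, but submission fails when the confirming participant has not vetted the package version the prepared transaction prescribes.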