diff --git a/cmd/bootstrap/README.md b/cmd/bootstrap/README.md index a0c7a242b19..97057448165 100644 --- a/cmd/bootstrap/README.md +++ b/cmd/bootstrap/README.md @@ -32,6 +32,15 @@ The bootstrapping will generate the following information: - public networking key - weight + +#### Collector clusters +_Each cluster_ of collector nodes needs to have its own root Block and root QC +* Root clustering: assignment of collector nodes to clusters +* For each cluster: + * Root `cluster.Block` + * Root QC: votes from collector nodes for the respective root `cluster.Block` + + #### Root Block for main consensus * Root Block * Root QC: votes from consensus nodes for the root block (required to start consensus) @@ -39,12 +48,6 @@ The bootstrapping will generate the following information: * Root Block Seal: block seal for the initial execution result -#### Root Blocks for Collector clusters -_Each cluster_ of collector nodes needs to have its own root Block and root QC -* Root `ClusterBlockProposal` -* Root QC from cluster for their respective `ClusterBlockProposal` - - # Usage `go run ./cmd/bootstrap` prints usage information @@ -97,6 +100,8 @@ Each input is a config file specified as a command line parameter: * folder containing the `.node-info.pub.json` files for _all_ partner nodes (see `.example_files/partner-node-infos`) * `json` containing the weight value for all partner nodes (see `./example_files/partner-weights.json`). Format: ```: ``` +* random seed for the new collector node clustering and epoch RandomSource (min 32 bytes in hex encoding) + Provided seeds should be derived from a verifiable random source, such as the previous epoch's RandomSource. #### Example ```bash @@ -121,6 +126,19 @@ go run . keygen \ ``` +```bash +go run . 
cluster-assignment \ + --epoch-counter 0 \ + --collection-clusters 1 \ + --clustering-random-seed 00000000000000000000000000000000000000000000000000000000deadbeef \ + --config ./bootstrap-example/node-config.json \ + -o ./bootstrap-example \ + --partner-dir ./example_files/partner-node-infos \ + --partner-weights ./example_files/partner-weights.json \ + --internal-priv-dir ./bootstrap-example/keys + +``` + ```bash go run . rootblock \ --root-chain bench \ @@ -131,15 +149,19 @@ go run . rootblock \ --epoch-length 30000 \ --epoch-staking-phase-length 20000 \ --epoch-dkg-phase-length 2000 \ + --random-seed 00000000000000000000000000000000000000000000000000000000deadbeef \ --collection-clusters 1 \ --protocol-version=0 \ --use-default-epoch-timing \ - --epoch-commit-safety-threshold=1000 \ + --kvstore-finalization-safety-threshold=1000 \ + --kvstore-epoch-extension-view-count=2000 \ --config ./bootstrap-example/node-config.json \ -o ./bootstrap-example \ --partner-dir ./example_files/partner-node-infos \ --partner-weights ./example_files/partner-weights.json \ - --internal-priv-dir ./bootstrap-example/keys + --internal-priv-dir ./bootstrap-example/keys \ + --intermediary-clustering-data ./bootstrap-example/public-root-information/root-clustering.json \ + --cluster-votes-dir ./bootstrap-example/public-root-information/root-block-votes/ ``` ```bash @@ -187,14 +209,6 @@ go run . 
finalize \ * file `dkg-data.pub.json` - REQUIRED at NODE START by all nodes -* file `.root-cluster-block.json` - - root `ClusterBlockProposal` for collector cluster with ID `` - - REQUIRED at NODE START by all collectors of the respective cluster - - file can be made accessible to all nodes at boot up (or recovery after crash) -* file `.root-cluster-qc.json` - - root Quorum Certificate for `ClusterBlockProposal` for collector cluster with ID `` - - REQUIRED at NODE START by all collectors of the respective cluster - - file can be made accessible to all nodes at boot up (or recovery after crash) ## Generating networking key for Observer diff --git a/cmd/bootstrap/cmd/block.go b/cmd/bootstrap/cmd/block.go index 475cc55f32f..db7d9facef4 100644 --- a/cmd/bootstrap/cmd/block.go +++ b/cmd/bootstrap/cmd/block.go @@ -58,10 +58,10 @@ func constructRootEpochEvents( clusterQCs []*flow.QuorumCertificate, dkgData dkg.ThresholdKeySet, dkgIndexMap flow.DKGIndexMap, - csprg random.Rand, + rng random.Rand, ) (*flow.EpochSetup, *flow.EpochCommit, error) { randomSource := make([]byte, flow.EpochSetupRandomSourceLength) - csprg.Read(randomSource) + rng.Read(randomSource) epochSetup, err := flow.NewEpochSetup( flow.UntrustedEpochSetup{ Counter: flagEpochCounter, diff --git a/cmd/bootstrap/cmd/clustering.go b/cmd/bootstrap/cmd/clustering.go new file mode 100644 index 00000000000..1c331437712 --- /dev/null +++ b/cmd/bootstrap/cmd/clustering.go @@ -0,0 +1,180 @@ +package cmd + +import ( + "fmt" + "path/filepath" + + "github.com/spf13/cobra" + + "github.com/onflow/flow-go/cmd" + "github.com/onflow/flow-go/cmd/bootstrap/run" + "github.com/onflow/flow-go/cmd/util/cmd/common" + hotstuff "github.com/onflow/flow-go/consensus/hotstuff/model" + model "github.com/onflow/flow-go/model/bootstrap" + "github.com/onflow/flow-go/model/flow" + cluster2 "github.com/onflow/flow-go/state/cluster" + "github.com/onflow/flow-go/state/protocol/prg" +) + +var ( + flagClusteringRandomSeed []byte +) + +// 
clusterAssignmentCmd represents the clusterAssignment command +var clusterAssignmentCmd = &cobra.Command{ + Use: "cluster-assignment", + Short: "Generate cluster assignment", + Long: `Generate cluster assignment for collection nodes based on partner and internal node info and weights. Serialize into file with Epoch Counter`, + Run: clusterAssignment, +} + +func init() { + rootCmd.AddCommand(clusterAssignmentCmd) + addClusterAssignmentCmdFlags() +} + +func addClusterAssignmentCmdFlags() { + // required parameters for network configuration and generation of root node identities + clusterAssignmentCmd.Flags().StringVar(&flagConfig, "config", "", + "path to a JSON file containing multiple node configurations (fields Role, Address, Weight)") + clusterAssignmentCmd.Flags().StringVar(&flagInternalNodePrivInfoDir, "internal-priv-dir", "", "path to directory "+ + "containing the output from the `keygen` command for internal nodes") + clusterAssignmentCmd.Flags().StringVar(&flagPartnerNodeInfoDir, "partner-dir", "", "path to directory "+ + "containing one JSON file starting with node-info.pub..json for every partner node (fields "+ + " in the JSON file: Role, Address, NodeID, NetworkPubKey, StakingPubKey)") + clusterAssignmentCmd.Flags().StringVar(&flagPartnerWeights, "partner-weights", "", "path to a JSON file containing "+ + "a map from partner node's NodeID to their stake") + + cmd.MarkFlagRequired(clusterAssignmentCmd, "config") + cmd.MarkFlagRequired(clusterAssignmentCmd, "internal-priv-dir") + cmd.MarkFlagRequired(clusterAssignmentCmd, "partner-dir") + cmd.MarkFlagRequired(clusterAssignmentCmd, "partner-weights") + + // optional parameters for cluster assignment + clusterAssignmentCmd.Flags().UintVar(&flagCollectionClusters, "collection-clusters", 2, "number of collection clusters") + + // required parameters for generation of cluster root blocks + clusterAssignmentCmd.Flags().Uint64Var(&flagEpochCounter, "epoch-counter", 0, "epoch counter for the epoch beginning with 
the root block") + cmd.MarkFlagRequired(clusterAssignmentCmd, "epoch-counter") + + clusterAssignmentCmd.Flags().BytesHexVar(&flagClusteringRandomSeed, "clustering-random-seed", nil, "random seed to generate the clustering assignment") + cmd.MarkFlagRequired(clusterAssignmentCmd, "clustering-random-seed") + +} + +func clusterAssignment(cmd *cobra.Command, args []string) { + // Read partner node's information and internal node's information. + // With "internal nodes" we reference nodes, whose private keys we have. In comparison, + // for "partner nodes" we generally do not have their keys. However, we allow some overlap, + // in that we tolerate a configuration where information about an "internal node" is also + // duplicated in the list of "partner nodes". + log.Info().Msg("collecting partner network and staking keys") + rawPartnerNodes, err := common.ReadFullPartnerNodeInfos(log, flagPartnerWeights, flagPartnerNodeInfoDir) + if err != nil { + log.Fatal().Err(err).Msg("failed to read full partner node infos") + } + log.Info().Msg("") + + log.Info().Msg("generating internal private networking and staking keys") + internalNodes, err := common.ReadFullInternalNodeInfos(log, flagInternalNodePrivInfoDir, flagConfig) + if err != nil { + log.Fatal().Err(err).Msg("failed to read full internal node infos") + } + log.Info().Msg("") + + // we now convert to the strict meaning of: "internal nodes" vs "partner nodes" + // • "internal nodes" we have they private keys for + // • "partner nodes" we don't have the keys for + // • both sets are disjoint (no common nodes) + log.Info().Msg("remove internal partner nodes") + partnerNodes := common.FilterInternalPartners(rawPartnerNodes, internalNodes) + log.Info().Msgf("removed %d internal partner nodes", len(rawPartnerNodes)-len(partnerNodes)) + + log.Info().Msg("checking constraints on consensus nodes") + checkConstraints(partnerNodes, internalNodes) + log.Info().Msg("") + + log.Info().Msg("assembling network and staking keys") + 
stakingNodes, err := mergeNodeInfos(internalNodes, partnerNodes) + if err != nil { + log.Fatal().Err(err).Msgf("failed to merge node infos") + } + publicInfo, err := model.ToPublicNodeInfoList(stakingNodes) + if err != nil { + log.Fatal().Msg("failed to read public node info") + } + err = common.WriteJSON(model.PathNodeInfosPub, flagOutdir, publicInfo) + if err != nil { + log.Fatal().Err(err).Msg("failed to write json") + } + log.Info().Msgf("wrote file %s/%s", flagOutdir, model.PathNodeInfosPub) + log.Info().Msg("") + + // Convert to IdentityList + partnerList := model.ToIdentityList(partnerNodes) + internalList := model.ToIdentityList(internalNodes) + + clusteringPrg, err := prg.New(flagClusteringRandomSeed, prg.BootstrapClusterAssignment, nil) + if err != nil { + log.Fatal().Err(err).Msg("failed to initialize pseudorandom generator") + } + + log.Info().Msg("computing collection node clusters") + assignments, clusters, err := common.ConstructClusterAssignment(log, partnerList, internalList, int(flagCollectionClusters), clusteringPrg) + if err != nil { + log.Fatal().Err(err).Msg("unable to generate cluster assignment") + } + log.Info().Msg("") + + // Output assignment with epoch counter + output := IntermediaryClusteringData{ + EpochCounter: flagEpochCounter, + Assignments: assignments, + Clusters: clusters, + } + err = common.WriteJSON(model.PathClusteringData, flagOutdir, output) + if err != nil { + log.Fatal().Err(err).Msg("failed to write json") + } + log.Info().Msgf("wrote file %s/%s", flagOutdir, model.PathClusteringData) + log.Info().Msg("") + + log.Info().Msg("constructing and writing cluster block votes for internal nodes") + constructClusterRootVotes( + output, + model.FilterByRole(internalNodes, flow.RoleCollection), + ) + log.Info().Msg("") +} + +// constructClusterRootVotes generates and writes vote files for internal collector nodes with private keys available. 
+func constructClusterRootVotes(data IntermediaryClusteringData, internalCollectors []model.NodeInfo) { + for i := range data.Clusters { + clusterRootBlock, err := cluster2.CanonicalRootBlock(data.EpochCounter, data.Assignments[i]) + if err != nil { + log.Fatal().Err(err).Msg("could not construct cluster root block") + } + block := hotstuff.GenesisBlockFromFlow(clusterRootBlock.ToHeader()) + // collate private NodeInfos for internal nodes in this cluster + signers := make([]model.NodeInfo, 0) + for _, nodeID := range data.Assignments[i] { + for _, node := range internalCollectors { + if node.NodeID == nodeID { + signers = append(signers, node) + } + } + } + votes, err := run.CreateClusterRootBlockVotes(signers, block) + if err != nil { + log.Fatal().Err(err).Msg("could not create cluster root block votes") + } + for _, vote := range votes { + path := filepath.Join(model.DirnameRootBlockVotes, fmt.Sprintf(model.FilenameRootClusterBlockVote, vote.SignerID)) + err = common.WriteJSON(path, flagOutdir, vote) + if err != nil { + log.Fatal().Err(err).Msg("failed to write json") + } + log.Info().Msgf("wrote file %s/%s", flagOutdir, path) + } + } +} diff --git a/cmd/bootstrap/cmd/finalize.go b/cmd/bootstrap/cmd/finalize.go index 74d526d2845..4de0a5f167a 100644 --- a/cmd/bootstrap/cmd/finalize.go +++ b/cmd/bootstrap/cmd/finalize.go @@ -31,11 +31,9 @@ import ( ) var ( - flagConfig string - flagInternalNodePrivInfoDir string - flagPartnerNodeInfoDir string - // Deprecated: use flagPartnerWeights instead - deprecatedFlagPartnerStakes string + flagConfig string + flagInternalNodePrivInfoDir string + flagPartnerNodeInfoDir string flagPartnerWeights string flagDKGDataPath string flagRootBlockPath string @@ -70,8 +68,6 @@ func addFinalizeCmdFlags() { finalizeCmd.Flags().StringVar(&flagPartnerNodeInfoDir, "partner-dir", "", "path to directory "+ "containing one JSON file starting with node-info.pub..json for every partner node (fields "+ " in the JSON file: Role, Address, NodeID, 
NetworkPubKey, StakingPubKey, StakingKeyPoP)") - // Deprecated: remove this flag - finalizeCmd.Flags().StringVar(&deprecatedFlagPartnerStakes, "partner-stakes", "", "deprecated: use partner-weights instead") finalizeCmd.Flags().StringVar(&flagPartnerWeights, "partner-weights", "", "path to a JSON file containing "+ "a map from partner node's NodeID to their weight") finalizeCmd.Flags().StringVar(&flagDKGDataPath, "dkg-data", "", "path to a JSON file containing data as output from the random beacon key generation") @@ -102,17 +98,6 @@ func addFinalizeCmdFlags() { } func finalize(cmd *cobra.Command, args []string) { - - // maintain backward compatibility with old flag name - if deprecatedFlagPartnerStakes != "" { - log.Warn().Msg("using deprecated flag --partner-stakes (use --partner-weights instead)") - if flagPartnerWeights == "" { - flagPartnerWeights = deprecatedFlagPartnerStakes - } else { - log.Fatal().Msg("cannot use both --partner-stakes and --partner-weights flags (use only --partner-weights)") - } - } - log.Info().Msg("collecting partner network and staking keys") partnerNodes, err := common.ReadFullPartnerNodeInfos(log, flagPartnerWeights, flagPartnerNodeInfoDir) if err != nil { diff --git a/cmd/bootstrap/cmd/finalize_test.go b/cmd/bootstrap/cmd/finalize_test.go index 3a5e8c9dff0..c68bb4e593c 100644 --- a/cmd/bootstrap/cmd/finalize_test.go +++ b/cmd/bootstrap/cmd/finalize_test.go @@ -68,12 +68,19 @@ func TestFinalize_HappyPath(t *testing.T) { flagPartnerWeights = partnerWeights flagInternalNodePrivInfoDir = internalPrivDir + flagIntermediaryClusteringDataPath = filepath.Join(bootDir, model.PathClusteringData) + flagRootClusterBlockVotesDir = filepath.Join(bootDir, model.DirnameRootBlockVotes) + flagEpochCounter = epochCounter + + // clusterAssignment will generate the collector clusters + // In addition, it also generates votes from internal collector nodes + clusterAssignment(clusterAssignmentCmd, nil) + flagRootChain = chainName flagRootParent = 
hex.EncodeToString(rootParent[:]) flagRootHeight = rootHeight flagRootView = 1_000 flagRootCommit = hex.EncodeToString(rootCommit[:]) - flagEpochCounter = epochCounter flagNumViewsInEpoch = 100_000 flagNumViewsInStakingAuction = 50_000 flagNumViewsInDKGPhase = 2_000 diff --git a/cmd/bootstrap/cmd/genconfig.go b/cmd/bootstrap/cmd/genconfig.go index f1902778f3a..771581bce80 100644 --- a/cmd/bootstrap/cmd/genconfig.go +++ b/cmd/bootstrap/cmd/genconfig.go @@ -17,24 +17,11 @@ var ( flagNodesConsensus int flagNodesExecution int flagNodesVerification int - // Deprecated: use flagWeight instead - deprecatedFlagStake uint64 - flagWeight uint64 + flagWeight uint64 ) // genconfigCmdRun generates the node-config.json file func genconfigCmdRun(_ *cobra.Command, _ []string) { - - // maintain backward compatibility with old flag name - if deprecatedFlagStake != 0 { - log.Warn().Msg("using deprecated flag --stake (use --weight instead)") - if flagWeight == 0 { - flagWeight = deprecatedFlagStake - } else { - log.Fatal().Msg("cannot use both --stake and --weight flags (use only --weight)") - } - } - if flagWeight != flow.DefaultInitialWeight { log.Warn().Msgf("using non-standard initial weight %d!=%d - make sure this is desired", flagWeight, flow.DefaultInitialWeight) } @@ -85,7 +72,6 @@ func init() { genconfigCmd.Flags().IntVar(&flagNodesExecution, "execution", 2, "number of execution nodes") genconfigCmd.Flags().IntVar(&flagNodesVerification, "verification", 1, "number of verification nodes") genconfigCmd.Flags().Uint64Var(&flagWeight, "weight", flow.DefaultInitialWeight, "weight for all nodes") - genconfigCmd.Flags().Uint64Var(&deprecatedFlagStake, "stake", 0, "deprecated: use --weight") } func createConf(r flow.Role, i int) model.NodeConfig { diff --git a/cmd/bootstrap/cmd/intermediary.go b/cmd/bootstrap/cmd/intermediary.go index 00f498e6254..6f7c4f8c25f 100644 --- a/cmd/bootstrap/cmd/intermediary.go +++ b/cmd/bootstrap/cmd/intermediary.go @@ -6,7 +6,7 @@ import ( ) // 
IntermediaryBootstrappingData stores data which needs to be passed between the -// 2 steps of the bootstrapping process: `rootblock` and `finalize`. +// last 2 steps of the bootstrapping process: `rootblock` and `finalize`. // This structure is created in `rootblock`, written to disk, then read in `finalize`. type IntermediaryBootstrappingData struct { IntermediaryParamsData @@ -31,3 +31,12 @@ type IntermediaryEpochData struct { RootEpochSetup *flow.EpochSetup RootEpochCommit *flow.EpochCommit } + +// IntermediaryClusteringData stores the collector cluster assignment and epoch counter. +// This is used for the collection nodes to construct and vote on their cluster root blocks, +// and also to pass data between the clustering command and the rootblock command. +type IntermediaryClusteringData struct { + EpochCounter uint64 + Assignments flow.AssignmentList + Clusters flow.ClusterList +} diff --git a/cmd/bootstrap/cmd/rootblock.go b/cmd/bootstrap/cmd/rootblock.go index 01b4d8de0c5..37755e45975 100644 --- a/cmd/bootstrap/cmd/rootblock.go +++ b/cmd/bootstrap/cmd/rootblock.go @@ -5,6 +5,7 @@ import ( "encoding/hex" "fmt" "strconv" + "strings" "time" "github.com/onflow/cadence" @@ -12,7 +13,9 @@ import ( "github.com/onflow/flow-go/cmd" "github.com/onflow/flow-go/cmd/bootstrap/run" + "github.com/onflow/flow-go/cmd/bootstrap/utils" "github.com/onflow/flow-go/cmd/util/cmd/common" + hotstuff "github.com/onflow/flow-go/consensus/hotstuff/model" model "github.com/onflow/flow-go/model/bootstrap" "github.com/onflow/flow-go/model/dkg" "github.com/onflow/flow-go/model/encodable" @@ -43,11 +46,15 @@ var ( flagNumViewsInEpoch uint64 flagNumViewsInStakingAuction uint64 flagNumViewsInDKGPhase uint64 + flagEpochRandomSeed []byte // Epoch target end time config flagUseDefaultEpochTargetEndTime bool flagEpochTimingRefCounter uint64 flagEpochTimingRefTimestamp uint64 flagEpochTimingDuration uint64 + + flagIntermediaryClusteringDataPath string + flagRootClusterBlockVotesDir string ) // 
rootBlockCmd represents the rootBlock command @@ -72,7 +79,6 @@ func addRootBlockCmdFlags() { rootBlockCmd.Flags().StringVar(&flagPartnerNodeInfoDir, "partner-dir", "", "path to directory "+ "containing one JSON file starting with node-info.pub..json for every partner node (fields "+ " in the JSON file: Role, Address, NodeID, NetworkPubKey, StakingPubKey)") - rootBlockCmd.Flags().StringVar(&deprecatedFlagPartnerStakes, "partner-stakes", "", "deprecated: use --partner-weights") rootBlockCmd.Flags().StringVar(&flagPartnerWeights, "partner-weights", "", "path to a JSON file containing "+ "a map from partner node's NodeID to their stake") @@ -106,11 +112,17 @@ func addRootBlockCmdFlags() { rootBlockCmd.Flags().Uint64Var(&flagEpochExtensionViewCount, "kvstore-epoch-extension-view-count", 0, "length of epoch extension in views, default is 100_000 which is approximately 1 day") rootBlockCmd.Flags().StringVar(&flagKVStoreVersion, "kvstore-version", "default", "protocol state KVStore version to initialize ('default' or an integer equal to a supported protocol version: '0', '1', '2', ...)") + rootBlockCmd.Flags().BytesHexVar(&flagEpochRandomSeed, "random-seed", nil, "random seed") + rootBlockCmd.Flags().StringVar(&flagIntermediaryClusteringDataPath, "intermediary-clustering-data", "", "path to a JSON file containing intermediary clustering data generated by the clustering command") + rootBlockCmd.Flags().StringVar(&flagRootClusterBlockVotesDir, "cluster-votes-dir", "", "directory containing votes for root cluster blocks") cmd.MarkFlagRequired(rootBlockCmd, "root-chain") cmd.MarkFlagRequired(rootBlockCmd, "root-parent") cmd.MarkFlagRequired(rootBlockCmd, "root-height") cmd.MarkFlagRequired(rootBlockCmd, "root-view") + cmd.MarkFlagRequired(rootBlockCmd, "random-seed") + cmd.MarkFlagRequired(rootBlockCmd, "intermediary-clustering-data") + cmd.MarkFlagRequired(rootBlockCmd, "cluster-votes-dir") // Epoch timing config - these values must be set identically to `EpochTimingConfig` 
in the FlowEpoch smart contract. // See https://github.com/onflow/flow-core-contracts/blob/240579784e9bb8d97d91d0e3213614e25562c078/contracts/epochs/FlowEpoch.cdc#L259-L266 @@ -133,15 +145,6 @@ func addRootBlockCmdFlags() { } func rootBlock(cmd *cobra.Command, args []string) { - // maintain backward compatibility with old flag name - if deprecatedFlagPartnerStakes != "" { - log.Warn().Msg("using deprecated flag --partner-stakes (use --partner-weights instead)") - if flagPartnerWeights == "" { - flagPartnerWeights = deprecatedFlagPartnerStakes - } else { - log.Fatal().Msg("cannot use both --partner-stakes and --partner-weights flags (use only --partner-weights)") - } - } if deprecatedFlagProtocolVersion != 0 { log.Warn().Msg("using deprecated flag --protocol-version; please remove this flag from your workflow, it is ignored and will be removed in a future release") } @@ -224,16 +227,6 @@ func rootBlock(cmd *cobra.Command, args []string) { if err != nil { log.Fatal().Err(err).Msgf("failed to merge node infos") } - publicInfo, err := model.ToPublicNodeInfoList(stakingNodes) - if err != nil { - log.Fatal().Msg("failed to read public node info") - } - err = common.WriteJSON(model.PathNodeInfosPub, flagOutdir, publicInfo) - if err != nil { - log.Fatal().Err(err).Msg("failed to write json") - } - log.Info().Msgf("wrote file %s/%s", flagOutdir, model.PathNodeInfosPub) - log.Info().Msg("") log.Info().Msg("running DKG for consensus nodes") randomBeaconData, dkgIndexMap := runBeaconKG(model.FilterByRole(stakingNodes, flow.RoleConsensus)) @@ -242,30 +235,22 @@ func rootBlock(cmd *cobra.Command, args []string) { // create flow.IdentityList representation of the participant set participants := model.ToIdentityList(stakingNodes).Sort(flow.Canonical[flow.Identity]) - // use system randomness to create cluster assignment - // TODO(7848): use randomness provided from the command line - var randomSeed [32]byte - _, err = rand.Read(randomSeed[:]) - if err != nil { - 
log.Fatal().Err(err).Msg("unable to generate random seed") - } - clusteringPrg, err := prg.New(randomSeed[:], prg.BootstrapClusterAssignment, nil) - if err != nil { - log.Fatal().Err(err).Msg("unable to initialize pseudorandom generator") - } - log.Info().Msg("computing collection node clusters") - assignments, clusters, err := common.ConstructClusterAssignment(log, model.ToIdentityList(partnerNodes), model.ToIdentityList(internalNodes), int(flagCollectionClusters), clusteringPrg) - if err != nil { - log.Fatal().Err(err).Msg("unable to generate cluster assignment") + // read the previously created cluster assignment (see cmd/bootstrap/cmd/clustering.go) + clusteringData := readIntermediaryClusteringData() + if flagEpochCounter != clusteringData.EpochCounter { + log.Fatal().Msgf("epoch counter does not match the one used to generate collector clusters") } + clusters := clusteringData.Clusters + log.Info().Msg("reading votes for collection node cluster root blocks") + votes := readClusterBlockVotes(clusteringData.Assignments) log.Info().Msg("") log.Info().Msg("constructing root blocks for collection node clusters") - clusterBlocks := run.GenerateRootClusterBlocks(flagEpochCounter, clusters) + clusterBlocks := run.GenerateRootClusterBlocks(clusteringData.EpochCounter, clusters) log.Info().Msg("") log.Info().Msg("constructing root QCs for collection node clusters") - clusterQCs := run.ConstructRootQCsForClusters(log, clusters, internalNodes, clusterBlocks) + clusterQCs := run.ConstructClusterRootQCsFromVotes(log, clusters, internalNodes, clusterBlocks, votes) log.Info().Msg("") log.Info().Msg("constructing root header") @@ -275,13 +260,13 @@ func rootBlock(cmd *cobra.Command, args []string) { } log.Info().Msg("") - // use the same randomness for the RandomSource of the new epoch - randomSourcePrg, err := prg.New(randomSeed[:], prg.BootstrapEpochRandomSource, nil) + // use provided randomness for the RandomSource of the new epoch + randomSourcePrg, err := 
prg.New(flagEpochRandomSeed, prg.BootstrapEpochRandomSource, nil) if err != nil { log.Fatal().Err(err).Msg("failed to initialize pseudorandom generator") } log.Info().Msg("constructing intermediary bootstrapping data") - epochSetup, epochCommit, err := constructRootEpochEvents(headerBody.View, participants, assignments, clusterQCs, randomBeaconData, dkgIndexMap, randomSourcePrg) + epochSetup, epochCommit, err := constructRootEpochEvents(headerBody.View, participants, clusteringData.Assignments, clusterQCs, randomBeaconData, dkgIndexMap, randomSourcePrg) if err != nil { log.Fatal().Err(err).Msg("failed to construct root epoch events") } @@ -388,6 +373,54 @@ func validateEpochConfig() error { return nil } +// readIntermediaryClusteringData reads intermediary clustering data file from disk. +// This file needs to be prepared with the clustering bootstrap command +func readIntermediaryClusteringData() *IntermediaryClusteringData { + intermediaryData, err := utils.ReadData[IntermediaryClusteringData](flagIntermediaryClusteringDataPath) + if err != nil { + log.Fatal().Err(err).Msg("could not read clustering data, was the clustering command run?") + } + return intermediaryData +} + +// readClusterBlockVotes reads votes for root cluster blocks. +// It sorts the votes into the appropriate clusters according to the given assignment list. +// The returned list of votes is in cluster index order (first list of votes is for cluster 0, etc.) 
+func readClusterBlockVotes(al flow.AssignmentList) [][]*hotstuff.Vote { + votes := make([][]*hotstuff.Vote, len(al)) + files, err := common.FilesInDir(flagRootClusterBlockVotesDir) + if err != nil { + log.Fatal().Err(err).Msg("could not read cluster block votes") + } + for _, f := range files { + // skip files that do not include node-infos + if !strings.Contains(f, model.FilenameClusterBlockVotePrefix) { + continue + } + + // read file and append to partners + var vote hotstuff.Vote + err = common.ReadJSON(f, &vote) + if err != nil { + log.Fatal().Err(err).Msg("failed to read json") + } + log.Info().Msgf("read vote %v for block %v from signerID %v", vote.ID(), vote.BlockID, vote.SignerID) + + // put the vote in the correct bucket for its cluster + found := false + for i, cluster := range al { + if cluster.Contains(vote.SignerID) { + votes[i] = append(votes[i], &vote) + found = true + } + } + if !found { + log.Fatal().Msgf("Halting because found vote for block %v from signerID %v not part of the assignment", vote.BlockID, vote.SignerID) + } + } + return votes +} + // generateExecutionStateEpochConfig generates epoch-related configuration used // to generate an empty root execution state. This config is generated in the // `rootblock` alongside the root epoch and root protocol state ID for consistency. 
@@ -414,7 +447,7 @@ func generateExecutionStateEpochConfig( NumViewsInEpoch: cadence.UInt64(flagNumViewsInEpoch), NumViewsInStakingAuction: cadence.UInt64(flagNumViewsInStakingAuction), NumViewsInDKGPhase: cadence.UInt64(flagNumViewsInDKGPhase), - NumCollectorClusters: cadence.UInt16(flagCollectionClusters), + NumCollectorClusters: cadence.UInt16(len(clusterQCs)), RandomSource: cdcRandomSource, CollectorClusters: epochSetup.Assignments, ClusterQCs: clusterQCs, diff --git a/cmd/bootstrap/cmd/rootblock_test.go b/cmd/bootstrap/cmd/rootblock_test.go index c51c4f3d50d..186aa083294 100644 --- a/cmd/bootstrap/cmd/rootblock_test.go +++ b/cmd/bootstrap/cmd/rootblock_test.go @@ -28,14 +28,14 @@ const rootBlockHappyPathLogs = "collecting partner network and staking keys" + `removed 0 internal partner nodes` + `checking constraints on consensus nodes` + `assembling network and staking keys` + - `wrote file \S+/node-infos.pub.json` + `running DKG for consensus nodes` + `read \d+ node infos for DKG` + `will run DKG` + `finished running DKG` + `.+/random-beacon.priv.json` + `wrote file \S+/root-dkg-data.priv.json` + - `computing collection node clusters` + + `reading votes for collection node cluster root blocks` + + `read vote .+` + `constructing root blocks for collection node clusters` + `constructing root QCs for collection node clusters` + `producing QC for cluster .*` + @@ -59,11 +59,14 @@ func setupHappyPathFlags(bootDir, partnerDir, partnerWeights, internalPrivDir, c flagPartnerWeights = partnerWeights flagInternalNodePrivInfoDir = internalPrivDir + flagIntermediaryClusteringDataPath = filepath.Join(bootDir, model.PathClusteringData) + flagRootClusterBlockVotesDir = filepath.Join(bootDir, model.DirnameRootBlockVotes) + flagEpochCounter = 0 + flagRootParent = hex.EncodeToString(rootParent[:]) flagRootChain = "main" flagRootHeight = 12332 flagRootView = 1000 - flagEpochCounter = 0 flagNumViewsInEpoch = 100_000 flagNumViewsInStakingAuction = 50_000 flagNumViewsInDKGPhase = 
2_000 @@ -81,6 +84,10 @@ func TestRootBlock_HappyPath(t *testing.T) { utils.RunWithSporkBootstrapDir(t, func(bootDir, partnerDir, partnerWeights, internalPrivDir, configPath string) { setupHappyPathFlags(bootDir, partnerDir, partnerWeights, internalPrivDir, configPath) + // clusterAssignment will generate the collector clusters + // In addition, it also generates votes from internal collector nodes + clusterAssignment(clusterAssignmentCmd, nil) + // KV store values (epoch extension view count and finalization safety threshold) must be explicitly set for mainnet require.NoError(t, rootBlockCmd.Flags().Set("kvstore-finalization-safety-threshold", "1000")) require.NoError(t, rootBlockCmd.Flags().Set("kvstore-epoch-extension-view-count", "100000")) diff --git a/cmd/bootstrap/run/cluster_block.go b/cmd/bootstrap/run/cluster_block.go index db3e0a9220c..4401f8b7cc2 100644 --- a/cmd/bootstrap/run/cluster_block.go +++ b/cmd/bootstrap/run/cluster_block.go @@ -16,7 +16,7 @@ func GenerateRootClusterBlocks(epoch uint64, clusters flow.ClusterList) []*clust panic(fmt.Sprintf("failed to get cluster by index: %v", i)) } - rootBlock, err := clusterstate.CanonicalRootBlock(epoch, cluster) + rootBlock, err := clusterstate.CanonicalRootBlock(epoch, cluster.NodeIDs()) if err != nil { panic(fmt.Errorf("failed to get canonical root block: %w", err)) } diff --git a/cmd/bootstrap/run/cluster_qc.go b/cmd/bootstrap/run/cluster_qc.go index 185e4d43c2d..83d0a779e14 100644 --- a/cmd/bootstrap/run/cluster_qc.go +++ b/cmd/bootstrap/run/cluster_qc.go @@ -25,10 +25,19 @@ func GenerateClusterRootQC(signers []bootstrap.NodeInfo, allCommitteeMembers flo clusterRootBlock := model.GenesisBlockFromFlow(clusterBlock.ToHeader()) // STEP 1: create votes for cluster root block - votes, err := createRootBlockVotes(signers, clusterRootBlock) + votes, err := CreateClusterRootBlockVotes(signers, clusterRootBlock) if err != nil { return nil, err } + return GenerateClusterRootQCFromVotes(signers, 
allCommitteeMembers, clusterBlock, votes) +} + +// GenerateClusterRootQCFromVotes generates a QC from the provided votes based on participant data +func GenerateClusterRootQCFromVotes(signers []bootstrap.NodeInfo, allCommitteeMembers flow.IdentitySkeletonList, clusterBlock *cluster.Block, votes []*model.Vote) (*flow.QuorumCertificate, error) { + if !allCommitteeMembers.Sorted(flow.Canonical[flow.IdentitySkeleton]) { + return nil, fmt.Errorf("can't create root cluster QC: committee members are not sorted in canonical order") + } + clusterRootBlock := model.GenesisBlockFromFlow(clusterBlock.ToHeader()) // STEP 1.5: patch committee to include dynamic identities. This is a temporary measure until bootstrapping is refactored. // We need a Committee for creating the cluster's root QC and the Committee requires dynamic identities to be instantiated. @@ -85,8 +94,8 @@ func createClusterValidator(committee hotstuff.DynamicCommittee) (hotstuff.Valid return hotstuffValidator, nil } -// createRootBlockVotes generates a vote for the rootBlock from each participant -func createRootBlockVotes(participants []bootstrap.NodeInfo, rootBlock *model.Block) ([]*model.Vote, error) { +// CreateClusterRootBlockVotes generates a vote for the rootBlock from each participant +func CreateClusterRootBlockVotes(participants []bootstrap.NodeInfo, rootBlock *model.Block) ([]*model.Vote, error) { votes := make([]*model.Vote, 0, len(participants)) for _, participant := range participants { // create the participant's local identity diff --git a/cmd/bootstrap/run/epochs.go b/cmd/bootstrap/run/epochs.go index 9b9f42266ec..fcd24f58e05 100644 --- a/cmd/bootstrap/run/epochs.go +++ b/cmd/bootstrap/run/epochs.go @@ -9,9 +9,8 @@ import ( "github.com/rs/zerolog" "golang.org/x/exp/slices" - "github.com/onflow/flow-go/state/protocol/prg" - "github.com/onflow/flow-go/cmd/util/cmd/common" + hotstuff "github.com/onflow/flow-go/consensus/hotstuff/model" "github.com/onflow/flow-go/fvm/systemcontracts" 
"github.com/onflow/flow-go/model/bootstrap" model "github.com/onflow/flow-go/model/bootstrap" @@ -19,6 +18,7 @@ import ( "github.com/onflow/flow-go/model/flow" "github.com/onflow/flow-go/model/flow/filter" "github.com/onflow/flow-go/state/protocol/inmem" + "github.com/onflow/flow-go/state/protocol/prg" ) // GenerateRecoverEpochTxArgs generates the required transaction arguments for the `recoverEpoch` transaction. @@ -303,8 +303,7 @@ func GenerateRecoverTxArgsWithDKG( // - nodeInfos: list of NodeInfos (must contain all internal nodes) // - clusterBlocks: list of root blocks (one for each cluster) // Returns: -// - flow.AssignmentList: the generated assignment list. -// - flow.ClusterList: the generate collection cluster list. +// - The list of quorum certificates for all clusters. func ConstructRootQCsForClusters(log zerolog.Logger, clusterList flow.ClusterList, nodeInfos []bootstrap.NodeInfo, clusterBlocks []*cluster.Block) []*flow.QuorumCertificate { if len(clusterBlocks) != len(clusterList) { log.Fatal().Int("len(clusterBlocks)", len(clusterBlocks)).Int("len(clusterList)", len(clusterList)). @@ -326,6 +325,42 @@ func ConstructRootQCsForClusters(log zerolog.Logger, clusterList flow.ClusterLis return qcs } +// ConstructClusterRootQCsFromVotes constructs a root QC for each cluster in the list, based on the provided votes. +// Args: +// - log: the logger instance. 
+// - clusterList: list of clusters
+// - nodeInfos: list of NodeInfos (must contain all internal nodes)
+// - clusterBlocks: list of root blocks (one for each cluster)
+// - votes: lists of votes for each cluster (one list for each cluster)
+// Returns:
+// - the list of quorum certificates for all clusters
+func ConstructClusterRootQCsFromVotes(log zerolog.Logger, clusterList flow.ClusterList, nodeInfos []bootstrap.NodeInfo, clusterBlocks []*cluster.Block, votes [][]*hotstuff.Vote) []*flow.QuorumCertificate {
+	if len(clusterBlocks) != len(clusterList) {
+		log.Fatal().Int("len(clusterBlocks)", len(clusterBlocks)).Int("len(clusterList)", len(clusterList)).
+			Msg("number of clusters needs to equal number of cluster blocks")
+	}
+	if len(votes) != len(clusterList) {
+		log.Fatal().Int("len(votes)", len(votes)).Int("len(clusterList)", len(clusterList)).
+			Msg("number of groups of votes needs to equal number of clusters")
+	}
+
+	qcs := make([]*flow.QuorumCertificate, len(clusterBlocks))
+	for i, clusterMembers := range clusterList {
+		clusterBlock := clusterBlocks[i]
+		clusterVotes := votes[i]
+		signers := filterClusterSigners(clusterMembers, nodeInfos)
+		log.Info().Msgf("producing QC for cluster (index: %d, size: %d) from %d votes", i, len(clusterMembers), len(clusterVotes))
+
+		qc, err := GenerateClusterRootQCFromVotes(signers, clusterMembers, clusterBlock, clusterVotes)
+		if err != nil {
+			log.Fatal().Err(err).Int("cluster index", i).Msg("generating collector cluster root QC failed")
+		}
+		qcs[i] = qc
+	}
+
+	return qcs
+}
+
 // Filters a list of nodes to include only nodes that will sign the QC for the
 // given cluster. The resulting list of nodes is only nodes that are in the
 // given cluster AND are not partner nodes (ie. we have the private keys).
diff --git a/cmd/bootstrap/transit/README.md b/cmd/bootstrap/transit/README.md index 3ff8e94cb97..8173dedd5df 100644 --- a/cmd/bootstrap/transit/README.md +++ b/cmd/bootstrap/transit/README.md @@ -2,7 +2,8 @@ The transit script is an utility used by node operators to upload and download relevant data before and after a Flow spork. It is used to download the root snapshot after a spork. -Additionally, for a consensus node, it is used to upload transit keys and to submit root block votes. +Additionally, for a consensus node, it is used to upload transit keys and to submit root block votes, +and for a collection node, it is used to submit cluster root block votes. ## Server token @@ -83,3 +84,30 @@ Running `transit push-transit-key` will perform the following actions: - `transit-key.priv.` 1. Upload the node's public files to the server - `transit-key.pub.` + +## Collection nodes + +The transit script has three commands applicable to collection nodes: + +```shell +$ transit pull-clustering -t ${server-token} -d ${bootstrap-dir} +$ transit generate-cluster-block-vote -t ${server-token} -d ${bootstrap-dir} +$ transit push-cluster-block-vote -t ${server-token} -d ${bootstrap-dir} -v ${vote-file} +``` + +### Pull Clustering Assignment + +Running `transit pull-clustering` will perform the following actions: + +1. Fetch the assignment of collection nodes to clusters for the upcoming spork and write it to `/public-root-information/root-clustering.json` + +### Sign Cluster Root Block + +After the root block and random beacon key have been fetched, running `transit generate-cluster-block-vote` will: + +1. Create a signature over the cluster root block, for the cluster the node is assigned to, using the node's private staking key. +2. 
Store the resulting vote to the file `/private-root-information/private-node-info_/root-cluster-block-vote.json` + +### Upload Vote + +Once a vote has been generated, running `transit push-cluster-block-vote` will upload the vote file to the server. diff --git a/cmd/bootstrap/transit/cmd/generate_cluster_block_vote.go b/cmd/bootstrap/transit/cmd/generate_cluster_block_vote.go new file mode 100644 index 00000000000..87346c8a9fc --- /dev/null +++ b/cmd/bootstrap/transit/cmd/generate_cluster_block_vote.go @@ -0,0 +1,121 @@ +package cmd + +import ( + "encoding/json" + "fmt" + "path/filepath" + + "github.com/spf13/cobra" + + "github.com/onflow/flow-go/cmd" + cmd2 "github.com/onflow/flow-go/cmd/bootstrap/cmd" + "github.com/onflow/flow-go/consensus/hotstuff/model" + "github.com/onflow/flow-go/consensus/hotstuff/verification" + "github.com/onflow/flow-go/model/bootstrap" + "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/module/local" + "github.com/onflow/flow-go/state/cluster" + "github.com/onflow/flow-go/utils/io" +) + +var generateClusterVoteCmd = &cobra.Command{ + Use: "generate-cluster-block-vote", + Short: "Generate cluster block vote", + Run: generateClusterVote, +} + +func init() { + rootCmd.AddCommand(generateClusterVoteCmd) + addGenerateClusterVoteCmdFlags() +} + +func addGenerateClusterVoteCmdFlags() { + generateClusterVoteCmd.Flags().StringVarP(&flagOutputDir, "outputDir", "o", "", "ouput directory for vote files; if not set defaults to bootstrap directory") +} + +func generateClusterVote(c *cobra.Command, args []string) { + log.Info().Msg("generating root block vote") + + nodeIDString, err := readNodeID() + if err != nil { + log.Fatal().Err(err).Msg("could not read node ID") + } + + nodeID, err := flow.HexStringToIdentifier(nodeIDString) + if err != nil { + log.Fatal().Err(err).Msg("could not parse node ID") + } + + nodeInfo, err := cmd.LoadPrivateNodeInfo(flagBootDir, nodeID) + if err != nil { + log.Fatal().Err(err).Msg("could not load 
private node info") + } + + stakingPrivKey := nodeInfo.StakingPrivKey.PrivateKey + identity := flow.IdentitySkeleton{ + NodeID: nodeID, + Address: nodeInfo.Address, + Role: nodeInfo.Role, + InitialWeight: flow.DefaultInitialWeight, + StakingPubKey: stakingPrivKey.PublicKey(), + NetworkPubKey: nodeInfo.NetworkPrivKey.PrivateKey.PublicKey(), + } + + me, err := local.New(identity, nodeInfo.StakingPrivKey.PrivateKey) + if err != nil { + log.Fatal().Err(err).Msg("creating local signer abstraction failed") + } + + path := filepath.Join(flagBootDir, bootstrap.PathClusteringData) + // If output directory is specified, use it for the root-clustering.json + if flagOutputDir != "" { + path = filepath.Join(flagOutputDir, "root-clustering.json") + } + + data, err := io.ReadFile(path) + if err != nil { + log.Fatal().Err(err).Msg("could not read clustering file") + } + + var clustering cmd2.IntermediaryClusteringData + err = json.Unmarshal(data, &clustering) + if err != nil { + log.Fatal().Err(err).Msg("could not unmarshal clustering data") + } + + var myCluster flow.IdentifierList + for _, assignment := range clustering.Assignments { + if assignment.Contains(me.NodeID()) { + myCluster = assignment + } + } + if myCluster == nil { + log.Fatal().Msg("node not a member of any clusters") + } + clusterBlock, err := cluster.CanonicalRootBlock(clustering.EpochCounter, myCluster) + if err != nil { + log.Fatal().Err(err).Msg("could not create canonical root cluster block") + } + + // generate root block vote + vote, err := verification.NewStakingSigner(me).CreateVote(model.GenesisBlockFromFlow(clusterBlock.ToHeader())) + if err != nil { + log.Fatal().Err(err).Msgf("could not create cluster vote for participant %v", me.NodeID()) + } + + voteFile := fmt.Sprintf(bootstrap.PathNodeRootClusterBlockVote, nodeID) + + // By default, use the bootstrap directory for storing the vote file + voteFilePath := filepath.Join(flagBootDir, voteFile) + + // If output directory is specified, use it for the 
vote file path + if flagOutputDir != "" { + voteFilePath = filepath.Join(flagOutputDir, "root-cluster-block-vote.json") + } + + if err = io.WriteJSON(voteFilePath, vote); err != nil { + log.Fatal().Err(err).Msg("could not write vote to file") + } + + log.Info().Msgf("node %v successfully generated vote file for root cluster block %v", nodeID, clusterBlock.ID()) +} diff --git a/cmd/bootstrap/transit/cmd/pull_clustering.go b/cmd/bootstrap/transit/cmd/pull_clustering.go new file mode 100644 index 00000000000..78f558db9b9 --- /dev/null +++ b/cmd/bootstrap/transit/cmd/pull_clustering.go @@ -0,0 +1,61 @@ +package cmd + +import ( + "context" + "path/filepath" + "time" + + "github.com/spf13/cobra" + + "github.com/onflow/flow-go/cmd/bootstrap/gcs" + "github.com/onflow/flow-go/model/bootstrap" +) + +var pullClusteringCmd = &cobra.Command{ + Use: "pull-clustering", + Short: "Pull clustering assignment for the first epoch of this spork. This is used to generate a root cluster block vote for this node's assigned cluster.", + Run: pullClustering, +} + +func init() { + rootCmd.AddCommand(pullClusteringCmd) + addPullClusteringFlags() +} + +func addPullClusteringFlags() { + pullClusteringCmd.Flags().StringVarP(&flagToken, "token", "t", "", "token provided by the Flow team to access the Transit server") + pullClusteringCmd.Flags().StringVarP(&flagBucketName, "bucket-name", "g", "flow-genesis-bootstrap", `bucket for pulling root clustering`) + pullClusteringCmd.Flags().StringVarP(&flagOutputDir, "outputDir", "o", "", "output directory for clustering file; if not set defaults to bootstrap directory") + _ = pullClusteringCmd.MarkFlagRequired("token") +} + +func pullClustering(c *cobra.Command, args []string) { + ctx, cancel := context.WithTimeout(context.Background(), time.Second*30) + defer cancel() + + // create new bucket instance with Flow Bucket name + bucket := gcs.NewGoogleBucket(flagBucketName) + + // initialize a new client to GCS + client, err := bucket.NewClient(ctx) + if 
err != nil { + log.Fatal().Err(err).Msgf("error trying get new google bucket client") + } + defer client.Close() + + log.Info().Msg("downloading clustering assignment") + + clusteringFile := filepath.Join(flagToken, bootstrap.PathClusteringData) + fullClusteringPath := filepath.Join(flagBootDir, bootstrap.PathClusteringData) + if flagOutputDir != "" { + fullClusteringPath = filepath.Join(flagOutputDir, "root-clustering.json") + } + + log.Info().Str("source", clusteringFile).Str("dest", fullClusteringPath).Msgf("downloading clustering file from transit servers") + err = bucket.DownloadFile(ctx, client, fullClusteringPath, clusteringFile) + if err != nil { + log.Fatal().Err(err).Msgf("could not download google bucket file") + } + + log.Info().Msg("successfully downloaded clustering") +} diff --git a/cmd/bootstrap/transit/cmd/push_cluster_block_vote.go b/cmd/bootstrap/transit/cmd/push_cluster_block_vote.go new file mode 100644 index 00000000000..0526b01468d --- /dev/null +++ b/cmd/bootstrap/transit/cmd/push_cluster_block_vote.go @@ -0,0 +1,88 @@ +package cmd + +import ( + "context" + "fmt" + "path/filepath" + "time" + + "github.com/spf13/cobra" + + "github.com/onflow/flow-go/cmd/bootstrap/gcs" + "github.com/onflow/flow-go/model/bootstrap" + "github.com/onflow/flow-go/model/flow" +) + +var pushClusterVoteCmd = &cobra.Command{ + Use: "push-cluster-block-vote", + Short: "Push cluster block vote", + Run: pushClusterVote, +} + +func init() { + rootCmd.AddCommand(pushClusterVoteCmd) + addPushClusterVoteCmdFlags() +} + +func addPushClusterVoteCmdFlags() { + defaultVoteFilePath := fmt.Sprintf(bootstrap.PathNodeRootClusterBlockVote, "") + pushClusterVoteCmd.Flags().StringVarP(&flagToken, "token", "t", "", "token provided by the Flow team to access the Transit server") + pushClusterVoteCmd.Flags().StringVarP(&flagVoteFile, "vote-file", "v", "", fmt.Sprintf("path under bootstrap directory of the vote file to upload (default: %s)", defaultVoteFilePath)) + 
pushClusterVoteCmd.Flags().StringVarP(&flagVoteFilePath, "vote-file-dir", "d", "", "directory for vote file to upload, ONLY for vote files outside the bootstrap directory") + pushClusterVoteCmd.Flags().StringVarP(&flagBucketName, "bucket-name", "g", "flow-genesis-bootstrap", `bucket for pushing root cluster block vote files`) + + _ = pushClusterVoteCmd.MarkFlagRequired("token") + pushClusterVoteCmd.MarkFlagsMutuallyExclusive("vote-file", "vote-file-dir") +} + +func pushClusterVote(c *cobra.Command, args []string) { + nodeIDString, err := readNodeID() + if err != nil { + log.Fatal().Err(err).Msg("could not read node ID") + } + + nodeID, err := flow.HexStringToIdentifier(nodeIDString) + if err != nil { + log.Fatal().Err(err).Msg("could not parse node ID") + } + + voteFile := flagVoteFile + + // If --vote-file-dir is not specified, use the bootstrap directory + voteFilePath := filepath.Join(flagBootDir, voteFile) + + // if --vote-file is not specified, use default file name within bootstrap directory + if voteFile == "" { + voteFile = fmt.Sprintf(bootstrap.PathNodeRootClusterBlockVote, nodeID) + voteFilePath = filepath.Join(flagBootDir, voteFile) + } + + // If vote-file-dir is specified, use it to construct the full path to the vote file (with default file name) + if flagVoteFilePath != "" { + voteFilePath = filepath.Join(flagVoteFilePath, "root-cluster-block-vote.json") + } + + destination := filepath.Join(flagToken, fmt.Sprintf(bootstrap.FilenameRootClusterBlockVote, nodeID)) + + log.Info().Msg("pushing root cluster block vote") + + ctx, cancel := context.WithTimeout(context.Background(), time.Second*30) + defer cancel() + + // create new bucket instance with Flow Bucket name + bucket := gcs.NewGoogleBucket(flagBucketName) + + // initialize a new client to GCS + client, err := bucket.NewClient(ctx) + if err != nil { + log.Fatal().Err(err).Msgf("error trying get new google bucket client") + } + defer client.Close() + + err = bucket.UploadFile(ctx, client, 
destination, voteFilePath) + if err != nil { + log.Fatal().Err(err).Msg("failed to upload cluster vote file") + } + + log.Info().Msg("successfully pushed cluster vote file") +} diff --git a/consensus/hotstuff/committees/cluster_committee_test.go b/consensus/hotstuff/committees/cluster_committee_test.go index 1a199bd3557..c4343143485 100644 --- a/consensus/hotstuff/committees/cluster_committee_test.go +++ b/consensus/hotstuff/committees/cluster_committee_test.go @@ -49,7 +49,7 @@ func (suite *ClusterSuite) SetupTest() { suite.members = unittest.IdentityListFixture(5, unittest.WithRole(flow.RoleCollection)) suite.me = suite.members[0] counter := uint64(1) - rootBlock, err := clusterstate.CanonicalRootBlock(counter, suite.members.ToSkeleton()) + rootBlock, err := clusterstate.CanonicalRootBlock(counter, suite.members.ToSkeleton().NodeIDs()) suite.Require().NoError(err) suite.root = rootBlock diff --git a/engine/collection/test/cluster_switchover_test.go b/engine/collection/test/cluster_switchover_test.go index a0ae19371a2..8eb42d61314 100644 --- a/engine/collection/test/cluster_switchover_test.go +++ b/engine/collection/test/cluster_switchover_test.go @@ -173,7 +173,7 @@ func NewClusterSwitchoverTestCase(t *testing.T, conf ClusterSwitchoverTestConf) } // generate root cluster block - rootClusterBlock, err := cluster.CanonicalRootBlock(commit.Counter, model.ToIdentityList(signers).ToSkeleton()) + rootClusterBlock, err := cluster.CanonicalRootBlock(commit.Counter, clusterQC.VoterIDs) require.NoError(tc.T(), err) // generate cluster root qc qc, err := run.GenerateClusterRootQC(signers, model.ToIdentityList(signers).ToSkeleton(), rootClusterBlock) diff --git a/integration/benchnet2/Makefile b/integration/benchnet2/Makefile index 208a3ffd073..d716340997c 100644 --- a/integration/benchnet2/Makefile +++ b/integration/benchnet2/Makefile @@ -56,6 +56,15 @@ gen-bootstrap: -o ../../../bootstrap/keys echo {} > ./bootstrap/conf/partner-stakes.json mkdir ./bootstrap/partner-nodes + 
cd flow-go-bootstrap/cmd/bootstrap && go run . cluster-assignment \ + --epoch-counter 0 \ + --collection-clusters 1 \ + --clustering-random-seed 00000000000000000000000000000000000000000000000000000000deadbeef \ + --config ../../../bootstrap/conf/node-config.json \ + -o ../../../bootstrap/ \ + --partner-dir ../../../bootstrap/partner-nodes \ + --partner-weights ../../../bootstrap/conf/partner-stakes.json \ + --internal-priv-dir ../../../bootstrap/keys/private-root-information cd flow-go-bootstrap/cmd/bootstrap && go run . rootblock \ --root-chain bench \ --root-height 0 \ @@ -65,12 +74,15 @@ gen-bootstrap: --epoch-length $(EPOCH_LEN) \ --epoch-staking-phase-length $(EPOCH_STAKING_PHASE_LEN) \ --epoch-dkg-phase-length $(DKG_PHASE_LEN) \ + --random-seed 00000000000000000000000000000000000000000000000000000000deadbeef \ --kvstore-version=$(KVSTORE_VERSION) \ --kvstore-epoch-extension-view-count=$(EPOCH_EXTENSION_LEN) \ --kvstore-finalization-safety-threshold=$(FINALIZATION_SAFETY_THRESHOLD)\ --collection-clusters 1 \ --protocol-version=0 \ --use-default-epoch-timing \ + --intermediary-clustering-data ../../../bootstrap/public-root-information/root-clustering.json \ + --cluster-votes-dir ../../../bootstrap/public-root-information/root-block-votes/ \ --config ../../../bootstrap/conf/node-config.json \ -o ../../../bootstrap/ \ --partner-dir ../../../bootstrap/partner-nodes \ diff --git a/integration/epochs/epoch_qc_test.go b/integration/epochs/epoch_qc_test.go index afc1a34b9e0..209d829559e 100644 --- a/integration/epochs/epoch_qc_test.go +++ b/integration/epochs/epoch_qc_test.go @@ -66,7 +66,7 @@ func (s *Suite) TestEpochQuorumCertificate() { // find cluster and create root block cluster, _, _ := clustering.ByNodeID(node.NodeID) - rootBlock, err := clusterstate.CanonicalRootBlock(uint64(epochCounter), cluster) + rootBlock, err := clusterstate.CanonicalRootBlock(epochCounter, cluster.NodeIDs()) s.Require().NoError(err) key, signer := 
test.AccountKeyGenerator().NewWithSigner() diff --git a/integration/testnet/network.go b/integration/testnet/network.go index 5d59aae34a3..dcd683c95b0 100644 --- a/integration/testnet/network.go +++ b/integration/testnet/network.go @@ -1419,7 +1419,7 @@ func setupClusterGenesisBlockQCs(nClusters uint, epochCounter uint64, confs []Co participants := participantsUnsorted.Sort(flow.Canonical[flow.Identity]) collectors := participants.Filter(filter.HasRole[flow.Identity](flow.RoleCollection)).ToSkeleton() assignments := unittest.ClusterAssignment(nClusters, collectors) - clusters, err := factory.NewClusterList(assignments, collectors) + _, err := factory.NewClusterList(assignments, collectors) if err != nil { return nil, nil, nil, fmt.Errorf("could not create cluster list: %w", err) } @@ -1427,27 +1427,27 @@ func setupClusterGenesisBlockQCs(nClusters uint, epochCounter uint64, confs []Co rootBlocks := make([]*cluster.Block, 0, nClusters) qcs := make([]*flow.QuorumCertificate, 0, nClusters) - for _, cluster := range clusters { + for _, assignment := range assignments { // generate root cluster block - block, err := clusterstate.CanonicalRootBlock(epochCounter, cluster) + block, err := clusterstate.CanonicalRootBlock(epochCounter, assignment) if err != nil { return nil, nil, nil, fmt.Errorf("failed to generate canonical root block: %w", err) } lookup := make(map[flow.Identifier]struct{}) - for _, node := range cluster { - lookup[node.NodeID] = struct{}{} + for _, nodeID := range assignment { + lookup[nodeID] = struct{}{} } // gather cluster participants - clusterNodeInfos := make([]bootstrap.NodeInfo, 0, len(cluster)) + clusterNodeInfos := make([]bootstrap.NodeInfo, 0, len(assignment)) for _, conf := range confs { _, exists := lookup[conf.NodeID] if exists { clusterNodeInfos = append(clusterNodeInfos, conf.NodeInfo) } } - if len(cluster) != len(clusterNodeInfos) { // sanity check + if len(assignment) != len(clusterNodeInfos) { // sanity check return nil, nil, nil, 
fmt.Errorf("requiring a node info for each cluster participant") } diff --git a/integration/tests/collection/suite.go b/integration/tests/collection/suite.go index 218a9bca097..a8b5420d170 100644 --- a/integration/tests/collection/suite.go +++ b/integration/tests/collection/suite.go @@ -339,7 +339,7 @@ func (suite *CollectorSuite) ClusterStateFor(id flow.Identifier) *clusterstateim setup, ok := suite.net.Result().ServiceEvents[0].Event.(*flow.EpochSetup) suite.Require().True(ok, "could not get root seal setup") - rootBlock, err := clusterstate.CanonicalRootBlock(setup.Counter, myCluster) + rootBlock, err := clusterstate.CanonicalRootBlock(setup.Counter, myCluster.NodeIDs()) suite.Require().NoError(err) node := suite.net.ContainerByID(id) diff --git a/model/bootstrap/filenames.go b/model/bootstrap/filenames.go index 8da9f564fd4..038007e7cd5 100644 --- a/model/bootstrap/filenames.go +++ b/model/bootstrap/filenames.go @@ -23,6 +23,7 @@ var ( DirnameRootBlockVotes = filepath.Join(DirnamePublicBootstrap, "root-block-votes") FileNamePartnerWeights = "partner-weights.json" + PathClusteringData = filepath.Join(DirnamePublicBootstrap, "root-clustering.json") PathRootBlockData = filepath.Join(DirnamePublicBootstrap, "root-block.json") PathIntermediaryBootstrappingData = filepath.Join(DirnamePublicBootstrap, "intermediary-bootstrapping-data.json") PathRootProtocolStateSnapshot = filepath.Join(DirnamePublicBootstrap, "root-protocol-state-snapshot.json") @@ -36,12 +37,15 @@ var ( FilenameSecretsEncryptionKey = "secretsdb-key" PathPrivNodeInfoPrefix = "node-info.priv." FilenameRootBlockVotePrefix = "root-block-vote." + FilenameClusterBlockVotePrefix = "root-cluster-block-vote." 
PathRootDKGData = filepath.Join(DirPrivateRoot, "root-dkg-data.priv.json") PathNodeInfoPriv = filepath.Join(DirPrivateRoot, "private-node-info_%v", "node-info.priv.json") // %v will be replaced by NodeID PathNodeMachineAccountPrivateKey = filepath.Join(DirPrivateRoot, "private-node-info_%v", "node-machine-account-key.priv.json") // %v will be replaced by NodeID PathNodeMachineAccountInfoPriv = filepath.Join(DirPrivateRoot, "private-node-info_%v", "node-machine-account-info.priv.json") // %v will be replaced by NodeID PathRandomBeaconPriv = filepath.Join(DirPrivateRoot, "private-node-info_%v", FilenameRandomBeaconPriv) // %v will be replaced by NodeID PathNodeRootBlockVote = filepath.Join(DirPrivateRoot, "private-node-info_%v", "root-block-vote.json") + PathNodeRootClusterBlockVote = filepath.Join(DirPrivateRoot, "private-node-info_%v", "root-cluster-block-vote.json") // %v will be replaced by NodeID FilenameRootBlockVote = FilenameRootBlockVotePrefix + "%v.json" + FilenameRootClusterBlockVote = FilenameClusterBlockVotePrefix + "%v.json" PathSecretsEncryptionKey = filepath.Join(DirPrivateRoot, "private-node-info_%v", FilenameSecretsEncryptionKey) // %v will be replaced by NodeID ) diff --git a/module/epochs/qc_voter.go b/module/epochs/qc_voter.go index 82392c7f861..ff814645492 100644 --- a/module/epochs/qc_voter.go +++ b/module/epochs/qc_voter.go @@ -99,7 +99,7 @@ func (voter *RootQCVoter) Vote(ctx context.Context, epoch protocol.TentativeEpoc log.Info().Msg("preparing to generate vote for cluster root qc") // create the canonical root block for our cluster - root, err := clusterstate.CanonicalRootBlock(counter, cluster) + root, err := clusterstate.CanonicalRootBlock(counter, cluster.NodeIDs()) if err != nil { return fmt.Errorf("could not create cluster root block: %w", err) } diff --git a/state/cluster/root_block.go b/state/cluster/root_block.go index c43b9cd8167..7b408dc8d82 100644 --- a/state/cluster/root_block.go +++ b/state/cluster/root_block.go @@ -15,8 +15,8 
@@ func CanonicalClusterID(epoch uint64, participants flow.IdentifierList) flow.Cha // CanonicalRootBlock returns the canonical root block for the given // cluster in the given epoch. It contains an empty collection referencing -func CanonicalRootBlock(epoch uint64, participants flow.IdentitySkeletonList) (*cluster.Block, error) { - chainID := CanonicalClusterID(epoch, participants.NodeIDs()) +func CanonicalRootBlock(epoch uint64, participants flow.IdentifierList) (*cluster.Block, error) { + chainID := CanonicalClusterID(epoch, participants) rootHeaderBody, err := flow.NewRootHeaderBody(flow.UntrustedHeaderBody{ ChainID: chainID, ParentID: flow.ZeroID, diff --git a/state/protocol/inmem/epoch.go b/state/protocol/inmem/epoch.go index dd86f938a20..14d8579f354 100644 --- a/state/protocol/inmem/epoch.go +++ b/state/protocol/inmem/epoch.go @@ -234,7 +234,7 @@ func (es *committedEpoch) Cluster(index uint) (protocol.Cluster, error) { return nil, fmt.Errorf("could not encode signer indices for rootQCVoteData.VoterIDs: %w", err) } - rootBlock, err := cluster.CanonicalRootBlock(epochCounter, members) + rootBlock, err := cluster.CanonicalRootBlock(epochCounter, members.NodeIDs()) if err != nil { return nil, fmt.Errorf("could not generate canonical root block: %w", err) }