Skip to content
Merged
Show file tree
Hide file tree
Changes from 7 commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
4 changes: 2 additions & 2 deletions cmd/bootstrap/cmd/block.go
Original file line number Diff line number Diff line change
Expand Up @@ -58,10 +58,10 @@ func constructRootEpochEvents(
clusterQCs []*flow.QuorumCertificate,
dkgData dkg.ThresholdKeySet,
dkgIndexMap flow.DKGIndexMap,
csprg random.Rand,
rng random.Rand,
) (*flow.EpochSetup, *flow.EpochCommit, error) {
randomSource := make([]byte, flow.EpochSetupRandomSourceLength)
csprg.Read(randomSource)
rng.Read(randomSource)
epochSetup, err := flow.NewEpochSetup(
flow.UntrustedEpochSetup{
Counter: flagEpochCounter,
Expand Down
187 changes: 187 additions & 0 deletions cmd/bootstrap/cmd/clustering.go
Original file line number Diff line number Diff line change
@@ -0,0 +1,187 @@
package cmd
Copy link
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

The bootstrap CLI readme has some general documentation about this process. It also has some example commands which can be used to test the full bootstrapping flow. Could you update these example commands and the relevant documentation in the README?


import (
"fmt"
"path/filepath"

"github.com/spf13/cobra"

"github.com/onflow/flow-go/cmd"
"github.com/onflow/flow-go/cmd/bootstrap/run"
"github.com/onflow/flow-go/cmd/util/cmd/common"
hotstuff "github.com/onflow/flow-go/consensus/hotstuff/model"
model "github.com/onflow/flow-go/model/bootstrap"
"github.com/onflow/flow-go/model/flow"
cluster2 "github.com/onflow/flow-go/state/cluster"
"github.com/onflow/flow-go/state/protocol/prg"
)

var (
	// flagClusteringRandomSeed holds the hex-decoded seed provided via the
	// required --clustering-random-seed flag; it seeds the PRG used to
	// deterministically generate the collector cluster assignment.
	flagClusteringRandomSeed []byte
)

// clusterAssignmentCmd is the `cluster-assignment` bootstrap sub-command.
// It computes the collector cluster assignment from partner/internal node
// info and a random seed, and serializes the result (with the epoch counter)
// to disk; see clusterAssignment for the implementation.
var clusterAssignmentCmd = &cobra.Command{
	Use:   "cluster-assignment",
	Short: "Generate cluster assignment",
	Long:  `Generate cluster assignment for collection nodes based on partner and internal node info and weights. Serialize into file with Epoch Counter`,
	Run:   clusterAssignment,
}

// init registers the cluster-assignment sub-command with the bootstrap root
// command and attaches its CLI flags.
func init() {
	rootCmd.AddCommand(clusterAssignmentCmd)
	addClusterAssignmentCmdFlags()
}

// addClusterAssignmentCmdFlags registers the CLI flags of the
// cluster-assignment command and marks the mandatory ones as required.
func addClusterAssignmentCmdFlags() {
	// required parameters for network configuration and generation of root node identities
	clusterAssignmentCmd.Flags().StringVar(&flagConfig, "config", "",
		"path to a JSON file containing multiple node configurations (fields Role, Address, Weight)")
	clusterAssignmentCmd.Flags().StringVar(&flagInternalNodePrivInfoDir, "internal-priv-dir", "", "path to directory "+
		"containing the output from the `keygen` command for internal nodes")
	clusterAssignmentCmd.Flags().StringVar(&flagPartnerNodeInfoDir, "partner-dir", "", "path to directory "+
		"containing one JSON file starting with node-info.pub.<NODE_ID>.json for every partner node (fields "+
		" in the JSON file: Role, Address, NodeID, NetworkPubKey, StakingPubKey)")
	clusterAssignmentCmd.Flags().StringVar(&deprecatedFlagPartnerStakes, "partner-stakes", "", "deprecated: use --partner-weights")
	// fix: help text said "stake", inconsistent with the weights terminology
	// this command establishes (--partner-stakes is deprecated in favor of weights)
	clusterAssignmentCmd.Flags().StringVar(&flagPartnerWeights, "partner-weights", "", "path to a JSON file containing "+
		"a map from partner node's NodeID to their weight")

	cmd.MarkFlagRequired(clusterAssignmentCmd, "config")
	cmd.MarkFlagRequired(clusterAssignmentCmd, "internal-priv-dir")
	cmd.MarkFlagRequired(clusterAssignmentCmd, "partner-dir")
	cmd.MarkFlagRequired(clusterAssignmentCmd, "partner-weights")

	// required parameters for generation of cluster root blocks
	clusterAssignmentCmd.Flags().Uint64Var(&flagEpochCounter, "epoch-counter", 0, "epoch counter for the epoch beginning with the root block")
	cmd.MarkFlagRequired(clusterAssignmentCmd, "epoch-counter")

	// the seed is a required explicit input so the clustering is reproducible
	clusterAssignmentCmd.Flags().BytesHexVar(&flagClusteringRandomSeed, "clustering-random-seed", nil, "random seed to generate the clustering assignment")
	cmd.MarkFlagRequired(clusterAssignmentCmd, "clustering-random-seed")
}

// clusterAssignment is the Run handler of the cluster-assignment command.
// It reads partner and internal node information, writes the combined public
// node info list, computes the collector cluster assignment from the provided
// random seed, writes the clustering data (with the epoch counter) to disk,
// and finally generates cluster root block votes for internal collection nodes.
// Any failure is fatal, aborting the bootstrap run.
func clusterAssignment(cmd *cobra.Command, args []string) {
	// maintain backward compatibility with old flag name
	if deprecatedFlagPartnerStakes != "" {
		log.Warn().Msg("using deprecated flag --partner-stakes (use --partner-weights instead)")
		if flagPartnerWeights == "" {
			flagPartnerWeights = deprecatedFlagPartnerStakes
		} else {
			log.Fatal().Msg("cannot use both --partner-stakes and --partner-weights flags (use only --partner-weights)")
		}
	}
	// Read partner node's information and internal node's information.
	// With "internal nodes" we reference nodes, whose private keys we have. In comparison,
	// for "partner nodes" we generally do not have their keys. However, we allow some overlap,
	// in that we tolerate a configuration where information about an "internal node" is also
	// duplicated in the list of "partner nodes".
	log.Info().Msg("collecting partner network and staking keys")
	rawPartnerNodes, err := common.ReadFullPartnerNodeInfos(log, flagPartnerWeights, flagPartnerNodeInfoDir)
	if err != nil {
		log.Fatal().Err(err).Msg("failed to read full partner node infos")
	}
	log.Info().Msg("")

	log.Info().Msg("generating internal private networking and staking keys")
	internalNodes, err := common.ReadFullInternalNodeInfos(log, flagInternalNodePrivInfoDir, flagConfig)
	if err != nil {
		log.Fatal().Err(err).Msg("failed to read full internal node infos")
	}
	log.Info().Msg("")

	// we now convert to the strict meaning of: "internal nodes" vs "partner nodes"
	// • "internal nodes" we have their private keys for
	// • "partner nodes" we don't have the keys for
	// • both sets are disjoint (no common nodes)
	log.Info().Msg("remove internal partner nodes")
	partnerNodes := common.FilterInternalPartners(rawPartnerNodes, internalNodes)
	log.Info().Msgf("removed %d internal partner nodes", len(rawPartnerNodes)-len(partnerNodes))

	log.Info().Msg("checking constraints on consensus nodes")
	checkConstraints(partnerNodes, internalNodes)
	log.Info().Msg("")

	log.Info().Msg("assembling network and staking keys")
	stakingNodes, err := mergeNodeInfos(internalNodes, partnerNodes)
	if err != nil {
		// fix: Msg instead of Msgf — the message has no format arguments
		log.Fatal().Err(err).Msg("failed to merge node infos")
	}
	publicInfo, err := model.ToPublicNodeInfoList(stakingNodes)
	if err != nil {
		// fix: attach the underlying error to the fatal log entry (was dropped)
		log.Fatal().Err(err).Msg("failed to read public node info")
	}
	err = common.WriteJSON(model.PathNodeInfosPub, flagOutdir, publicInfo)
	if err != nil {
		log.Fatal().Err(err).Msg("failed to write json")
	}
	log.Info().Msgf("wrote file %s/%s", flagOutdir, model.PathNodeInfosPub)
	log.Info().Msg("")

	// Convert to IdentityList
	partnerList := model.ToIdentityList(partnerNodes)
	internalList := model.ToIdentityList(internalNodes)

	// seed a deterministic PRG so the cluster assignment is reproducible from the seed
	clusteringPrg, err := prg.New(flagClusteringRandomSeed, prg.BootstrapClusterAssignment, nil)
	if err != nil {
		log.Fatal().Err(err).Msg("failed to initialize pseudorandom generator")
	}

	log.Info().Msg("computing collection node clusters")
	assignments, clusters, err := common.ConstructClusterAssignment(log, partnerList, internalList, int(flagCollectionClusters), clusteringPrg)
	if err != nil {
		log.Fatal().Err(err).Msg("unable to generate cluster assignment")
	}
	log.Info().Msg("")

	// Output assignment with epoch counter
	output := IntermediaryClusteringData{
		EpochCounter: flagEpochCounter,
		Assignments:  assignments,
		Clusters:     clusters,
	}
	err = common.WriteJSON(model.PathClusteringData, flagOutdir, output)
	if err != nil {
		log.Fatal().Err(err).Msg("failed to write json")
	}
	log.Info().Msgf("wrote file %s/%s", flagOutdir, model.PathClusteringData)
	log.Info().Msg("")

	log.Info().Msg("constructing and writing cluster block votes for internal nodes")
	constructClusterRootVotes(
		output,
		model.FilterByRole(internalNodes, flow.RoleCollection),
	)
	log.Info().Msg("")
}

// constructClusterRootVotes generates and writes vote files for internal collector nodes with private keys available.
// For each cluster it builds the canonical cluster root block, collects the private NodeInfos
// of the internal collectors assigned to that cluster, creates their votes for that block, and
// writes one JSON vote file per signer. Partner collectors in the assignment are skipped, as we
// do not hold their keys. Any failure is fatal, aborting the bootstrap run.
func constructClusterRootVotes(data IntermediaryClusteringData, internalCollectors []model.NodeInfo) {
	// Index internal collectors by NodeID once, instead of re-scanning the slice for
	// every assigned node (the nested loop was O(clusters × assignment × collectors)).
	// Assumes NodeIDs are unique within internalCollectors, as elsewhere in bootstrapping.
	collectorsByID := make(map[flow.Identifier]model.NodeInfo, len(internalCollectors))
	for _, node := range internalCollectors {
		collectorsByID[node.NodeID] = node
	}

	for i := range data.Clusters {
		clusterRootBlock, err := cluster2.CanonicalRootBlock(data.EpochCounter, data.Assignments[i])
		if err != nil {
			log.Fatal().Err(err).Msg("could not construct cluster root block")
		}
		block := hotstuff.GenesisBlockFromFlow(clusterRootBlock.ToHeader())

		// collate private NodeInfos for internal nodes in this cluster
		signers := make([]model.NodeInfo, 0, len(data.Assignments[i]))
		for _, nodeID := range data.Assignments[i] {
			if node, ok := collectorsByID[nodeID]; ok {
				signers = append(signers, node)
			}
		}

		votes, err := run.CreateClusterRootBlockVotes(signers, block)
		if err != nil {
			log.Fatal().Err(err).Msg("could not create cluster root block votes")
		}
		for _, vote := range votes {
			path := filepath.Join(model.DirnameRootBlockVotes, fmt.Sprintf(model.FilenameRootClusterBlockVote, vote.SignerID))
			err = common.WriteJSON(path, flagOutdir, vote)
			if err != nil {
				log.Fatal().Err(err).Msg("failed to write json")
			}
			log.Info().Msgf("wrote file %s/%s", flagOutdir, path)
		}
	}
}
9 changes: 8 additions & 1 deletion cmd/bootstrap/cmd/finalize_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -68,12 +68,19 @@ func TestFinalize_HappyPath(t *testing.T) {
flagPartnerWeights = partnerWeights
flagInternalNodePrivInfoDir = internalPrivDir

flagIntermediaryClusteringDataPath = filepath.Join(bootDir, model.PathClusteringData)
flagRootClusterBlockVotesDir = filepath.Join(bootDir, model.DirnameRootBlockVotes)
flagEpochCounter = epochCounter

// clusterAssignment will generate the collector clusters
// In addition, it also generates votes from internal collector nodes
clusterAssignment(clusterAssignmentCmd, nil)

flagRootChain = chainName
flagRootParent = hex.EncodeToString(rootParent[:])
flagRootHeight = rootHeight
flagRootView = 1_000
flagRootCommit = hex.EncodeToString(rootCommit[:])
flagEpochCounter = epochCounter
flagNumViewsInEpoch = 100_000
flagNumViewsInStakingAuction = 50_000
flagNumViewsInDKGPhase = 2_000
Expand Down
11 changes: 10 additions & 1 deletion cmd/bootstrap/cmd/intermediary.go
Original file line number Diff line number Diff line change
Expand Up @@ -6,7 +6,7 @@ import (
)

// IntermediaryBootstrappingData stores data which needs to be passed between the
// 2 steps of the bootstrapping process: `rootblock` and `finalize`.
// last 2 steps of the bootstrapping process: `rootblock` and `finalize`.
// This structure is created in `rootblock`, written to disk, then read in `finalize`.
type IntermediaryBootstrappingData struct {
IntermediaryParamsData
Expand All @@ -31,3 +31,12 @@ type IntermediaryEpochData struct {
RootEpochSetup *flow.EpochSetup
RootEpochCommit *flow.EpochCommit
}

// IntermediaryClusteringData stores the collector cluster assignment and epoch counter.
// This is used for the collection nodes to construct and vote on their cluster root blocks,
// and also to pass data between the clustering command and the rootblock command.
type IntermediaryClusteringData struct {
	// EpochCounter is the counter of the epoch this clustering was generated for.
	EpochCounter uint64
	// Assignments lists, per cluster, the node IDs of the collection nodes assigned to it.
	Assignments flow.AssignmentList
	// Clusters holds the corresponding per-cluster participant lists
	// (presumably the identity-skeleton view of Assignments — see flow.ClusterList).
	Clusters flow.ClusterList
}
Loading
Loading