diff --git a/driver/csiplugin/gpfs.go b/driver/csiplugin/gpfs.go index 891f4f81a..d64226257 100644 --- a/driver/csiplugin/gpfs.go +++ b/driver/csiplugin/gpfs.go @@ -103,6 +103,7 @@ type ScaleDriver struct { ids *ScaleIdentityServer ns *ScaleNodeServer cs *ScaleControllerServer + gcs *ScaleGroupControllerServer connmap map[string]connectors.SpectrumScaleConnector cmap settings.ScaleSettingsConfigMap @@ -115,9 +116,10 @@ type ScaleDriver struct { // clusterMap map stores the cluster name as key and cluster details as value. clusterMap sync.Map - vcap []*csi.VolumeCapability_AccessMode - cscap []*csi.ControllerServiceCapability - nscap []*csi.NodeServiceCapability + vcap []*csi.VolumeCapability_AccessMode + cscap []*csi.ControllerServiceCapability + nscap []*csi.NodeServiceCapability + gcscap []*csi.GroupControllerServiceCapability } func GetScaleDriver(ctx context.Context) *ScaleDriver { @@ -150,6 +152,13 @@ func NewNodeServer(ctx context.Context, d *ScaleDriver) *ScaleNodeServer { } } +func NewGroupControllerServer(ctx context.Context, d *ScaleDriver) *ScaleGroupControllerServer { + klog.V(4).Infof("[%s] Starting NewGroupControllerServer", utils.GetLoggerId(ctx)) + return &ScaleGroupControllerServer{ + Driver: d, + } +} + func (driver *ScaleDriver) AddVolumeCapabilityAccessModes(ctx context.Context, vc []csi.VolumeCapability_AccessMode_Mode) error { klog.V(4).Infof("[%s] AddVolumeCapabilityAccessModes", utils.GetLoggerId(ctx)) var vca []*csi.VolumeCapability_AccessMode @@ -183,6 +192,17 @@ func (driver *ScaleDriver) AddNodeServiceCapabilities(ctx context.Context, nl [] return nil } +func (driver *ScaleDriver) AddGroupControllerServiceCapabilities(ctx context.Context, nl []csi.GroupControllerServiceCapability_RPC_Type) error { + klog.V(4).Infof("[%s] AddGroupControllerServiceCapabilities", utils.GetLoggerId(ctx)) + var gcs []*csi.GroupControllerServiceCapability + for _, n := range nl { + klog.V(4).Infof("[%s] Enabling group controller service capability: %v", utils.GetLoggerId(ctx), n.String()) + gcs = append(gcs, NewGroupControllerServiceCapability(n)) + } + driver.gcscap = gcs + return nil +} + func (driver *ScaleDriver) ValidateControllerServiceRequest(ctx context.Context, c csi.ControllerServiceCapability_RPC_Type) error { klog.Infof("[%s] ValidateControllerServiceRequest", utils.GetLoggerId(ctx)) if c == csi.ControllerServiceCapability_RPC_UNKNOWN { @@ -237,9 +257,15 @@ func (driver *ScaleDriver) SetupScaleDriver(ctx context.Context, name, vendorVer } _ = driver.AddNodeServiceCapabilities(ctx, ns) + gsc := []csi.GroupControllerServiceCapability_RPC_Type{ + csi.GroupControllerServiceCapability_RPC_CREATE_DELETE_GET_VOLUME_GROUP_SNAPSHOT, + } + _ = driver.AddGroupControllerServiceCapabilities(ctx, gsc) + driver.ids = NewIdentityServer(ctx, driver) driver.ns = NewNodeServer(ctx, driver) driver.cs = NewControllerServer(ctx, driver, scmap, cmap, primary) + driver.gcs = NewGroupControllerServer(ctx, driver) return nil } @@ -287,7 +313,7 @@ func (driver *ScaleDriver) PluginInitialize(ctx context.Context) (map[string]con func (driver *ScaleDriver) Run(ctx context.Context, endpoint string) { s := NewNonBlockingGRPCServer() - s.Start(endpoint, driver.ids, driver.cs, driver.ns) + s.Start(endpoint, driver.ids, driver.cs, driver.ns, driver.gcs) s.Wait() } diff --git a/driver/csiplugin/groupcontrollerserver.go b/driver/csiplugin/groupcontrollerserver.go new file mode 100644 index 000000000..533a041ad --- /dev/null +++ b/driver/csiplugin/groupcontrollerserver.go @@ -0,0 +1,588 @@ +/** + * Copyright 
2019 IBM Corp. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package scale + +import ( + "fmt" + "path/filepath" + "strconv" + "strings" + "time" + + "github.com/IBM/ibm-spectrum-scale-csi/driver/csiplugin/connectors" + "github.com/IBM/ibm-spectrum-scale-csi/driver/csiplugin/utils" + "github.com/container-storage-interface/spec/lib/go/csi" + "golang.org/x/net/context" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" + "google.golang.org/protobuf/types/known/timestamppb" + "k8s.io/klog/v2" +) + +const () + +type ScaleGroupControllerServer struct { + Driver *ScaleDriver +} + +// GroupControllerGetCapabilities implements the default GRPC callout. +func (gs *ScaleGroupControllerServer) GroupControllerGetCapabilities(ctx context.Context, req *csi.GroupControllerGetCapabilitiesRequest) (*csi.GroupControllerGetCapabilitiesResponse, error) { + loggerId := utils.GetLoggerId(ctx) + klog.Infof("[%s] GroupControllerGetCapabilities called with req: %#v", loggerId, req) + return &csi.GroupControllerGetCapabilitiesResponse{ + Capabilities: gs.Driver.gcscap, + }, nil +} + +// CreateVolumeGroupSnapshot Create VolumeGroup Snapshot +func (gs *ScaleGroupControllerServer) CreateVolumeGroupSnapshot(ctx context.Context, req *csi.CreateVolumeGroupSnapshotRequest) (*csi.CreateVolumeGroupSnapshotResponse, error) { //nolint:gocyclo,funlen + loggerId := utils.GetLoggerId(ctx) + klog.Infof("[%s] CreateVolumeGroupSnapshot - create CreateVolumeGroupSnapshot req: %v", loggerId, req) + + // req.SourceVolumeIds: [1;1;16603246530329299476;F0070B0A:6683CB02;0f7c070e-6183-462f-9573-38e7ae124e2a-ibm-spectrum-scale-csi-driver;pvc-1cae06c9-a419-43c4-a9d0-32673e50eeb3;/ibm/fs1/0f7c070e-6183-462f-9573-38e7ae124e2a-ibm-spectrum-scale-csi-driver/pvc-1cae06c9-a419-43c4-a9d0-32673e50eeb3] + if req == nil { + return nil, status.Error(codes.InvalidArgument, "CreateVolumeGroupSnapshot - Request cannot be empty") + } + + volIDs := req.GetSourceVolumeIds() + if len(volIDs) == 0 { + return nil, status.Error(codes.InvalidArgument, "CreateVolumeGroupSnapshot - Source Volume IDs is a required field") + } + var volIDMemberStr []string + for _, volID := range volIDs { + + volumeIDMember, err := volIDGroupParse(volID) + if err != nil { + return nil, status.Error(codes.InvalidArgument, fmt.Sprintf("CreateVolumeGroupSnapshot - Error in parsing source Volume ID %v: %v", volID, err)) + } + volIDMemberStr = append(volIDMemberStr, volumeIDMember) + } + klog.Infof("[%s] CreateVolumeGroupSnapshot - SourceVolumeParsed: %v", loggerId, volIDMemberStr) + if !volGroupMemberValidation(volIDMemberStr) { + return nil, status.Error(codes.InvalidArgument, "CreateVolumeGroupSnapshot - Source Volume IDs must belong to same consistency group") + } + var Snapshots []*csi.Snapshot + //for _, volID := range volIDs { + volID := volIDs[0] + scaleVolId, err := getVolIDMembers(volID) + if err != nil { + return nil, status.Error(codes.InvalidArgument, fmt.Sprintf("CreateVolumeGroupSnapshot - Error in source Volume ID %v: %v", volID, err)) 
+ } + klog.Infof("[%s] CreateVolumeGroupSnapshot - volIDs: %v", loggerId, volIDs) + klog.Infof("[%s] CreateVolumeGroupSnapshot - scaleVolId: %v", loggerId, scaleVolId) + snapshot, err := gs.commonSnapshotFunction(ctx, scaleVolId, volID, req) + if err != nil { + return nil, status.Error(codes.InvalidArgument, fmt.Sprintf("CreateVolumeGroupSnapshot - Error in snapshot create %v: %v", volID, err)) + } + klog.Infof("[%s] CreateVolumeGroupSnapshot - snapshot response: %v", loggerId, snapshot) + //Snapshots = append(Snapshots, snapshot) + for _, sourceVolID := range volIDs { + snapshot.SourceVolumeId = sourceVolID + snapID := "" + + splitVid := strings.Split(sourceVolID, ";") + + // storageclass_type;volumeType;clusterId;FSUUID;consistency_group;filesetName;snapshotName;metaSnapshotName + snapID = fmt.Sprintf("%s;%s;%s;%s;%s;%s;%s;%s", scaleVolId.StorageClassType, scaleVolId.VolType, scaleVolId.ClusterId, scaleVolId.FsUUID, scaleVolId.ConsistencyGroup, splitVid[5], req.GetName(), req.GetName()) + snapshot.SnapshotId = snapID + Snapshots = append(Snapshots, snapshot) + } + //} + return &csi.CreateVolumeGroupSnapshotResponse{ + GroupSnapshot: &csi.VolumeGroupSnapshot{ + GroupSnapshotId: req.GetName(), + Snapshots: Snapshots, + ReadyToUse: true, + CreationTime: timestamppb.Now(), + }, + }, nil +} + +// GetVolumeGroupSnapshot Get VolumeGroup Snapshot +func (gs *ScaleGroupControllerServer) GetVolumeGroupSnapshot(ctx context.Context, req *csi.GetVolumeGroupSnapshotRequest) (*csi.GetVolumeGroupSnapshotResponse, error) { //nolint:gocyclo,funlen + loggerId := utils.GetLoggerId(ctx) + klog.Infof("[%s] GetVolumeGroupSnapshot - GetVolumeGroupSnapshot req: %v", loggerId, req) + + return &csi.GetVolumeGroupSnapshotResponse{ + GroupSnapshot: &csi.VolumeGroupSnapshot{}, + }, nil +} + +// DeleteVolumeGroupSnapshot Delete VolumeGroup Snapshot +func (gs *ScaleGroupControllerServer) DeleteVolumeGroupSnapshot(ctx context.Context, req *csi.DeleteVolumeGroupSnapshotRequest) (*csi.DeleteVolumeGroupSnapshotResponse, error) { //nolint:gocyclo,funlen + loggerId := utils.GetLoggerId(ctx) + klog.Infof("[%s] DeleteVolumeGroupSnapshot - DeleteVolumeGroupSnapshot req: %v", loggerId, req) + + snapIDs := req.GetSnapshotIds() + if len(snapIDs) == 0 { + return nil, status.Error(codes.InvalidArgument, "DeleteVolumeGroupSnapshot - Snapshot IDs is a required field") + } + + snapIdMembers, err := gs.GetSnapIdMembers(snapIDs[0]) + if err != nil { + klog.Errorf("[%s] Invalid snapshot IDs %s [%v]", loggerId, snapIDs, err) + return nil, err + } + conn, err := gs.getConnFromClusterID(ctx, snapIdMembers.ClusterId) + if err != nil { + return nil, err + } + filesystemName, err := conn.GetFilesystemName(ctx, snapIdMembers.FsUUID) + if err != nil { + return nil, status.Error(codes.Internal, fmt.Sprintf("DeleteVolumeGroupSnapshot - unable to get filesystem Name for Filesystem UID [%v] and clusterId [%v]. 
Error [%v]", snapIdMembers.FsUUID, snapIdMembers.ClusterId, err)) + } + filesetName := snapIdMembers.ConsistencyGroup + klog.Infof("[%s] DeleteVolumeGroupSnapshot - deleting snapshot [%s] from fileset [%s] under filesystem [%s]", loggerId, snapIdMembers.SnapName, filesetName, filesystemName) + + snaperr := conn.DeleteSnapshot(ctx, filesystemName, filesetName, snapIdMembers.SnapName) + if snaperr != nil { + klog.Errorf("[%s] DeleteVolumeGroupSnapshot - error deleting snapshot %s: %v", loggerId, snapIdMembers.SnapName, snaperr) + return nil, snaperr + } + klog.Infof("[%s] DeleteVolumeGroupSnapshot - successfully deleted snapshot [%s] from fileset [%s] under filesystem [%s]", loggerId, snapIdMembers.SnapName, filesetName, filesystemName) + return &csi.DeleteVolumeGroupSnapshotResponse{}, nil +} + +func (gs *ScaleGroupControllerServer) commonSnapshotFunction(ctx context.Context, scaleVolId scaleVolId, volID string, req *csi.CreateVolumeGroupSnapshotRequest) (*csi.Snapshot, error) { + loggerId := utils.GetLoggerId(ctx) + klog.Infof("[%s] CreateVolumeGroupSnapshot - commonSnapshotFunction scaleVolId: %v volID: %v", loggerId, scaleVolId, volID) + if scaleVolId.StorageClassType != STORAGECLASS_ADVANCED { + + return nil, status.Error(codes.InvalidArgument, fmt.Sprintf("CreateVolumeGroupSnapshot - volume [%s] - Volume snapshot can only be created when source volume is version 2 fileset", volID)) + + } + conn, err := gs.getConnFromClusterID(ctx, scaleVolId.ClusterId) + if err != nil { + return nil, err + } + assembledScaleversion, err := gs.assembledScaleVersion(ctx, conn) + if err != nil { + return nil, status.Error(codes.Internal, fmt.Sprintf("the IBM Storage Scale version check for permissions failed with error %s", err)) + } + /* Check if IBM Storage Scale supports Snapshot */ + chkSnapshotErr := checkSnapshotSupport(assembledScaleversion) + if chkSnapshotErr != nil { + return nil, chkSnapshotErr + } + + primaryConn, isprimaryConnPresent := gs.Driver.connmap["primary"] + if !isprimaryConnPresent { + klog.Errorf("[%s] CreateSnapshot - unable to get connector for primary cluster", loggerId) + return nil, status.Error(codes.Internal, "CreateSnapshot - unable to find primary cluster details in custom resource") + } + + filesystemName, err := primaryConn.GetFilesystemName(ctx, scaleVolId.FsUUID) + if err != nil { + return nil, status.Error(codes.Internal, fmt.Sprintf("CreateSnapshot - Unable to get filesystem Name for Filesystem Uid [%v] and clusterId [%v]. 
Error [%v]", scaleVolId.FsUUID, scaleVolId.ClusterId, err)) + } + + mountInfo, err := primaryConn.GetFilesystemMountDetails(ctx, filesystemName) + if err != nil { + return nil, status.Error(codes.Internal, fmt.Sprintf("CreateSnapshot - unable to get mount info for FS [%v] in primary cluster", filesystemName)) + } + + filesetResp := connectors.Fileset_v2{} + filesystemName = getRemoteFsName(mountInfo.RemoteDeviceName) + if scaleVolId.FsetName != "" { + filesetResp, err = conn.GetFileSetResponseFromName(ctx, filesystemName, scaleVolId.FsetName) + if err != nil { + return nil, status.Error(codes.Internal, fmt.Sprintf("CreateSnapshot - Unable to get Fileset response for Fileset [%v] FS [%v] ClusterId [%v]", scaleVolId.FsetName, filesystemName, scaleVolId.ClusterId)) + } + } else { + filesetResp, err = conn.GetFileSetResponseFromId(ctx, filesystemName, scaleVolId.FsetId) + if err != nil { + return nil, status.Error(codes.Internal, fmt.Sprintf("CreateSnapshot - Unable to get Fileset response for Fileset Id [%v] FS [%v] ClusterId [%v]", scaleVolId.FsetId, filesystemName, scaleVolId.ClusterId)) + } + } + + filesetName := filesetResp.FilesetName + relPath := "" + if scaleVolId.StorageClassType == STORAGECLASS_ADVANCED { + klog.V(4).Infof("[%s] CreateSnapshot - creating snapshot for advanced storageClass", loggerId) + relPath = strings.Replace(scaleVolId.Path, mountInfo.MountPoint, "", 1) + } + relPath = strings.Trim(relPath, "!/") + + /* Confirm it is same fileset which was created for this PV */ + pvName := filepath.Base(relPath) + if pvName != filesetName { + return nil, status.Error(codes.Internal, fmt.Sprintf("CreateSnapshot - PV name from path [%v] does not match with filesetName [%v].", pvName, filesetName)) + } + + filesetName = scaleVolId.ConsistencyGroup + + snapName := req.GetName() + snapWindowInt := 0 + + snapParams := req.GetParameters() + snapWindow, snapWindowSpecified := snapParams[connectors.UserSpecifiedSnapWindow] + if !snapWindowSpecified { + // use default snapshot window for consistency group + snapWindow = defaultSnapWindow + klog.Infof("[%s] SnapWindow not specified. Using default snapWindow: [%s] for for fileset[%s:%s]", loggerId, snapWindow, filesetResp.FilesetName, filesystemName) + } + snapWindowInt, err = strconv.Atoi(snapWindow) + if err != nil { + return nil, status.Error(codes.Internal, fmt.Sprintf("CreateSnapshot [%s] - invalid snapWindow value: [%v]", snapName, snapWindow)) + } + + // Additional check for RDR fileset in secondary mode + AFMMode, err := gs.GetAFMMode(ctx, filesystemName, filesetName, conn) + if err != nil { + return nil, err + } + if AFMMode == connectors.AFMModeSecondary { + klog.Errorf("[%s] snapshot is not supported for AFM Secondary mode of ConsistencyGroup fileset [%v]", loggerId, filesetName) + return nil, status.Error(codes.Internal, fmt.Sprintf("snapshot is not supported for AFM Secondary mode of ConsistencyGroup fileset [%v]", filesetName)) + } + + snapExist, err := conn.CheckIfSnapshotExist(ctx, filesystemName, filesetName, snapName) + if err != nil { + klog.Errorf("[%s] CreateSnapshot [%s] - Unable to get the snapshot details. Error [%v]", loggerId, snapName, err) + return nil, status.Error(codes.Internal, fmt.Sprintf("Unable to get the snapshot details for [%s]. 
Error [%v]", snapName, err)) + } + + if !snapExist { + /* For new storageClass check last snapshot creation time, if time passed is less than + * snapWindow then return existing snapshot */ + createNewSnap := true + + cgSnapName, err := gs.CheckNewSnapRequired(ctx, conn, filesystemName, filesetName, snapWindowInt) + if err != nil { + klog.Errorf("[%s] CreateSnapshot [%s] - unable to check if snapshot is required for new storageClass for fileset [%s:%s]. Error: [%v]", loggerId, snapName, filesystemName, filesetName, err) + return nil, err + } + if cgSnapName != "" { + usable, err := gs.isExistingSnapUseableForVol(ctx, conn, filesystemName, filesetName, filesetResp.FilesetName, cgSnapName) + if !usable { + return nil, err + } + createNewSnap = false + snapName = cgSnapName + } else { + klog.Infof("[%s] CreateSnapshot - creating new snapshot for consistency group for fileset: [%s:%s]", loggerId, filesystemName, filesetName) + } + + if createNewSnap { + snapshotList, err := conn.ListFilesetSnapshots(ctx, filesystemName, filesetName) + if err != nil { + klog.Errorf("[%s] CreateSnapshot [%s] - unable to list snapshots for fileset [%s:%s]. Error: [%v]", loggerId, snapName, filesystemName, filesetName, err) + return nil, status.Error(codes.Internal, fmt.Sprintf("unable to list snapshots for fileset [%s:%s]. Error: [%v]", filesystemName, filesetName, err)) + } + + if len(snapshotList) >= 256 { + klog.Errorf("[%s] CreateSnapshot [%s] - max limit of snapshots reached for fileset [%s:%s]. No more snapshots can be created for this fileset.", loggerId, snapName, filesystemName, filesetName) + return nil, status.Error(codes.OutOfRange, fmt.Sprintf("max limit of snapshots reached for fileset [%s:%s]. No more snapshots can be created for this fileset.", filesystemName, filesetName)) + } + klog.Infof("[%s] commonSnapshotFunction - creating new snapshot CreateSnapshot filesystemName: %s, filesetName:%s ,snapName: %s", loggerId, filesystemName, filesetName, snapName) + snaperr := conn.CreateSnapshot(ctx, filesystemName, filesetName, snapName) + if snaperr != nil { + klog.Errorf("[%s] Snapshot [%s] - Unable to create snapshot. Error [%v]", loggerId, snapName, snaperr) + return nil, status.Error(codes.Internal, fmt.Sprintf("unable to create snapshot [%s]. Error [%v]", snapName, snaperr)) + } + } + } + + snapID := "" + // storageclass_type;volumeType;clusterId;FSUUID;consistency_group;filesetName;snapshotName;metaSnapshotName + snapID = fmt.Sprintf("%s;%s;%s;%s;%s;%s;%s;%s", scaleVolId.StorageClassType, scaleVolId.VolType, scaleVolId.ClusterId, scaleVolId.FsUUID, filesetName, filesetResp.FilesetName, snapName, req.GetName()) + + timestamp, err := gs.getSnapshotCreateTimestamp(ctx, conn, filesystemName, filesetName, snapName) + if err != nil { + klog.Errorf("[%s] Error getting create timestamp for snapshot %s:%s:%s", loggerId, filesystemName, filesetName, snapName) + return nil, err + } + + restoreSize, err := gs.getSnapRestoreSize(ctx, conn, filesystemName, filesetResp.FilesetName) + if err != nil { + klog.Errorf("[%s] Error getting the snapshot restore size for snapshot %s:%s:%s", loggerId, filesystemName, filesetResp.FilesetName, snapName) + return nil, err + } + + err = gs.MakeSnapMetadataDir(ctx, conn, filesystemName, filesetResp.FilesetName, filesetName, snapName, req.GetName()) + if err != nil { + klog.Errorf("[%s] Error in creating directory for storing metadata information for advanced storageClass. 
Error: [%v]", loggerId, err) + return nil, err + } + + return &csi.Snapshot{ + SnapshotId: snapID, + SourceVolumeId: volID, + ReadyToUse: true, + CreationTime: ×tamp, + SizeBytes: restoreSize, + }, nil +} + +func volIDGroupParse(vID string) (string, error) { + splitVid := strings.Split(vID, ";") + //var vIdMem scaleVolId + toValidateSameCGVolMember := "" + + if len(splitVid) == 7 { + /* Volume ID created from CSI 2.5.0 onwards */ + /* VolID: ;;;;;; */ + + toValidateSameCGVolMember = splitVid[0] + splitVid[1] + splitVid[2] + splitVid[3] + splitVid[4] + return toValidateSameCGVolMember, nil + + } + + return toValidateSameCGVolMember, status.Error(codes.Internal, fmt.Sprintf("Invalid Volume Id : [%v]", vID)) +} + +func volGroupMemberValidation(volIDMemberStr []string) bool { + + for _, v := range volIDMemberStr { + if v != volIDMemberStr[0] { + return false + } + } + return true + +} + +func (gs *ScaleGroupControllerServer) getConnFromClusterID(ctx context.Context, cid string) (connectors.SpectrumScaleConnector, error) { + loggerId := utils.GetLoggerId(ctx) + connector, isConnPresent := gs.Driver.connmap[cid] + if isConnPresent { + return connector, nil + } + klog.Errorf("[%s] unable to get connector for cluster ID %v", loggerId, cid) + return nil, status.Error(codes.Internal, fmt.Sprintf("unable to find cluster [%v] details in custom resource", cid)) +} + +func (gs *ScaleGroupControllerServer) assembledScaleVersion(ctx context.Context, conn connectors.SpectrumScaleConnector) (string, error) { + assembledScaleVer := "" + scaleVersion, err := conn.GetScaleVersion(ctx) + if err != nil { + return assembledScaleVer, err + } + /* Assuming IBM Storage Scale version is in a format like 5.0.0-0_170818.165000 */ + // "serverVersion" : "5.1.1.1-developer build", + splitScaleVer := strings.Split(scaleVersion, ".") + if len(splitScaleVer) < 3 { + return assembledScaleVer, status.Error(codes.Internal, fmt.Sprintf("invalid IBM Storage Scale version - %s", scaleVersion)) + } + var splitMinorVer []string + if len(splitScaleVer) == 4 { + //dev build e.g. "5.1.5.0-developer build" + splitMinorVer = strings.Split(splitScaleVer[3], "-") + assembledScaleVer = splitScaleVer[0] + splitScaleVer[1] + splitScaleVer[2] + splitMinorVer[0] + } else { + //GA build e.g. "5.1.5-0" + splitMinorVer = strings.Split(splitScaleVer[2], "-") + assembledScaleVer = splitScaleVer[0] + splitScaleVer[1] + splitMinorVer[0] + splitMinorVer[1][0:1] + } + return assembledScaleVer, nil +} + +func checkSnapshotSupport(assembledScaleversion string) error { + /* Verify IBM Storage Scale Version is not below 5.1.1-0 */ + versionCheck := checkMinScaleVersionValid(assembledScaleversion, "5110") + if !versionCheck { + return status.Error(codes.FailedPrecondition, "the minimum required IBM Storage Scale version for snapshot support with CSI is 5.1.1-0") + } + return nil +} + +func (gs *ScaleGroupControllerServer) getPrimaryFSMountPoint(ctx context.Context) (string, error) { + loggerId := utils.GetLoggerId(ctx) + klog.Infof("[%s] getPrimaryFSMountPoint", loggerId) + + primaryConn := gs.Driver.connmap["primary"] + primaryFS := gs.Driver.primary.GetPrimaryFs() + fsMountInfo, err := primaryConn.GetFilesystemMountDetails(ctx, primaryFS) + if err != nil { + klog.Errorf("[%s] Failed to get details of primary filesystem %s:Error: %v", loggerId, primaryFS, err) + return "", status.Error(codes.NotFound, fmt.Sprintf("Failed to get details of primary filesystem %s. 
Error: %v", primaryFS, err)) + + } + return fsMountInfo.MountPoint, nil +} + +// GetAFMMode returns the AFM mode of the fileset and also the error +// if there is any (including the fileset not found error) while getting +// the fileset info +func (gs *ScaleGroupControllerServer) GetAFMMode(ctx context.Context, filesystemName string, filesetName string, conn connectors.SpectrumScaleConnector) (string, error) { + loggerId := utils.GetLoggerId(ctx) + filesetDetails, err := conn.ListFileset(ctx, filesystemName, filesetName) + if err != nil { + return "", status.Error(codes.Internal, fmt.Sprintf("failed to get fileset info, filesystem: [%v], fileset: [%v], error: [%v]", filesystemName, filesetName, err)) + } + + klog.V(4).Infof("[%s] AFM mode of the fileset [%v] is [%v]", loggerId, filesetName, filesetDetails.AFM.AFMMode) + return filesetDetails.AFM.AFMMode, nil +} + +func (gs *ScaleGroupControllerServer) CheckNewSnapRequired(ctx context.Context, conn connectors.SpectrumScaleConnector, filesystemName string, filesetName string, snapWindow int) (string, error) { + loggerId := utils.GetLoggerId(ctx) + latestSnapList, err := conn.GetLatestFilesetSnapshots(ctx, filesystemName, filesetName) + if err != nil { + klog.Errorf("[%s] CheckNewSnapRequired - getting latest snapshot list failed for fileset: [%s:%s]. Error: [%v]", loggerId, filesystemName, filesetName, err) + return "", err + } + + if len(latestSnapList) == 0 { + // No snapshot exists, so create new one + return "", nil + } + + timestamp, err := gs.getSnapshotCreateTimestamp(ctx, conn, filesystemName, filesetName, latestSnapList[0].SnapshotName) + if err != nil { + klog.Errorf("[%s] Error getting create timestamp for snapshot %s:%s:%s", loggerId, filesystemName, filesetName, latestSnapList[0].SnapshotName) + return "", err + } + + var timestampSecs int64 = timestamp.GetSeconds() + lastSnapTime := time.Unix(timestampSecs, 0) + passedTime := time.Since(lastSnapTime).Seconds() + klog.Infof("[%s] Fileset [%s:%s], last snapshot time: [%v], current time: [%v], passed time: %v seconds, snapWindow: %v minutes", loggerId, filesystemName, filesetName, lastSnapTime, time.Now(), int64(passedTime), snapWindow) + + snapWindowSeconds := snapWindow * 60 + + if passedTime < float64(snapWindowSeconds) { + // we don't need to take new snapshot + klog.Infof("[%s] CheckNewSnapRequired - for fileset [%s:%s], using existing snapshot [%s]", loggerId, filesystemName, filesetName, latestSnapList[0].SnapshotName) + return latestSnapList[0].SnapshotName, nil + } + + klog.Infof("[%s] CheckNewSnapRequired - for fileset [%s:%s] we need to create new snapshot", loggerId, filesystemName, filesetName) + return "", nil +} + +func (gs *ScaleGroupControllerServer) MakeSnapMetadataDir(ctx context.Context, conn connectors.SpectrumScaleConnector, filesystemName string, filesetName string, indepFileset string, cgSnapName string, metaSnapName string) error { + loggerId := utils.GetLoggerId(ctx) + path := fmt.Sprintf("%s/%s/%s", indepFileset, cgSnapName, metaSnapName) + klog.Infof("[%s] MakeSnapMetadataDir - creating directory [%s] for fileset: [%s:%s]", loggerId, path, filesystemName, filesetName) + err := conn.MakeDirectory(ctx, filesystemName, path, "0", "0") + if err != nil { + // Directory creation failed + klog.Errorf("[%s] Volume:[%v] - unable to create directory [%v] in filesystem [%v]. Error : %v", loggerId, filesetName, path, filesystemName, err) + return fmt.Errorf("unable to create directory [%v] in filesystem [%v]. 
Error : %v", path, filesystemName, err) + } + return nil +} + +func (gs *ScaleGroupControllerServer) isExistingSnapUseableForVol(ctx context.Context, conn connectors.SpectrumScaleConnector, filesystemName string, consistencyGroup string, filesetName string, cgSnapName string) (bool, error) { + pathDir := fmt.Sprintf("%s/.snapshots/%s/%s", consistencyGroup, cgSnapName, filesetName) + _, err := conn.StatDirectory(ctx, filesystemName, pathDir) + if err != nil { + if strings.Contains(err.Error(), "EFSSG0264C") || + strings.Contains(err.Error(), "does not exist") { // directory does not exist + return false, status.Error(codes.Internal, fmt.Sprintf("snapshot for volume [%v] in filesystem [%v] is not taken. Wait till current snapWindow expires.", filesetName, filesystemName)) + } else { + return false, err + } + } + return true, nil +} + +func (gs *ScaleGroupControllerServer) getSnapshotCreateTimestamp(ctx context.Context, conn connectors.SpectrumScaleConnector, fs string, fset string, snap string) (timestamppb.Timestamp, error) { + var timestamp timestamppb.Timestamp + + createTS, err := conn.GetSnapshotCreateTimestamp(ctx, fs, fset, snap) + if err != nil { + klog.Errorf("[%s]snapshot [%s] - Unable to get snapshot create timestamp", utils.GetLoggerId(ctx), snap) + return timestamp, err + } + + timezoneOffset, err := conn.GetTimeZoneOffset(ctx) + if err != nil { + klog.Errorf("[%s] snapshot [%s] - Unable to get cluster timezone", utils.GetLoggerId(ctx), snap) + return timestamp, err + } + + // for GMT, REST API returns Z instead of 00:00 + if timezoneOffset == "Z" { + timezoneOffset = "+00:00" + } + + // Rest API returns create timestamp in the format 2006-01-02 15:04:05,000 + // irrespective of the cluster timezone. We replace the last part of this date + // with the timezone offset returned by cluster config REST API and then parse + // the timestamp with correct zone info + const longForm = "2006-01-02 15:04:05-07:00" + //nolint::staticcheck + + createTSTZ := strings.Replace(createTS, ",000", timezoneOffset, 1) + t, err := time.Parse(longForm, createTSTZ) + if err != nil { + klog.Errorf("[%s] snapshot - for fileset [%s:%s] error in parsing timestamp: [%v]. 
Error: [%v]", utils.GetLoggerId(ctx), fs, fset, createTS, err) + return timestamp, err + } + timestamp.Seconds = t.Unix() + timestamp.Nanos = 0 + + klog.Infof("[%s] getSnapshotCreateTimestamp: for fileset [%s:%s] snapshot creation timestamp: [%v]", utils.GetLoggerId(ctx), fs, fset, createTSTZ) + return timestamp, nil +} + +func (gs *ScaleGroupControllerServer) getSnapRestoreSize(ctx context.Context, conn connectors.SpectrumScaleConnector, filesystemName string, filesetName string) (int64, error) { + quotaResp, err := conn.GetFilesetQuotaDetails(ctx, filesystemName, filesetName) + + if err != nil { + return 0, err + } + + if quotaResp.BlockLimit < 0 { + klog.Errorf("[%s] getSnapRestoreSize: Invalid block limit [%v] for fileset [%s:%s] found", utils.GetLoggerId(ctx), quotaResp.BlockLimit, filesystemName, filesetName) + return 0, status.Error(codes.Internal, fmt.Sprintf("invalid block limit [%v] for fileset [%s:%s] found", quotaResp.BlockLimit, filesystemName, filesetName)) + } + + // REST API returns block limit in kb, convert it to bytes and return + return int64(quotaResp.BlockLimit * 1024), nil +} + +func (gs *ScaleGroupControllerServer) GetSnapIdMembers(sId string) (scaleSnapId, error) { + splitSid := strings.Split(sId, ";") + var sIdMem scaleSnapId + + if len(splitSid) < 4 { + return scaleSnapId{}, status.Error(codes.Internal, fmt.Sprintf("Invalid Snapshot Id : [%v]", sId)) + } + + if len(splitSid) >= 8 { + /* storageclass_type;volumeType;clusterId;FSUUID;consistency_group;filesetName;snapshotName;path */ + sIdMem.StorageClassType = splitSid[0] + sIdMem.VolType = splitSid[1] + sIdMem.ClusterId = splitSid[2] + sIdMem.FsUUID = splitSid[3] + sIdMem.ConsistencyGroup = splitSid[4] + sIdMem.FsetName = splitSid[5] + sIdMem.SnapName = splitSid[6] + sIdMem.MetaSnapName = splitSid[7] + if len(splitSid) == 9 && splitSid[8] != "" { + sIdMem.Path = splitSid[8] + } else { + sIdMem.Path = "/" + } + } else { + /* clusterId;FSUUID;filesetName;snapshotName;path */ + sIdMem.ClusterId = splitSid[0] + sIdMem.FsUUID = splitSid[1] + sIdMem.FsetName = splitSid[2] + sIdMem.SnapName = splitSid[3] + if len(splitSid) == 5 && splitSid[4] != "" { + sIdMem.Path = splitSid[4] + } else { + sIdMem.Path = "/" + } + sIdMem.StorageClassType = STORAGECLASS_CLASSIC + } + return sIdMem, nil +} diff --git a/driver/csiplugin/identityserver.go b/driver/csiplugin/identityserver.go index 4cb05629f..425a4230b 100644 --- a/driver/csiplugin/identityserver.go +++ b/driver/csiplugin/identityserver.go @@ -41,6 +41,13 @@ func (is *ScaleIdentityServer) GetPluginCapabilities(ctx context.Context, req *c }, }, }, + { + Type: &csi.PluginCapability_Service_{ + Service: &csi.PluginCapability_Service{ + Type: csi.PluginCapability_Service_GROUP_CONTROLLER_SERVICE, + }, + }, + }, }, }, nil } diff --git a/driver/csiplugin/server.go b/driver/csiplugin/server.go index 2401671dd..aa50eef9f 100644 --- a/driver/csiplugin/server.go +++ b/driver/csiplugin/server.go @@ -32,7 +32,7 @@ import ( // Defines Non blocking GRPC server interfaces type NonBlockingGRPCServer interface { // Start services at the endpoint - Start(endpoint string, ids csi.IdentityServer, cs csi.ControllerServer, ns csi.NodeServer) + Start(endpoint string, ids csi.IdentityServer, cs csi.ControllerServer, ns csi.NodeServer, gcs csi.GroupControllerServer) // Waits for the service to stop Wait() // Stops the service gracefully @@ -51,10 +51,10 @@ type nonBlockingGRPCServer struct { server *grpc.Server } -func (s *nonBlockingGRPCServer) Start(endpoint string, ids csi.IdentityServer, cs 
csi.ControllerServer, ns csi.NodeServer) { +func (s *nonBlockingGRPCServer) Start(endpoint string, ids csi.IdentityServer, cs csi.ControllerServer, ns csi.NodeServer, gcs csi.GroupControllerServer) { s.wg.Add(1) - go s.serve(endpoint, ids, cs, ns) + go s.serve(endpoint, ids, cs, ns, gcs) } func (s *nonBlockingGRPCServer) Wait() { @@ -69,7 +69,7 @@ func (s *nonBlockingGRPCServer) ForceStop() { s.server.Stop() } -func (s *nonBlockingGRPCServer) serve(endpoint string, ids csi.IdentityServer, cs csi.ControllerServer, ns csi.NodeServer) { +func (s *nonBlockingGRPCServer) serve(endpoint string, ids csi.IdentityServer, cs csi.ControllerServer, ns csi.NodeServer, gcs csi.GroupControllerServer) { opts := []grpc.ServerOption{ grpc.UnaryInterceptor(logGRPC), @@ -115,6 +115,11 @@ func (s *nonBlockingGRPCServer) serve(endpoint string, ids csi.IdentityServer, c csi.RegisterNodeServer(server, ns) } + if gcs != nil { + klog.Infof("Starting RegisterGroupControllerServer on %#v", gcs) + csi.RegisterGroupControllerServer(server, gcs) + } + klog.Infof("Started listening on %#v", listener.Addr()) if err := server.Serve(listener); err != nil { diff --git a/driver/csiplugin/utils.go b/driver/csiplugin/utils.go index 468fe9359..2af7b3c96 100644 --- a/driver/csiplugin/utils.go +++ b/driver/csiplugin/utils.go @@ -53,6 +53,16 @@ func NewNodeServiceCapability(cap csi.NodeServiceCapability_RPC_Type) *csi.NodeS } } +func NewGroupControllerServiceCapability(cap csi.GroupControllerServiceCapability_RPC_Type) *csi.GroupControllerServiceCapability { + return &csi.GroupControllerServiceCapability{ + Type: &csi.GroupControllerServiceCapability_Rpc{ + Rpc: &csi.GroupControllerServiceCapability_RPC{ + Type: cap, + }, + }, + } +} + func logGRPC(ctx context.Context, req interface{}, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (interface{}, error) { newCtx := utils.SetLoggerId(ctx) loggerId := utils.GetLoggerId(newCtx) diff --git a/driver/examples/version2/groupsnapshot/pvc.yaml b/driver/examples/version2/groupsnapshot/pvc.yaml new file mode 100644 index 000000000..f13b4810f --- /dev/null +++ b/driver/examples/version2/groupsnapshot/pvc.yaml @@ -0,0 +1,28 @@ +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: scale-fset-pvc-1 + labels: + app.kubernetes.io/name: sg-group2 +spec: + accessModes: + - ReadWriteMany + resources: + requests: + storage: 1Gi + storageClassName: ibm-spectrum-scale-csi-advance + +--- +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: scale-fset-pvc-2 + labels: + app.kubernetes.io/name: sg-group2 +spec: + accessModes: + - ReadWriteMany + resources: + requests: + storage: 1Gi + storageClassName: ibm-spectrum-scale-csi-advance \ No newline at end of file diff --git a/driver/examples/version2/groupsnapshot/pvcfrmgroupsnap.yaml b/driver/examples/version2/groupsnapshot/pvcfrmgroupsnap.yaml new file mode 100644 index 000000000..defdcb75d --- /dev/null +++ b/driver/examples/version2/groupsnapshot/pvcfrmgroupsnap.yaml @@ -0,0 +1,31 @@ +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: ibm-spectrum-scale-pvc-1-from-groupsnapshot +spec: + storageClassName: ibm-spectrum-scale-csi-advance + dataSource: + name: snapshot-88f427eafa6a6f8044b66259dd801d5f046063c2f473c54c528bec9e551da98b-2024-08-05-10.16.31 + kind: VolumeSnapshot + apiGroup: snapshot.storage.k8s.io + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 1Gi +--- +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: ibm-spectrum-scale-pvc-2-from-groupsnapshot +spec: + storageClassName: 
ibm-spectrum-scale-csi-advance + dataSource: + name: snapshot-88f427eafa6a6f8044b66259dd801d5f046063c2f473c54c528bec9e551da98b-2024-08-05-10.16.32 + kind: VolumeSnapshot + apiGroup: snapshot.storage.k8s.io + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 1Gi \ No newline at end of file diff --git a/driver/examples/version2/groupsnapshot/volsnap.yaml b/driver/examples/version2/groupsnapshot/volsnap.yaml new file mode 100644 index 000000000..f59cd3d8d --- /dev/null +++ b/driver/examples/version2/groupsnapshot/volsnap.yaml @@ -0,0 +1,13 @@ +--- +apiVersion: groupsnapshot.storage.k8s.io/v1alpha1 +kind: VolumeGroupSnapshot +metadata: + name: new-groupsnapshot-demo-5 +spec: + source: + selector: + matchLabels: + # The PVCs will need to have this label for it to be + # included in the VolumeGroupSnapshot + app.kubernetes.io/name: sg-group2 + volumeGroupSnapshotClassName: ibm-spectrum-scale-groupsnapclass \ No newline at end of file diff --git a/driver/examples/version2/groupsnapshot/volsnapclass.yaml b/driver/examples/version2/groupsnapshot/volsnapclass.yaml new file mode 100644 index 000000000..8d2fcacca --- /dev/null +++ b/driver/examples/version2/groupsnapshot/volsnapclass.yaml @@ -0,0 +1,7 @@ +--- +apiVersion: groupsnapshot.storage.k8s.io/v1alpha1 +kind: VolumeGroupSnapshotClass +metadata: + name: ibm-spectrum-scale-groupsnapclass +driver: spectrumscale.csi.ibm.com +deletionPolicy: Delete \ No newline at end of file diff --git a/operator/controllers/config/constants.go b/operator/controllers/config/constants.go index 315d8ff68..88c0d4631 100644 --- a/operator/controllers/config/constants.go +++ b/operator/controllers/config/constants.go @@ -90,18 +90,18 @@ const ( // Default images for containers CSIDriverPluginImage = "quay.io/ibm-spectrum-scale/ibm-spectrum-scale-csi-driver:v2.12.0" - // registry.k8s.io/sig-storage/csi-node-driver-registrar:v2.10.1 - CSINodeDriverRegistrarImage = "registry.k8s.io/sig-storage/csi-node-driver-registrar@sha256:f25af73ee708ff9c82595ae99493cdef9295bd96953366cddf36305f82555dac" // #nosec G101 false positive - // registry.k8s.io/sig-storage/livenessprobe:v2.12.0 - LivenessProbeImage = "registry.k8s.io/sig-storage/livenessprobe@sha256:5baeb4a6d7d517434292758928bb33efc6397368cbb48c8a4cf29496abf4e987" // #nosec G101 false positive - // registry.k8s.io/sig-storage/csi-attacher:v4.6.1 - CSIAttacherImage = "registry.k8s.io/sig-storage/csi-attacher@sha256:b4d611100ece2f9bc980d1cb19c2285b8868da261e3b1ee8f45448ab5512ab94" // #nosec G101 false positive - // registry.k8s.io/sig-storage/csi-provisioner:v4.0.1 - CSIProvisionerImage = "registry.k8s.io/sig-storage/csi-provisioner@sha256:bf5a235b67d8aea00f5b8ec24d384a2480e1017d5458d8a63b361e9eeb1608a9" // #nosec G101 false positive - // registry.k8s.io/sig-storage/csi-snapshotter:v8.0.1 - CSISnapshotterImage = "registry.k8s.io/sig-storage/csi-snapshotter@sha256:2e04046334baf9be425bb0fa1d04c2d1720d770825eedbdbcdb10d430da4ad8c" // #nosec G101 false positive - // registry.k8s.io/sig-storage/csi-resizer:v1.11.1 - CSIResizerImage = "registry.k8s.io/sig-storage/csi-resizer@sha256:a541e6cc2d8b011bb21b1d4ffec6b090e85270cce6276ee302d86153eec0af43" // #nosec G101 false positive + // registry.k8s.io/sig-storage/csi-node-driver-registrar:v2.10.1 + CSINodeDriverRegistrarImage = "registry.k8s.io/sig-storage/csi-node-driver-registrar@sha256:f25af73ee708ff9c82595ae99493cdef9295bd96953366cddf36305f82555dac" // #nosec G101 false positive + // registry.k8s.io/sig-storage/livenessprobe:v2.12.0 + LivenessProbeImage = 
"registry.k8s.io/sig-storage/livenessprobe@sha256:5baeb4a6d7d517434292758928bb33efc6397368cbb48c8a4cf29496abf4e987" // #nosec G101 false positive + // registry.k8s.io/sig-storage/csi-attacher:v4.6.1 + CSIAttacherImage = "registry.k8s.io/sig-storage/csi-attacher@sha256:b4d611100ece2f9bc980d1cb19c2285b8868da261e3b1ee8f45448ab5512ab94" // #nosec G101 false positive + // registry.k8s.io/sig-storage/csi-provisioner:v4.0.1 + CSIProvisionerImage = "registry.k8s.io/sig-storage/csi-provisioner@sha256:bf5a235b67d8aea00f5b8ec24d384a2480e1017d5458d8a63b361e9eeb1608a9" // #nosec G101 false positive + // registry.k8s.io/sig-storage/csi-snapshotter:v8.0.1 + CSISnapshotterImage = "registry.k8s.io/sig-storage/csi-snapshotter@sha256:2e04046334baf9be425bb0fa1d04c2d1720d770825eedbdbcdb10d430da4ad8c" // #nosec G101 false positive + // registry.k8s.io/sig-storage/csi-resizer:v1.11.1 + CSIResizerImage = "registry.k8s.io/sig-storage/csi-resizer@sha256:a541e6cc2d8b011bb21b1d4ffec6b090e85270cce6276ee302d86153eec0af43" // #nosec G101 false positive //ImagePullPolicies for containers CSIDriverImagePullPolicy = "IfNotPresent" diff --git a/operator/controllers/internal/csiscaleoperator/csiscaleoperator_package.go b/operator/controllers/internal/csiscaleoperator/csiscaleoperator_package.go index 6676533d4..044f20b37 100644 --- a/operator/controllers/internal/csiscaleoperator/csiscaleoperator_package.go +++ b/operator/controllers/internal/csiscaleoperator/csiscaleoperator_package.go @@ -30,39 +30,44 @@ import ( ) const ( - snapshotStorageApiGroup string = "snapshot.storage.k8s.io" - securityOpenshiftApiGroup string = "security.openshift.io" - storageApiGroup string = "storage.k8s.io" - rbacAuthorizationApiGroup string = "rbac.authorization.k8s.io" - coordinationApiGroup string = "coordination.k8s.io" - podSecurityPolicyApiGroup string = "extensions" - storageClassesResource string = "storageclasses" - persistentVolumesResource string = "persistentvolumes" - persistentVolumeClaimsResource string = "persistentvolumeclaims" - persistentVolumeClaimsStatusResource string = "persistentvolumeclaims/status" - podsResource string = "pods" - volumeAttachmentsResource string = "volumeattachments" - volumeAttachmentsStatusResource string = "volumeattachments/status" - volumeSnapshotClassesResource string = "volumesnapshotclasses" - volumeSnapshotsResource string = "volumesnapshots" - volumeSnapshotContentsResource string = "volumesnapshotcontents" - volumeSnapshotContentsStatusResource string = "volumesnapshotcontents/status" - eventsResource string = "events" - nodesResource string = "nodes" - csiNodesResource string = "csinodes" - namespacesResource string = "namespaces" - securityContextConstraintsResource string = "securitycontextconstraints" - podSecurityPolicyResource string = "podsecuritypolicies" - leaseResource string = "leases" - secretResource string = "secrets" - verbGet string = "get" - verbList string = "list" - verbWatch string = "watch" - verbCreate string = "create" - verbUpdate string = "update" - verbPatch string = "patch" - verbDelete string = "delete" - verbUse string = "use" + snapshotStorageApiGroup string = "snapshot.storage.k8s.io" + groupsnapshotStorageApiGroup string = "groupsnapshot.storage.k8s.io" + securityOpenshiftApiGroup string = "security.openshift.io" + storageApiGroup string = "storage.k8s.io" + rbacAuthorizationApiGroup string = "rbac.authorization.k8s.io" + coordinationApiGroup string = "coordination.k8s.io" + podSecurityPolicyApiGroup string = "extensions" + storageClassesResource string = 
"storageclasses" + persistentVolumesResource string = "persistentvolumes" + persistentVolumeClaimsResource string = "persistentvolumeclaims" + persistentVolumeClaimsStatusResource string = "persistentvolumeclaims/status" + podsResource string = "pods" + volumeAttachmentsResource string = "volumeattachments" + volumeAttachmentsStatusResource string = "volumeattachments/status" + volumeSnapshotClassesResource string = "volumesnapshotclasses" + volumeSnapshotsResource string = "volumesnapshots" + volumeSnapshotContentsResource string = "volumesnapshotcontents" + volumeSnapshotContentsStatusResource string = "volumesnapshotcontents/status" + volumeGroupSnapshotClassesResource string = "volumegroupsnapshotclasses" + volumeGroupSnapshotsResource string = "volumegroupsnapshot" + volumeGroupSnapshotContentsResource string = "volumegroupsnapshotcontents" + volumeGroupSnapshotContentsStatusResource string = "volumegroupsnapshotcontents/status" + eventsResource string = "events" + nodesResource string = "nodes" + csiNodesResource string = "csinodes" + namespacesResource string = "namespaces" + securityContextConstraintsResource string = "securitycontextconstraints" + podSecurityPolicyResource string = "podsecuritypolicies" + leaseResource string = "leases" + secretResource string = "secrets" + verbGet string = "get" + verbList string = "list" + verbWatch string = "watch" + verbCreate string = "create" + verbUpdate string = "update" + verbPatch string = "patch" + verbDelete string = "delete" + verbUse string = "use" ) // GenerateCSIDriver returns a non-namespaced CSIDriver object. @@ -216,6 +221,16 @@ func (c *CSIScaleOperator) GenerateProvisionerClusterRole() *rbacv1.ClusterRole Resources: []string{volumeSnapshotContentsResource}, Verbs: []string{verbGet, verbList}, }, + { + APIGroups: []string{groupsnapshotStorageApiGroup}, + Resources: []string{volumeGroupSnapshotsResource}, + Verbs: []string{verbGet, verbList}, + }, + { + APIGroups: []string{groupsnapshotStorageApiGroup}, + Resources: []string{volumeGroupSnapshotContentsResource}, + Verbs: []string{verbGet, verbList}, + }, { APIGroups: []string{storageApiGroup}, Resources: []string{csiNodesResource}, @@ -357,6 +372,16 @@ func (c *CSIScaleOperator) GenerateSnapshotterClusterRole() *rbacv1.ClusterRole Labels: c.GetLabels(), }, Rules: []rbacv1.PolicyRule{ + { + APIGroups: []string{""}, + Resources: []string{persistentVolumesResource}, + Verbs: []string{verbGet, verbList, verbWatch, verbCreate, verbDelete}, + }, + { + APIGroups: []string{""}, + Resources: []string{persistentVolumeClaimsResource}, + Verbs: []string{verbGet, verbList, verbWatch, verbUpdate}, + }, { APIGroups: []string{""}, Resources: []string{eventsResource}, @@ -370,13 +395,33 @@ func (c *CSIScaleOperator) GenerateSnapshotterClusterRole() *rbacv1.ClusterRole { APIGroups: []string{snapshotStorageApiGroup}, Resources: []string{volumeSnapshotContentsResource}, - Verbs: []string{verbGet, verbList, verbWatch, verbUpdate, verbPatch}, + Verbs: []string{verbGet, verbList, verbWatch, verbUpdate, verbPatch, verbCreate}, }, { APIGroups: []string{snapshotStorageApiGroup}, Resources: []string{volumeSnapshotContentsStatusResource}, Verbs: []string{verbUpdate, verbPatch}, }, + { + APIGroups: []string{groupsnapshotStorageApiGroup}, + Resources: []string{volumeGroupSnapshotClassesResource}, + Verbs: []string{verbGet, verbList, verbWatch}, + }, + { + APIGroups: []string{snapshotStorageApiGroup}, + Resources: []string{volumeSnapshotsResource}, + Verbs: []string{verbGet, verbList, verbCreate}, + }, + { + 
APIGroups: []string{groupsnapshotStorageApiGroup}, + Resources: []string{volumeGroupSnapshotContentsResource}, + Verbs: []string{verbGet, verbList, verbWatch, verbUpdate, verbPatch}, + }, + { + APIGroups: []string{groupsnapshotStorageApiGroup}, + Resources: []string{volumeGroupSnapshotContentsStatusResource}, + Verbs: []string{verbUpdate, verbPatch}, + }, { APIGroups: []string{coordinationApiGroup}, Resources: []string{leaseResource}, diff --git a/operator/controllers/syncer/csi_syncer.go b/operator/controllers/syncer/csi_syncer.go index 69d4e7900..315cedd8d 100644 --- a/operator/controllers/syncer/csi_syncer.go +++ b/operator/controllers/syncer/csi_syncer.go @@ -582,6 +582,7 @@ func (s *csiControllerSyncer) ensureSnapshotterContainersSpec(cpuLimits string, "--leader-election=true", "--leader-election-lease-duration=$(LEADER_ELECTION_LEASE_DURATION)", "--leader-election-renew-deadline=$(LEADER_ELECTION_RENEW_DEADLINE)", "--leader-election-retry-period=$(LEADER_ELECTION_RETRY_PERIOD)", + "--enable-volume-group-snapshots=true", "--http-endpoint=:" + fmt.Sprint(config.LeaderLivenessPort)}, cpuLimits, memoryLimits, ) diff --git a/operator/go.mod b/operator/go.mod index da668b4da..29278fcbf 100644 --- a/operator/go.mod +++ b/operator/go.mod @@ -1,6 +1,6 @@ module github.com/IBM/ibm-spectrum-scale-csi/operator -go 1.22.3 +go 1.22.5 require ( github.com/IBM/ibm-spectrum-scale-csi/driver v0.0.0-20240509034444-7a7f88d1cb10