|
/*
Copyright 2025 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

	http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
| 16 | + |
| 17 | +// Package controllers provides a way to reconcile ROSA resources. |
| 18 | +package controllers |
| 19 | + |
| 20 | +import ( |
| 21 | + "context" |
| 22 | + "testing" |
| 23 | + "time" |
| 24 | + |
| 25 | + "github.com/golang/mock/gomock" |
| 26 | + . "github.com/onsi/gomega" |
| 27 | + cmv1 "github.com/openshift-online/ocm-sdk-go/clustersmgmt/v1" |
| 28 | + corev1 "k8s.io/api/core/v1" |
| 29 | + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" |
| 30 | + "k8s.io/apimachinery/pkg/runtime" |
| 31 | + "k8s.io/apimachinery/pkg/types" |
| 32 | + "k8s.io/client-go/tools/record" |
| 33 | + ctrl "sigs.k8s.io/controller-runtime" |
| 34 | + "sigs.k8s.io/controller-runtime/pkg/client" |
| 35 | + |
| 36 | + infrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2" |
| 37 | + rosacontrolplanev1 "sigs.k8s.io/cluster-api-provider-aws/v2/controlplane/rosa/api/v1beta2" |
| 38 | + expinfrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/exp/api/v1beta2" |
| 39 | + "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud" |
| 40 | + "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/scope" |
| 41 | + stsiface "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/services/sts" |
| 42 | + "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/services/sts/mock_stsiface" |
| 43 | + "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/logger" |
| 44 | + "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/rosa" |
| 45 | + "sigs.k8s.io/cluster-api-provider-aws/v2/test/mocks" |
| 46 | + clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" |
| 47 | + expclusterv1 "sigs.k8s.io/cluster-api/exp/api/v1beta1" |
| 48 | + "sigs.k8s.io/cluster-api/util/patch" |
| 49 | +) |
| 50 | + |
| 51 | +func TestRosaClusterReconcile(t *testing.T) { |
| 52 | + t.Run("Reconcile Rosa Cluster", func(t *testing.T) { |
| 53 | + g := NewWithT(t) |
| 54 | + ns, err := testEnv.CreateNamespace(ctx, "test-namespace") |
| 55 | + g.Expect(err).ToNot(HaveOccurred()) |
| 56 | + |
| 57 | + secret := &corev1.Secret{ |
| 58 | + ObjectMeta: metav1.ObjectMeta{ |
| 59 | + Name: "rosa-secret", |
| 60 | + Namespace: ns.Name, |
| 61 | + }, |
| 62 | + Data: map[string][]byte{ |
| 63 | + "ocmToken": []byte("secret-ocm-token-string"), |
| 64 | + }, |
| 65 | + } |
| 66 | + |
| 67 | + identity := &infrav1.AWSClusterControllerIdentity{ |
| 68 | + ObjectMeta: metav1.ObjectMeta{ |
| 69 | + Name: "default", |
| 70 | + }, |
| 71 | + Spec: infrav1.AWSClusterControllerIdentitySpec{ |
| 72 | + AWSClusterIdentitySpec: infrav1.AWSClusterIdentitySpec{ |
| 73 | + AllowedNamespaces: &infrav1.AllowedNamespaces{}, |
| 74 | + }, |
| 75 | + }, |
| 76 | + } |
| 77 | + identity.SetGroupVersionKind(infrav1.GroupVersion.WithKind("AWSClusterStaticIdentity")) |
| 78 | + |
| 79 | + rosaClusterName := "rosa-controlplane-1" |
| 80 | + rosaControlPlane := &rosacontrolplanev1.ROSAControlPlane{ |
| 81 | + ObjectMeta: metav1.ObjectMeta{ |
| 82 | + Name: rosaClusterName, |
| 83 | + Namespace: ns.Name, |
| 84 | + }, |
| 85 | + TypeMeta: metav1.TypeMeta{ |
| 86 | + Kind: "ROSAControlPlane", |
| 87 | + APIVersion: rosacontrolplanev1.GroupVersion.String(), |
| 88 | + }, |
| 89 | + Spec: rosacontrolplanev1.RosaControlPlaneSpec{ |
| 90 | + RosaClusterName: rosaClusterName, |
| 91 | + Subnets: []string{"subnet-0ac99a6230b408813", "subnet-1ac99a6230b408811"}, |
| 92 | + AvailabilityZones: []string{"az-1", "az-2"}, |
| 93 | + Network: &rosacontrolplanev1.NetworkSpec{ |
| 94 | + MachineCIDR: "10.0.0.0/16", |
| 95 | + PodCIDR: "10.128.0.0/14", |
| 96 | + ServiceCIDR: "172.30.0.0/16", |
| 97 | + }, |
| 98 | + Region: "us-east-1", |
| 99 | + Version: "4.19.20", |
| 100 | + ChannelGroup: "stable", |
| 101 | + RolesRef: rosacontrolplanev1.AWSRolesRef{}, |
| 102 | + OIDCID: "oidcid1", |
| 103 | + InstallerRoleARN: "arn1", |
| 104 | + WorkerRoleARN: "arn2", |
| 105 | + SupportRoleARN: "arn3", |
| 106 | + CredentialsSecretRef: &corev1.LocalObjectReference{ |
| 107 | + Name: secret.Name, |
| 108 | + }, |
| 109 | + VersionGate: "Acknowledge", |
| 110 | + IdentityRef: &infrav1.AWSIdentityReference{ |
| 111 | + Name: identity.Name, |
| 112 | + Kind: infrav1.ControllerIdentityKind, |
| 113 | + }, |
| 114 | + }, |
| 115 | + } |
| 116 | + |
| 117 | + rosaCluster := &expinfrav1.ROSACluster{ |
| 118 | + TypeMeta: metav1.TypeMeta{ |
| 119 | + Kind: "ROSACluster", |
| 120 | + APIVersion: expinfrav1.GroupVersion.String(), |
| 121 | + }, |
| 122 | + ObjectMeta: metav1.ObjectMeta{ |
| 123 | + Name: "rosa-cluster", |
| 124 | + Namespace: ns.Name, |
| 125 | + }, |
| 126 | + } |
| 127 | + |
| 128 | + capiCluster := &clusterv1.Cluster{ |
| 129 | + TypeMeta: metav1.TypeMeta{ |
| 130 | + Kind: "Cluster", |
| 131 | + APIVersion: clusterv1.GroupVersion.String(), |
| 132 | + }, |
| 133 | + ObjectMeta: metav1.ObjectMeta{ |
| 134 | + Name: "capi-cluster-1", |
| 135 | + Namespace: ns.Name, |
| 136 | + UID: types.UID("capi-cluster-1"), |
| 137 | + }, |
| 138 | + Spec: clusterv1.ClusterSpec{ |
| 139 | + InfrastructureRef: &corev1.ObjectReference{ |
| 140 | + Name: rosaCluster.Name, |
| 141 | + Kind: "ROSACluster", |
| 142 | + APIVersion: expinfrav1.GroupVersion.String(), |
| 143 | + Namespace: ns.Name, |
| 144 | + }, |
| 145 | + ControlPlaneRef: &corev1.ObjectReference{ |
| 146 | + Name: rosaControlPlane.Name, |
| 147 | + Kind: "ROSAControlPlane", |
| 148 | + APIVersion: rosacontrolplanev1.GroupVersion.String(), |
| 149 | + Namespace: ns.Name, |
| 150 | + }, |
| 151 | + Paused: false, |
| 152 | + }, |
| 153 | + } |
| 154 | + |
| 155 | + rosaCluster.OwnerReferences = []metav1.OwnerReference{ |
| 156 | + { |
| 157 | + Name: capiCluster.Name, |
| 158 | + Kind: "Cluster", |
| 159 | + APIVersion: clusterv1.GroupVersion.String(), |
| 160 | + UID: capiCluster.UID, |
| 161 | + }, |
| 162 | + } |
| 163 | + |
| 164 | + createObject(g, secret, ns.Name) |
| 165 | + createObject(g, identity, ns.Name) |
| 166 | + createObject(g, capiCluster, ns.Name) |
| 167 | + createObject(g, rosaControlPlane, ns.Name) |
| 168 | + createObject(g, rosaCluster, ns.Name) |
| 169 | + |
| 170 | + // set controlplane status |
| 171 | + rosaCPPatch, err := patch.NewHelper(rosaControlPlane, testEnv) |
| 172 | + rosaControlPlane.Status.Ready = true |
| 173 | + rosaControlPlane.Status.Version = "4.19.20" |
| 174 | + rosaControlPlane.Status.ID = rosaClusterName |
| 175 | + g.Expect(rosaCPPatch.Patch(ctx, rosaControlPlane)).To(Succeed()) |
| 176 | + g.Expect(err).ShouldNot(HaveOccurred()) |
| 177 | + |
| 178 | + // set rosaCluster pause conditions |
| 179 | + rosaClsPatch, err := patch.NewHelper(rosaCluster, testEnv) |
| 180 | + rosaCluster.Status.Conditions = clusterv1.Conditions{ |
| 181 | + clusterv1.Condition{ |
| 182 | + Type: clusterv1.PausedV1Beta2Condition, |
| 183 | + Status: corev1.ConditionFalse, |
| 184 | + Reason: clusterv1.NotPausedV1Beta2Reason, |
| 185 | + Message: "", |
| 186 | + }, |
| 187 | + } |
| 188 | + g.Expect(rosaClsPatch.Patch(ctx, rosaCluster)).To(Succeed()) |
| 189 | + g.Expect(err).ShouldNot(HaveOccurred()) |
| 190 | + |
| 191 | + // set capiCluster pause condition |
| 192 | + clsPatch, err := patch.NewHelper(capiCluster, testEnv) |
| 193 | + capiCluster.Status.Conditions = clusterv1.Conditions{ |
| 194 | + clusterv1.Condition{ |
| 195 | + Type: clusterv1.PausedV1Beta2Condition, |
| 196 | + Status: corev1.ConditionFalse, |
| 197 | + Reason: clusterv1.NotPausedV1Beta2Reason, |
| 198 | + Message: "", |
| 199 | + }, |
| 200 | + } |
| 201 | + g.Expect(clsPatch.Patch(ctx, capiCluster)).To(Succeed()) |
| 202 | + g.Expect(err).ShouldNot(HaveOccurred()) |
| 203 | + |
| 204 | + // patching is not reliably synchronous |
| 205 | + time.Sleep(50 * time.Millisecond) |
| 206 | + |
| 207 | + mockCtrl := gomock.NewController(t) |
| 208 | + recorder := record.NewFakeRecorder(10) |
| 209 | + ctx := context.TODO() |
| 210 | + ocmMock := mocks.NewMockOCMClient(mockCtrl) |
| 211 | + stsMock := mock_stsiface.NewMockSTSClient(mockCtrl) |
| 212 | + stsMock.EXPECT().GetCallerIdentity(gomock.Any(), gomock.Any()).AnyTimes() |
| 213 | + |
| 214 | + nodePoolName := "nodepool-1" |
| 215 | + expect := func(m *mocks.MockOCMClientMockRecorder) { |
| 216 | + m.GetNodePools(gomock.Any()).AnyTimes().DoAndReturn(func(clusterId string) ([]*cmv1.NodePool, error) { |
| 217 | + // Build a NodePool. |
| 218 | + builder := cmv1.NewNodePool(). |
| 219 | + ID(nodePoolName). |
| 220 | + Version(cmv1.NewVersion().ID("openshift-v4.15.0")). |
| 221 | + AvailabilityZone("us-east-1a"). |
| 222 | + Subnet("subnet-12345"). |
| 223 | + Labels(map[string]string{"role": "worker"}). |
| 224 | + AutoRepair(true). |
| 225 | + TuningConfigs("tuning1"). |
| 226 | + AWSNodePool( |
| 227 | + cmv1.NewAWSNodePool(). |
| 228 | + InstanceType("m5.large"). |
| 229 | + AdditionalSecurityGroupIds("sg-123", "sg-456"). |
| 230 | + RootVolume(cmv1.NewAWSVolume().Size(120)), |
| 231 | + ). |
| 232 | + Taints( |
| 233 | + cmv1.NewTaint().Key("dedicated").Value("gpu").Effect(string(corev1.TaintEffectNoSchedule)), |
| 234 | + ). |
| 235 | + NodeDrainGracePeriod( |
| 236 | + cmv1.NewValue().Value(10), |
| 237 | + ). |
| 238 | + ManagementUpgrade( |
| 239 | + cmv1.NewNodePoolManagementUpgrade(). |
| 240 | + MaxSurge("1"). |
| 241 | + MaxUnavailable("2"), |
| 242 | + ). |
| 243 | + Replicas(2). |
| 244 | + Status( |
| 245 | + cmv1.NewNodePoolStatus(). |
| 246 | + Message(""). |
| 247 | + CurrentReplicas(2), |
| 248 | + ) |
| 249 | + |
| 250 | + nodePool, err := builder.Build() |
| 251 | + g.Expect(err).ToNot(HaveOccurred()) |
| 252 | + return []*cmv1.NodePool{nodePool}, err |
| 253 | + }) |
| 254 | + } |
| 255 | + expect(ocmMock.EXPECT()) |
| 256 | + |
| 257 | + r := ROSAClusterReconciler{ |
| 258 | + Recorder: recorder, |
| 259 | + WatchFilterValue: "", |
| 260 | + Endpoints: []scope.ServiceEndpoint{}, |
| 261 | + Client: testEnv, |
| 262 | + NewStsClient: func(cloud.ScopeUsage, cloud.Session, logger.Wrapper, runtime.Object) stsiface.STSClient { |
| 263 | + return stsMock |
| 264 | + }, |
| 265 | + NewOCMClient: func(ctx context.Context, rosaScope *scope.ROSAControlPlaneScope) (rosa.OCMClient, error) { |
| 266 | + return ocmMock, nil |
| 267 | + }, |
| 268 | + } |
| 269 | + |
| 270 | + req := ctrl.Request{ |
| 271 | + NamespacedName: types.NamespacedName{ |
| 272 | + Namespace: rosaCluster.Namespace, |
| 273 | + Name: rosaCluster.Name, |
| 274 | + }, |
| 275 | + } |
| 276 | + |
| 277 | + _, err = r.Reconcile(ctx, req) |
| 278 | + g.Expect(err).ToNot(HaveOccurred()) |
| 279 | + |
| 280 | + rosaMachinePool := &expinfrav1.ROSAMachinePool{} |
| 281 | + keyRosaMP := client.ObjectKey{Name: nodePoolName, Namespace: ns.Name} |
| 282 | + errRosaMP := testEnv.Get(ctx, keyRosaMP, rosaMachinePool) |
| 283 | + g.Expect(errRosaMP).ToNot(HaveOccurred()) |
| 284 | + |
| 285 | + machinePool := &expclusterv1.MachinePool{} |
| 286 | + keyMP := client.ObjectKey{Name: nodePoolName, Namespace: ns.Name} |
| 287 | + errMP := testEnv.Get(ctx, keyMP, machinePool) |
| 288 | + g.Expect(errMP).ToNot(HaveOccurred()) |
| 289 | + |
| 290 | + // Test get getRosaMachinePoolNames |
| 291 | + rosaMachinePools, err := r.getRosaMachinePoolNames(ctx, capiCluster) |
| 292 | + g.Expect(err).ToNot(HaveOccurred()) |
| 293 | + g.Expect(len(rosaMachinePools)).To(Equal(1)) |
| 294 | + |
| 295 | + cleanupObject(g, rosaMachinePool) |
| 296 | + cleanupObject(g, machinePool) |
| 297 | + cleanupObject(g, rosaCluster) |
| 298 | + cleanupObject(g, rosaControlPlane) |
| 299 | + cleanupObject(g, capiCluster) |
| 300 | + mockCtrl.Finish() |
| 301 | + }) |
| 302 | +} |
0 commit comments