Commit c5ef0be

add e2es & fix integration test
Signed-off-by: Britania Rodriguez Reyes <[email protected]>
1 parent ccd0b25 commit c5ef0be

5 files changed (+706 / -54 lines)


pkg/controllers/updaterun/execution.go

Lines changed: 12 additions & 12 deletions
```diff
@@ -123,7 +123,7 @@ func (r *Reconciler) executeUpdatingStage(
 	// Go through each cluster in the stage and check if it's updating/succeeded/failed.
 	for i := 0; i < len(updatingStageStatus.Clusters) && clusterUpdatingCount < maxConcurrency; i++ {
 		clusterStatus := &updatingStageStatus.Clusters[i]
-
+
 		// Process cluster status to determine if we should skip or handle errors
 		processResult := r.processClusterStatus(clusterStatus, updatingStageStatus, updateRunRef)
 		if processResult.skip {
@@ -210,22 +210,22 @@ func (r *Reconciler) processClusterStatus(
 	updateRunRef klog.ObjectRef,
 ) clusterProcessResult {
 	clusterUpdateSucceededCond := meta.FindStatusCondition(clusterStatus.Conditions, string(placementv1beta1.ClusterUpdatingConditionSucceeded))
-
+
 	if clusterUpdateSucceededCond != nil && clusterUpdateSucceededCond.Status == metav1.ConditionTrue {
 		// The cluster has been updated successfully.
 		return clusterProcessResult{finished: true, skip: true}
 	}
-
+
 	if clusterUpdateSucceededCond != nil && clusterUpdateSucceededCond.Status == metav1.ConditionFalse {
-		// The cluster is marked as failed to update, this cluster is counted as updating cluster
+		// The cluster is marked as failed to update, this cluster is counted as updating cluster
 		// since it's not finished to avoid processing more clusters than maxConcurrency in this round.
 		failedErr := fmt.Errorf("the cluster `%s` in the stage %s has failed", clusterStatus.ClusterName, updatingStageStatus.StageName)
 		klog.ErrorS(failedErr, "The cluster has failed to be updated", "updateRun", updateRunRef)
 		return clusterProcessResult{
 			err: fmt.Errorf("%w: %s", errStagedUpdatedAborted, failedErr.Error()),
 		}
 	}
-
+
 	// The cluster needs to be processed.
 	return clusterProcessResult{}
 }
@@ -242,7 +242,7 @@ func (r *Reconciler) updateBindingForCluster(
 ) error {
 	updateRunRef := klog.KObj(updateRun)
 	updateRunStatus := updateRun.GetUpdateRunStatus()
-
+
 	if !isBindingSyncedWithClusterStatus(resourceSnapshotName, updateRun, binding, clusterStatus) {
 		klog.V(2).InfoS("Found the first cluster that needs to be updated", "cluster", clusterStatus.ClusterName, "stage", updatingStageStatus.StageName, "updateRun", updateRunRef)
 		// The binding is not up-to-date with the cluster status.
@@ -259,7 +259,7 @@ func (r *Reconciler) updateBindingForCluster(
 		klog.V(2).InfoS("Updated the status of a binding to bound", "binding", klog.KObj(binding), "cluster", clusterStatus.ClusterName, "stage", updatingStageStatus.StageName, "updateRun", updateRunRef)
 		return r.updateBindingRolloutStarted(ctx, binding, updateRun)
 	}
-
+
 	// The binding is synced but needs other updates
 	klog.V(2).InfoS("Found the first binding that is updating but the cluster status has not been updated", "cluster", clusterStatus.ClusterName, "stage", updatingStageStatus.StageName, "updateRun", updateRunRef)
 	bindingSpec := binding.GetBindingSpec()
@@ -292,12 +292,12 @@ func (r *Reconciler) validateUpdatingCluster(
 	updatingStageStatus *placementv1beta1.StageUpdatingStatus,
 ) error {
 	updateRunRef := klog.KObj(updateRun)
-
+
 	// Now the cluster has to be updating, the binding should point to the right resource snapshot and the binding should be bound.
 	inSync := isBindingSyncedWithClusterStatus(resourceSnapshotName, updateRun, binding, clusterStatus)
 	rolloutStarted := condition.IsConditionStatusTrue(meta.FindStatusCondition(binding.GetBindingStatus().Conditions, string(placementv1beta1.ResourceBindingRolloutStarted)), binding.GetGeneration())
 	bindingSpec := binding.GetBindingSpec()
-
+
 	if !inSync || !rolloutStarted || bindingSpec.State != placementv1beta1.BindingStateBound {
 		// This issue mostly happens when there are concurrent updateRuns referencing the same clusterResourcePlacement but releasing different versions.
 		// After the 1st updateRun updates the binding, and before the controller re-checks the binding status, the 2nd updateRun updates the same binding, and thus the 1st updateRun is preempted and observes the binding not matching the desired state.
@@ -310,7 +310,7 @@ func (r *Reconciler) validateUpdatingCluster(
 		markClusterUpdatingFailed(clusterStatus, updateRun.GetGeneration(), preemptedErr.Error())
 		return fmt.Errorf("%w: %s", errStagedUpdatedAborted, preemptedErr.Error())
 	}
-
+
 	return nil
 }

@@ -323,12 +323,12 @@ func (r *Reconciler) handleStageCompletion(
 	updatingStageStatus *placementv1beta1.StageUpdatingStatus,
 ) (time.Duration, error) {
 	updateRunRef := klog.KObj(updateRun)
-
+
 	// All the clusters in the stage have been updated.
 	markUpdateRunWaiting(updateRun, updatingStageStatus.StageName)
 	markStageUpdatingWaiting(updatingStageStatus, updateRun.GetGeneration())
 	klog.V(2).InfoS("The stage has finished all cluster updating", "stage", updatingStageStatus.StageName, "updateRun", updateRunRef)
-
+
 	// Check if the after stage tasks are ready.
 	approved, waitTime, err := r.checkAfterStageTasksStatus(ctx, updatingStageIndex, updateRun)
 	if err != nil {
```
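For readers skimming the hunks above: the loop in `executeUpdatingStage` delegates each cluster to `processClusterStatus` and branches on the returned `clusterProcessResult` (`skip`, `finished`, `err`). Below is a minimal, self-contained sketch of that result-struct pattern; the `cluster` type, the `succeeded` pointer field, and the sample data are simplified stand-ins for the real placement API objects and status conditions, not code from this repository.

```go
package main

import "fmt"

// clusterProcessResult mirrors the shape used in execution.go: the caller loops
// over clusters and branches on skip / finished / err.
type clusterProcessResult struct {
	finished bool
	skip     bool
	err      error
}

// cluster is a simplified stand-in for the per-cluster status in a stage.
type cluster struct {
	name      string
	succeeded *bool // nil = no terminal condition yet, true/false = succeeded/failed
}

func processClusterStatus(c cluster) clusterProcessResult {
	switch {
	case c.succeeded != nil && *c.succeeded:
		// Already updated successfully: finished, and skipped in this round.
		return clusterProcessResult{finished: true, skip: true}
	case c.succeeded != nil && !*c.succeeded:
		// Marked as failed: the stage is aborted with a wrapped error.
		return clusterProcessResult{err: fmt.Errorf("cluster %q has failed", c.name)}
	default:
		// No terminal condition yet: the cluster still needs to be processed.
		return clusterProcessResult{}
	}
}

func main() {
	t, f := true, false
	clusters := []cluster{{"member-1", &t}, {"member-2", nil}, {"member-3", &f}}

	maxConcurrency := 2
	clusterUpdatingCount := 0

	for i := 0; i < len(clusters) && clusterUpdatingCount < maxConcurrency; i++ {
		res := processClusterStatus(clusters[i])
		if res.err != nil {
			fmt.Println("stage aborted:", res.err)
			return
		}
		if res.skip {
			continue // already finished, does not count toward concurrency
		}
		clusterUpdatingCount++
		fmt.Println("updating", clusters[i].name)
	}
}
```

As the comment in the hunk above notes, the real controller counts a failed cluster toward `maxConcurrency` for the round before aborting; the sketch aborts immediately for brevity.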

pkg/controllers/updaterun/initialization_integration_test.go

Lines changed: 17 additions & 3 deletions
```diff
@@ -49,7 +49,7 @@ const (
 	regionWestus = "westus"
 )

-var _ = Describe("Updaterun initialization tests", func() {
+var _ = FDescribe("Updaterun initialization tests", func() {
 	var updateRun *placementv1beta1.ClusterStagedUpdateRun
 	var crp *placementv1beta1.ClusterResourcePlacement
 	var policySnapshot *placementv1beta1.ClusterSchedulingPolicySnapshot
@@ -942,12 +942,19 @@ var _ = Describe("Updaterun initialization tests", func() {
 		})
 	})

-	It("Should not initialize if updateRun state is Abandoned", func() {
+	It("Should not initialize if updateRun is created with state Abandoned", func() {
 		By("Creating a new clusterStagedUpdateRun in Abandoned state")
 		updateRun.Spec.State = placementv1beta1.StateAbandoned
 		Expect(k8sClient.Create(ctx, updateRun)).To(Succeed())

 		By("Validating the updateRun is not initialized")
+		// Populate the cache first.
+		Eventually(func() error {
+			if err := k8sClient.Get(ctx, updateRunNamespacedName, updateRun); err != nil {
+				return err
+			}
+			return nil
+		}, timeout, interval).Should(Succeed(), "failed to get the updateRun")
 		Consistently(func() error {
 			if err := k8sClient.Get(ctx, updateRunNamespacedName, updateRun); err != nil {
 				return err
@@ -960,12 +967,19 @@ var _ = Describe("Updaterun initialization tests", func() {
 		}, duration, interval).Should(Succeed(), "the abandoned updateRun should not be initialized")
 	})

-	It("Should not initialize if updateRun state is Stopped", func() {
+	It("Should not initialize if updateRun is created with state Stopped ", func() {
 		By("Creating a new clusterStagedUpdateRun in Stopped state")
 		updateRun.Spec.State = placementv1beta1.StateStopped
 		Expect(k8sClient.Create(ctx, updateRun)).To(Succeed())

 		By("Validating the updateRun is not initialized")
+		// Populate the cache first.
+		Eventually(func() error {
+			if err := k8sClient.Get(ctx, updateRunNamespacedName, updateRun); err != nil {
+				return err
+			}
+			return nil
+		}, timeout, interval).Should(Succeed(), "failed to get the updateRun")
 		Consistently(func() error {
 			if err := k8sClient.Get(ctx, updateRunNamespacedName, updateRun); err != nil {
 				return err
```
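The integration-test fix above adds an `Eventually` poll before each `Consistently` block so the test client has actually observed the freshly created updateRun before the steady-state assertion begins. Here is a small, self-contained Gomega sketch of that Eventually-then-Consistently ordering; the toy `store` type and its artificial sync delay are hypothetical stand-ins for the envtest client cache, not code from this repository.

```go
package cachesync_test

import (
	"errors"
	"sync/atomic"
	"testing"
	"time"

	. "github.com/onsi/gomega"
)

// store mimics a client cache that cannot serve a freshly created object
// until it has synced; Get fails until synced is flipped to true.
type store struct{ synced atomic.Bool }

func (s *store) Get() error {
	if !s.synced.Load() {
		return errors.New("not found: cache not synced yet")
	}
	return nil
}

func TestEventuallyThenConsistently(t *testing.T) {
	g := NewWithT(t)

	s := &store{}
	go func() {
		time.Sleep(100 * time.Millisecond)
		s.synced.Store(true)
	}()

	// Eventually absorbs the cache lag: it retries Get until it succeeds.
	g.Eventually(s.Get).
		WithTimeout(time.Second).
		WithPolling(10 * time.Millisecond).
		Should(Succeed(), "failed to get the object")

	// Only then does Consistently assert the steady state; without the
	// Eventually above it could fail on its very first poll.
	g.Consistently(s.Get).
		WithTimeout(300 * time.Millisecond).
		WithPolling(10 * time.Millisecond).
		Should(Succeed(), "object should remain readable")
}
```

The added `Eventually` blocks in the diff enforce the same ordering in front of the existing `Consistently` checks.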
