diff --git a/config/crds/core.kcp.io_logicalclusters.yaml b/config/crds/core.kcp.io_logicalclusters.yaml index 738f8aaf1b4..359b6280a49 100644 --- a/config/crds/core.kcp.io_logicalclusters.yaml +++ b/config/crds/core.kcp.io_logicalclusters.yaml @@ -119,6 +119,17 @@ spec: - resource - uid type: object + terminators: + description: |- + Terminators are set on creation by the system and copied to status when + termination starts. + items: + description: |- + LogicalClusterTerminator is a unique string corresponding to a logical cluster + terminator controller. + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(:[a-z0-9]([-a-z0-9]*[a-z0-9])?)*(:[a-z0-9][a-z0-9]([-a-z0-9]*[a-z0-9])?))|(system:.+)$ + type: string + type: array type: object status: default: {} @@ -197,6 +208,18 @@ spec: - Ready - Unavailable type: string + terminators: + description: |- + Terminators are set on creation by the system and must be cleared + by a controller before the logical cluster can be deleted. The LogicalCluster object + will stay in the phase "Deleting" until all terminator are cleared. + items: + description: |- + LogicalClusterTerminator is a unique string corresponding to a logical cluster + terminator controller. + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(:[a-z0-9]([-a-z0-9]*[a-z0-9])?)*(:[a-z0-9][a-z0-9]([-a-z0-9]*[a-z0-9])?))|(system:.+)$ + type: string + type: array type: object type: object served: true diff --git a/config/crds/tenancy.kcp.io_workspaces.yaml b/config/crds/tenancy.kcp.io_workspaces.yaml index 4fe949c7fda..dc1b63e8670 100644 --- a/config/crds/tenancy.kcp.io_workspaces.yaml +++ b/config/crds/tenancy.kcp.io_workspaces.yaml @@ -299,6 +299,17 @@ spec: - Ready - Unavailable type: string + terminators: + description: |- + terminators must be cleared by a controller before the workspace is being + deleted. + items: + description: |- + LogicalClusterTerminator is a unique string corresponding to a logical cluster + terminator controller. 
+ pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(:[a-z0-9]([-a-z0-9]*[a-z0-9])?)*(:[a-z0-9][a-z0-9]([-a-z0-9]*[a-z0-9])?))|(system:.+)$ + type: string + type: array type: object required: - spec diff --git a/config/crds/tenancy.kcp.io_workspacetypes.yaml b/config/crds/tenancy.kcp.io_workspacetypes.yaml index 7ab29cb9957..6ab554bbb06 100644 --- a/config/crds/tenancy.kcp.io_workspacetypes.yaml +++ b/config/crds/tenancy.kcp.io_workspacetypes.yaml @@ -249,6 +249,18 @@ spec: minItems: 1 type: array type: object + terminator: + description: |- + Terminator determines if this WorkspaceType has an associated terminating + controller. These controllers are used to add functionality to a Workspace; + all controllers must finish their work before the Workspace is being deleted. + + One terminating controller is supported per WorkspaceType; the identifier + for this terminator will be a colon-delimited string using the workspace in which + the WorkspaceType is defined, and the type's name. For example, if a + WorkspaceType `example` is created in the `root:org` workspace, the implicit + terminator name is `root:org:example`. + type: boolean type: object status: description: WorkspaceTypeStatus defines the observed state of WorkspaceType. 
diff --git a/config/root-phase0/apiexport-tenancy.kcp.io.yaml b/config/root-phase0/apiexport-tenancy.kcp.io.yaml index 1ae07983824..7c54b938f44 100644 --- a/config/root-phase0/apiexport-tenancy.kcp.io.yaml +++ b/config/root-phase0/apiexport-tenancy.kcp.io.yaml @@ -14,12 +14,12 @@ spec: crd: {} - group: tenancy.kcp.io name: workspaces - schema: v250421-25d98218b.workspaces.tenancy.kcp.io + schema: v251015-1d163d0e5.workspaces.tenancy.kcp.io storage: crd: {} - group: tenancy.kcp.io name: workspacetypes - schema: v250806-4c99c4583.workspacetypes.tenancy.kcp.io + schema: v251015-1d163d0e5.workspacetypes.tenancy.kcp.io storage: crd: {} status: {} diff --git a/config/root-phase0/apiresourceschema-logicalclusters.core.kcp.io.yaml b/config/root-phase0/apiresourceschema-logicalclusters.core.kcp.io.yaml index 8c5b3304c23..06881ad2dda 100644 --- a/config/root-phase0/apiresourceschema-logicalclusters.core.kcp.io.yaml +++ b/config/root-phase0/apiresourceschema-logicalclusters.core.kcp.io.yaml @@ -2,7 +2,7 @@ apiVersion: apis.kcp.io/v1alpha1 kind: APIResourceSchema metadata: creationTimestamp: null - name: v241020-fce06d31d.logicalclusters.core.kcp.io + name: v251016-6e24e0d49.logicalclusters.core.kcp.io spec: group: core.kcp.io names: @@ -117,6 +117,17 @@ spec: - resource - uid type: object + terminators: + description: |- + Terminators are set on creation by the system and copied to status when + termination starts. + items: + description: |- + LogicalClusterTerminator is a unique string corresponding to a logical cluster + terminator controller. + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(:[a-z0-9]([-a-z0-9]*[a-z0-9])?)*(:[a-z0-9][a-z0-9]([-a-z0-9]*[a-z0-9])?))|(system:.+)$ + type: string + type: array type: object status: default: {} @@ -195,6 +206,18 @@ spec: - Ready - Unavailable type: string + terminators: + description: |- + Terminators are set on creation by the system and must be cleared + by a controller before the logical cluster can be deleted. 
The LogicalCluster object + will stay in the phase "Deleting" until all terminator are cleared. + items: + description: |- + LogicalClusterTerminator is a unique string corresponding to a logical cluster + terminator controller. + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(:[a-z0-9]([-a-z0-9]*[a-z0-9])?)*(:[a-z0-9][a-z0-9]([-a-z0-9]*[a-z0-9])?))|(system:.+)$ + type: string + type: array type: object type: object served: true diff --git a/config/root-phase0/apiresourceschema-workspaces.tenancy.kcp.io.yaml b/config/root-phase0/apiresourceschema-workspaces.tenancy.kcp.io.yaml index 804fa3259a8..9cdb85e7de7 100644 --- a/config/root-phase0/apiresourceschema-workspaces.tenancy.kcp.io.yaml +++ b/config/root-phase0/apiresourceschema-workspaces.tenancy.kcp.io.yaml @@ -2,7 +2,7 @@ apiVersion: apis.kcp.io/v1alpha1 kind: APIResourceSchema metadata: creationTimestamp: null - name: v250421-25d98218b.workspaces.tenancy.kcp.io + name: v251015-1d163d0e5.workspaces.tenancy.kcp.io spec: group: tenancy.kcp.io names: @@ -297,6 +297,17 @@ spec: - Ready - Unavailable type: string + terminators: + description: |- + terminators must be cleared by a controller before the workspace is being + deleted. + items: + description: |- + LogicalClusterTerminator is a unique string corresponding to a logical cluster + terminator controller. 
+ pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(:[a-z0-9]([-a-z0-9]*[a-z0-9])?)*(:[a-z0-9][a-z0-9]([-a-z0-9]*[a-z0-9])?))|(system:.+)$ + type: string + type: array type: object required: - spec diff --git a/config/root-phase0/apiresourceschema-workspacetypes.tenancy.kcp.io.yaml b/config/root-phase0/apiresourceschema-workspacetypes.tenancy.kcp.io.yaml index 4d3f37d9dca..97c843ffbf4 100644 --- a/config/root-phase0/apiresourceschema-workspacetypes.tenancy.kcp.io.yaml +++ b/config/root-phase0/apiresourceschema-workspacetypes.tenancy.kcp.io.yaml @@ -2,7 +2,7 @@ apiVersion: apis.kcp.io/v1alpha1 kind: APIResourceSchema metadata: creationTimestamp: null - name: v250806-4c99c4583.workspacetypes.tenancy.kcp.io + name: v251015-1d163d0e5.workspacetypes.tenancy.kcp.io spec: group: tenancy.kcp.io names: @@ -247,6 +247,18 @@ spec: minItems: 1 type: array type: object + terminator: + description: |- + Terminator determines if this WorkspaceType has an associated terminating + controller. These controllers are used to add functionality to a Workspace; + all controllers must finish their work before the Workspace is being deleted. + + One terminating controller is supported per WorkspaceType; the identifier + for this terminator will be a colon-delimited string using the workspace in which + the WorkspaceType is defined, and the type's name. For example, if a + WorkspaceType `example` is created in the `root:org` workspace, the implicit + terminator name is `root:org:example`. + type: boolean type: object status: description: WorkspaceTypeStatus defines the observed state of WorkspaceType. diff --git a/pkg/admission/logicalcluster/admission.go b/pkg/admission/logicalcluster/admission.go index 75aba4cec2c..ed46990c621 100644 --- a/pkg/admission/logicalcluster/admission.go +++ b/pkg/admission/logicalcluster/admission.go @@ -111,6 +111,8 @@ func (o *plugin) Admit(ctx context.Context, a admission.Attributes, _ admission. 
logicalCluster.Status.Initializers = logicalCluster.Spec.Initializers + logicalCluster.Status.Terminators = logicalCluster.Spec.Terminators + return updateUnstructured(u, logicalCluster) } diff --git a/pkg/openapi/zz_generated.openapi.go b/pkg/openapi/zz_generated.openapi.go index 377b8130a6c..7c3e6b34b3c 100644 --- a/pkg/openapi/zz_generated.openapi.go +++ b/pkg/openapi/zz_generated.openapi.go @@ -3837,6 +3837,21 @@ func schema_sdk_apis_core_v1alpha1_LogicalClusterSpec(ref common.ReferenceCallba }, }, }, + "terminators": { + SchemaProps: spec.SchemaProps{ + Description: "Terminators are set on creation by the system and copied to status when termination starts.", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Default: "", + Type: []string{"string"}, + Format: "", + }, + }, + }, + }, + }, }, }, }, @@ -3895,6 +3910,21 @@ func schema_sdk_apis_core_v1alpha1_LogicalClusterStatus(ref common.ReferenceCall }, }, }, + "terminators": { + SchemaProps: spec.SchemaProps{ + Description: "Terminators are set on creation by the system and must be cleared by a controller before the logical cluster can be deleted. 
The LogicalCluster object will stay in the phase \"Deleting\" until all terminator are cleared.", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Default: "", + Type: []string{"string"}, + Format: "", + }, + }, + }, + }, + }, }, }, }, @@ -4846,6 +4876,21 @@ func schema_sdk_apis_tenancy_v1alpha1_WorkspaceStatus(ref common.ReferenceCallba }, }, }, + "terminators": { + SchemaProps: spec.SchemaProps{ + Description: "terminators must be cleared by a controller before the workspace is being deleted.", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Default: "", + Type: []string{"string"}, + Format: "", + }, + }, + }, + }, + }, }, }, }, @@ -5057,6 +5102,13 @@ func schema_sdk_apis_tenancy_v1alpha1_WorkspaceTypeSpec(ref common.ReferenceCall Format: "", }, }, + "terminator": { + SchemaProps: spec.SchemaProps{ + Description: "Terminator determines if this WorkspaceType has an associated terminating controller. These controllers are used to add functionality to a Workspace; all controllers must finish their work before the Workspace is being deleted.\n\nOne terminating controller is supported per WorkspaceType; the identifier for this terminator will be a colon-delimited string using the workspace in which the WorkspaceType is defined, and the type's name. For example, if a WorkspaceType `example` is created in the `root:org` workspace, the implicit terminator name is `root:org:example`.", + Type: []string{"boolean"}, + Format: "", + }, + }, "extend": { SchemaProps: spec.SchemaProps{ Description: "extend is a list of other WorkspaceTypes whose initializers and limitAllowedChildren and limitAllowedParents this WorkspaceType is inheriting. 
By (transitively) extending another WorkspaceType, this WorkspaceType will be considered as that other type in evaluation of limitAllowedChildren and limitAllowedParents constraints.\n\nA dependency cycle stop this WorkspaceType from being admitted as the type of a Workspace.\n\nA non-existing dependency stop this WorkspaceType from being admitted as the type of a Workspace.", diff --git a/pkg/reconciler/core/logicalcluster/logicalcluster_reconcile.go b/pkg/reconciler/core/logicalcluster/logicalcluster_reconcile.go index f4e455e7465..8ffe68b399a 100644 --- a/pkg/reconciler/core/logicalcluster/logicalcluster_reconcile.go +++ b/pkg/reconciler/core/logicalcluster/logicalcluster_reconcile.go @@ -36,8 +36,11 @@ type reconciler interface { } func (c *Controller) reconcile(ctx context.Context, logicalCluster *corev1alpha1.LogicalCluster) (bool, error) { + // reconcilers which modify Status should be last + // reconcilers which modify ObjectMeta, need to return reconcileStatusStopAndRequeue on change reconcilers := []reconciler{ &metaDataReconciler{}, + &terminatorReconciler{}, &phaseReconciler{}, &urlReconciler{shardExternalURL: c.shardExternalURL}, } diff --git a/pkg/reconciler/core/logicalcluster/logicalcluster_reconcile_metadata.go b/pkg/reconciler/core/logicalcluster/logicalcluster_reconcile_metadata.go index 9cf01cda64a..54a92cea17f 100644 --- a/pkg/reconciler/core/logicalcluster/logicalcluster_reconcile_metadata.go +++ b/pkg/reconciler/core/logicalcluster/logicalcluster_reconcile_metadata.go @@ -27,6 +27,7 @@ import ( corev1alpha1 "github.com/kcp-dev/kcp/sdk/apis/core/v1alpha1" "github.com/kcp-dev/kcp/sdk/apis/tenancy/initialization" + "github.com/kcp-dev/kcp/sdk/apis/tenancy/termination" tenancyv1alpha1 "github.com/kcp-dev/kcp/sdk/apis/tenancy/v1alpha1" ) @@ -39,7 +40,7 @@ func (r *metaDataReconciler) reconcile(ctx context.Context, logicalCluster *core expected := string(logicalCluster.Status.Phase) if !logicalCluster.DeletionTimestamp.IsZero() { - expected = 
"Deleting" + expected = string(corev1alpha1.LogicalClusterPhaseDeleting) } if got := logicalCluster.Labels[tenancyv1alpha1.WorkspacePhaseLabel]; got != expected { if logicalCluster.Labels == nil { @@ -49,6 +50,7 @@ func (r *metaDataReconciler) reconcile(ctx context.Context, logicalCluster *core changed = true } + // add initializers from the status as hashed labels initializerKeys := sets.New[string]() for _, initializer := range logicalCluster.Status.Initializers { key, value := initialization.InitializerToLabel(initializer) @@ -62,6 +64,22 @@ func (r *metaDataReconciler) reconcile(ctx context.Context, logicalCluster *core } } + // add terminators from the status as hashed labels + terminatorKeys := sets.New[string]() + for _, terminator := range logicalCluster.Status.Terminators { + key, value := termination.TerminatorToLabel(terminator) + terminatorKeys.Insert(key) + if got, expected := logicalCluster.Labels[key], value; got != expected { + if logicalCluster.Labels == nil { + logicalCluster.Labels = map[string]string{} + } + logicalCluster.Labels[key] = value + changed = true + } + } + + // remove any initializers/terminators from labels, which have been + // removed in the status for key := range logicalCluster.Labels { if strings.HasPrefix(key, tenancyv1alpha1.WorkspaceInitializerLabelPrefix) { if !initializerKeys.Has(key) { @@ -69,6 +87,13 @@ func (r *metaDataReconciler) reconcile(ctx context.Context, logicalCluster *core changed = true } } + + if strings.HasPrefix(key, tenancyv1alpha1.WorkspaceTerminatorLabelPrefix) { + if !terminatorKeys.Has(key) { + delete(logicalCluster.Labels, key) + changed = true + } + } } if logicalCluster.Status.Phase == corev1alpha1.LogicalClusterPhaseReady { diff --git a/pkg/reconciler/core/logicalcluster/logicalcluster_reconcile_metadata_test.go b/pkg/reconciler/core/logicalcluster/logicalcluster_reconcile_metadata_test.go index bd4e7499162..401d5a16d94 100644 --- 
a/pkg/reconciler/core/logicalcluster/logicalcluster_reconcile_metadata_test.go +++ b/pkg/reconciler/core/logicalcluster/logicalcluster_reconcile_metadata_test.go @@ -47,11 +47,17 @@ func TestReconcileMetadata(t *testing.T) { Initializers: []corev1alpha1.LogicalClusterInitializer{ "pluto", "venus", "apollo", }, + Terminators: []corev1alpha1.LogicalClusterTerminator{ + "pluto", "venus", "apollo", + }, }, }, expected: metav1.ObjectMeta{ Labels: map[string]string{ "tenancy.kcp.io/phase": "Ready", + "terminator.internal.kcp.io/2eadcbf778956517ec99fd1c1c32a9b13cba": "2eadcbf778956517ec99fd1c1c32a9b13cbae759770fc37c341c7fe8", + "terminator.internal.kcp.io/aceeb26461953562d30366db65b200f64241": "aceeb26461953562d30366db65b200f64241f9e5fe888892d52eea5c", + "terminator.internal.kcp.io/ccf53a4988ae8515ee77131ef507cabaf188": "ccf53a4988ae8515ee77131ef507cabaf18822766c2a4cff33b24eb8", "initializer.internal.kcp.io/2eadcbf778956517ec99fd1c1c32a9b13cb": "2eadcbf778956517ec99fd1c1c32a9b13cbae759770fc37c341c7fe8", "initializer.internal.kcp.io/aceeb26461953562d30366db65b200f6424": "aceeb26461953562d30366db65b200f64241f9e5fe888892d52eea5c", "initializer.internal.kcp.io/ccf53a4988ae8515ee77131ef507cabaf18": "ccf53a4988ae8515ee77131ef507cabaf18822766c2a4cff33b24eb8", @@ -72,7 +78,7 @@ func TestReconcileMetadata(t *testing.T) { expected: metav1.ObjectMeta{ DeletionTimestamp: &metav1.Time{Time: date}, Labels: map[string]string{ - "tenancy.kcp.io/phase": "Deleting", + "tenancy.kcp.io/phase": string(corev1alpha1.LogicalClusterPhaseDeleting), }, }, wantStatus: reconcileStatusStopAndRequeue, @@ -85,6 +91,8 @@ func TestReconcileMetadata(t *testing.T) { "tenancy.kcp.io/phase": "Ready", "initializer.internal.kcp.io/2eadcbf778956517ec99fd1c1c32a9b13cb": "2eadcbf778956517ec99fd1c1c32a9b13cbae759770fc37c341c7fe8", "initializer.internal.kcp.io/aceeb26461953562d30366db65b200f6424": "aceeb26461953562d30366db65b200f64241f9e5fe888892d52eea5c", + 
"terminator.internal.kcp.io/2eadcbf778956517ec99fd1c1c32a9b13cba": "2eadcbf778956517ec99fd1c1c32a9b13cbae759770fc37c341c7fe8", + "terminator.internal.kcp.io/aceeb26461953562d30366db65b200f64241": "aceeb26461953562d30366db65b200f64241f9e5fe888892d52eea5c", }, }, Status: corev1alpha1.LogicalClusterStatus{ @@ -92,11 +100,17 @@ func TestReconcileMetadata(t *testing.T) { Initializers: []corev1alpha1.LogicalClusterInitializer{ "pluto", "venus", "apollo", }, + Terminators: []corev1alpha1.LogicalClusterTerminator{ + "pluto", "venus", "apollo", + }, }, }, expected: metav1.ObjectMeta{ Labels: map[string]string{ "tenancy.kcp.io/phase": "Ready", + "terminator.internal.kcp.io/2eadcbf778956517ec99fd1c1c32a9b13cba": "2eadcbf778956517ec99fd1c1c32a9b13cbae759770fc37c341c7fe8", + "terminator.internal.kcp.io/aceeb26461953562d30366db65b200f64241": "aceeb26461953562d30366db65b200f64241f9e5fe888892d52eea5c", + "terminator.internal.kcp.io/ccf53a4988ae8515ee77131ef507cabaf188": "ccf53a4988ae8515ee77131ef507cabaf18822766c2a4cff33b24eb8", "initializer.internal.kcp.io/2eadcbf778956517ec99fd1c1c32a9b13cb": "2eadcbf778956517ec99fd1c1c32a9b13cbae759770fc37c341c7fe8", "initializer.internal.kcp.io/aceeb26461953562d30366db65b200f6424": "aceeb26461953562d30366db65b200f64241f9e5fe888892d52eea5c", "initializer.internal.kcp.io/ccf53a4988ae8515ee77131ef507cabaf18": "ccf53a4988ae8515ee77131ef507cabaf18822766c2a4cff33b24eb8", @@ -112,6 +126,8 @@ func TestReconcileMetadata(t *testing.T) { "tenancy.kcp.io/phase": "Ready", "initializer.internal.kcp.io/2eadcbf778956517ec99fd1c1c32a9b13cb": "2eadcbf778956517ec99fd1c1c32a9b13cbae759770fc37c341c7fe8", "initializer.internal.kcp.io/aceeb26461953562d30366db65b200f6424": "aceeb26461953562d30366db65b200f64241f9e5fe888892d52eea5c", + "terminator.internal.kcp.io/2eadcbf778956517ec99fd1c1c32a9b13cba": "2eadcbf778956517ec99fd1c1c32a9b13cbae759770fc37c341c7fe8", + "terminator.internal.kcp.io/aceeb26461953562d30366db65b200f64241": 
"aceeb26461953562d30366db65b200f64241f9e5fe888892d52eea5c", }, }, Status: corev1alpha1.LogicalClusterStatus{ @@ -119,12 +135,16 @@ func TestReconcileMetadata(t *testing.T) { Initializers: []corev1alpha1.LogicalClusterInitializer{ "pluto", }, + Terminators: []corev1alpha1.LogicalClusterTerminator{ + "pluto", + }, }, }, expected: metav1.ObjectMeta{ Labels: map[string]string{ "tenancy.kcp.io/phase": "Ready", "initializer.internal.kcp.io/2eadcbf778956517ec99fd1c1c32a9b13cb": "2eadcbf778956517ec99fd1c1c32a9b13cbae759770fc37c341c7fe8", + "terminator.internal.kcp.io/2eadcbf778956517ec99fd1c1c32a9b13cba": "2eadcbf778956517ec99fd1c1c32a9b13cbae759770fc37c341c7fe8", }, }, wantStatus: reconcileStatusStopAndRequeue, @@ -135,6 +155,9 @@ func TestReconcileMetadata(t *testing.T) { ObjectMeta: metav1.ObjectMeta{ Labels: map[string]string{ "tenancy.kcp.io/phase": "Ready", + "terminator.internal.kcp.io/2eadcbf778956517ec99fd1c1c32a9b13cba": "2eadcbf778956517ec99fd1c1c32a9b13cbae759770fc37c341c7fe8", + "terminator.internal.kcp.io/aceeb26461953562d30366db65b200f64241": "aceeb26461953562d30366db65b200f64241f9e5fe888892d52eea5c", + "terminator.internal.kcp.io/ccf53a4988ae8515ee77131ef507cabaf188": "ccf53a4988ae8515ee77131ef507cabaf18822766c2a4cff33b24eb8", "initializer.internal.kcp.io/2eadcbf778956517ec99fd1c1c32a9b13cb": "2eadcbf778956517ec99fd1c1c32a9b13cbae759770fc37c341c7fe8", "initializer.internal.kcp.io/aceeb26461953562d30366db65b200f6424": "aceeb26461953562d30366db65b200f64241f9e5fe888892d52eea5c", "initializer.internal.kcp.io/ccf53a4988ae8515ee77131ef507cabaf18": "ccf53a4988ae8515ee77131ef507cabaf18822766c2a4cff33b24eb8", @@ -145,11 +168,17 @@ func TestReconcileMetadata(t *testing.T) { Initializers: []corev1alpha1.LogicalClusterInitializer{ "pluto", "venus", "apollo", }, + Terminators: []corev1alpha1.LogicalClusterTerminator{ + "pluto", "venus", "apollo", + }, }, }, expected: metav1.ObjectMeta{ Labels: map[string]string{ "tenancy.kcp.io/phase": "Ready", + 
"terminator.internal.kcp.io/2eadcbf778956517ec99fd1c1c32a9b13cba": "2eadcbf778956517ec99fd1c1c32a9b13cbae759770fc37c341c7fe8", + "terminator.internal.kcp.io/aceeb26461953562d30366db65b200f64241": "aceeb26461953562d30366db65b200f64241f9e5fe888892d52eea5c", + "terminator.internal.kcp.io/ccf53a4988ae8515ee77131ef507cabaf188": "ccf53a4988ae8515ee77131ef507cabaf18822766c2a4cff33b24eb8", "initializer.internal.kcp.io/2eadcbf778956517ec99fd1c1c32a9b13cb": "2eadcbf778956517ec99fd1c1c32a9b13cbae759770fc37c341c7fe8", "initializer.internal.kcp.io/aceeb26461953562d30366db65b200f6424": "aceeb26461953562d30366db65b200f64241f9e5fe888892d52eea5c", "initializer.internal.kcp.io/ccf53a4988ae8515ee77131ef507cabaf18": "ccf53a4988ae8515ee77131ef507cabaf18822766c2a4cff33b24eb8", @@ -158,7 +187,7 @@ func TestReconcileMetadata(t *testing.T) { wantStatus: reconcileStatusContinue, }, { - name: "removes everything but owner username when ready", + name: "removes everything from owner annotation but owner username when ready", input: &corev1alpha1.LogicalCluster{ ObjectMeta: metav1.ObjectMeta{ Labels: map[string]string{ diff --git a/pkg/reconciler/core/logicalcluster/logicalcluster_reconcile_terminator.go b/pkg/reconciler/core/logicalcluster/logicalcluster_reconcile_terminator.go new file mode 100644 index 00000000000..66132dcbf29 --- /dev/null +++ b/pkg/reconciler/core/logicalcluster/logicalcluster_reconcile_terminator.go @@ -0,0 +1,67 @@ +/* +Copyright 2025 The KCP Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +package logicalcluster + +import ( + "context" + + corev1alpha1 "github.com/kcp-dev/kcp/sdk/apis/core/v1alpha1" +) + +const LogicalClusterHasTerminatorFinalizer = "kcp.io/has-terminators" + +// terminatorReconciler will place the LogicalClusterHasTerminatorFinalizer finalizer on the LogicalCluster +// in order to prevent a logicalcluster from being deleted while it still has terminators set. +type terminatorReconciler struct{} + +func (r *terminatorReconciler) reconcile(ctx context.Context, logicalCluster *corev1alpha1.LogicalCluster) (reconcileStatus, error) { + var changed bool + // if there are still terminators, ensure that the finalizer is present + if len(logicalCluster.Status.Terminators) != 0 { + logicalCluster.Finalizers, changed = addUnique(logicalCluster.Finalizers, LogicalClusterHasTerminatorFinalizer) + // if not make sure that it is removed + } else { + logicalCluster.Finalizers, changed = removeByValue(logicalCluster.Finalizers, LogicalClusterHasTerminatorFinalizer) + } + + if changed { + // first update ObjectMeta before other reconcilers change status + return reconcileStatusStopAndRequeue, nil + } + + return reconcileStatusContinue, nil +} + +// addUnique adds t to s if not already present, returning the new slice and whether it was added. +func addUnique[T comparable](s []T, t T) ([]T, bool) { + for _, elem := range s { + if elem == t { + return s, false + } + } + return append(s, t), true +} + +// removeByValue removes t from s if present, returning the new slice and whether it was removed. 
+func removeByValue[T comparable](s []T, t T) ([]T, bool) { + for i, other := range s { + if other == t { + return append(s[:i], s[i+1:]...), true + } + } + return s, false +} diff --git a/pkg/reconciler/core/logicalcluster/logicalcluster_reconcile_terminator_test.go b/pkg/reconciler/core/logicalcluster/logicalcluster_reconcile_terminator_test.go new file mode 100644 index 00000000000..2101fd78b07 --- /dev/null +++ b/pkg/reconciler/core/logicalcluster/logicalcluster_reconcile_terminator_test.go @@ -0,0 +1,98 @@ +/* +Copyright 2025 The KCP Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package logicalcluster + +import ( + "context" + "testing" + + "github.com/google/go-cmp/cmp" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + corev1alpha1 "github.com/kcp-dev/kcp/sdk/apis/core/v1alpha1" +) + +func TestReconcile(t *testing.T) { + tests := []struct { + name string + logicalCluster *corev1alpha1.LogicalCluster + expectedFinalizers []string + }{ + { + name: "remove finalizer if there are no terminators", + logicalCluster: &corev1alpha1.LogicalCluster{ + Status: corev1alpha1.LogicalClusterStatus{ + Terminators: []corev1alpha1.LogicalClusterTerminator{}, + }, + ObjectMeta: metav1.ObjectMeta{ + Finalizers: []string{LogicalClusterHasTerminatorFinalizer}, + }, + }, + expectedFinalizers: []string{}, + }, + { + name: "has terminators, finalizer added", + logicalCluster: &corev1alpha1.LogicalCluster{ + Status: corev1alpha1.LogicalClusterStatus{ + Terminators: []corev1alpha1.LogicalClusterTerminator{"terminator1"}, + }, + ObjectMeta: metav1.ObjectMeta{ + Finalizers: []string{}, + }, + }, + expectedFinalizers: []string{LogicalClusterHasTerminatorFinalizer}, + }, + { + name: "do nothing if finalizers already present and terminators exist", + logicalCluster: &corev1alpha1.LogicalCluster{ + Status: corev1alpha1.LogicalClusterStatus{ + Terminators: []corev1alpha1.LogicalClusterTerminator{"terminator1"}, + }, + ObjectMeta: metav1.ObjectMeta{ + Finalizers: []string{LogicalClusterHasTerminatorFinalizer}, + }, + }, + expectedFinalizers: []string{LogicalClusterHasTerminatorFinalizer}, + }, + { + name: "do nothing if finalizers already absent and no terminators exist", + logicalCluster: &corev1alpha1.LogicalCluster{ + Status: corev1alpha1.LogicalClusterStatus{ + Terminators: []corev1alpha1.LogicalClusterTerminator{}, + }, + ObjectMeta: metav1.ObjectMeta{ + Finalizers: []string{}, + }, + }, + expectedFinalizers: []string{}, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + reconciler := &terminatorReconciler{} + _, err := 
reconciler.reconcile(context.Background(), tt.logicalCluster) + if err != nil { + t.Errorf("unexpected reconcile error %v", err) + } + if diff := cmp.Diff(tt.expectedFinalizers, tt.logicalCluster.Finalizers); diff != "" { + t.Errorf("unexpected finalizer diff:\n%s", diff) + } + }) + } +} diff --git a/pkg/reconciler/tenancy/replicateclusterrole/replicateclusterrole_controller.go b/pkg/reconciler/tenancy/replicateclusterrole/replicateclusterrole_controller.go index bda348654d5..8a91e2935fe 100644 --- a/pkg/reconciler/tenancy/replicateclusterrole/replicateclusterrole_controller.go +++ b/pkg/reconciler/tenancy/replicateclusterrole/replicateclusterrole_controller.go @@ -57,7 +57,7 @@ func HasUseRule(clusterName logicalcluster.Name, cr *rbacv1.ClusterRole) bool { } resources := sets.New[string](rule.Resources...) verbs := sets.New[string](rule.Verbs...) - if (resources.Has("workspacetypes") || resources.Has("*")) && (verbs.Has("use") || verbs.Has("initialize") || verbs.Has("*")) { + if (resources.Has("workspacetypes") || resources.Has("*")) && (verbs.Has("use") || verbs.Has("initialize") || verbs.Has("terminate") || verbs.Has("*")) { return true } } diff --git a/pkg/reconciler/tenancy/workspace/workspace_reconcile_deletion.go b/pkg/reconciler/tenancy/workspace/workspace_reconcile_deletion.go index f8e4ff5b635..b2496ce7704 100644 --- a/pkg/reconciler/tenancy/workspace/workspace_reconcile_deletion.go +++ b/pkg/reconciler/tenancy/workspace/workspace_reconcile_deletion.go @@ -52,7 +52,7 @@ func (r *deletionReconciler) reconcile(ctx context.Context, workspace *tenancyv1 finSet := sets.New(workspace.Finalizers...) 
// we want our finalizer to be removed last, so check if other finalizers exist - if finSet.Has(corev1alpha1.LogicalClusterFinalizer) && finSet.Len() > 1 { + if finSet.Has(corev1alpha1.LogicalClusterFinalizerName) && finSet.Len() > 1 { return reconcileStatusContinue, nil } @@ -64,7 +64,7 @@ func (r *deletionReconciler) reconcile(ctx context.Context, workspace *tenancyv1 // if the logicalcluster was never created, we can directly remove the // workspace finalizer if !ok { - workspace.Finalizers = sets.List(finSet.Delete(corev1alpha1.LogicalClusterFinalizer)) + workspace.Finalizers = sets.List(finSet.Delete(corev1alpha1.LogicalClusterFinalizerName)) return reconcileStatusContinue, nil } @@ -100,9 +100,9 @@ func (r *deletionReconciler) reconcile(ctx context.Context, workspace *tenancyv1 // fall-through } if apierrors.IsNotFound(getErr) { - if finSet.Has(corev1alpha1.LogicalClusterFinalizer) { - logger.Info(fmt.Sprintf("Removing finalizer %s", corev1alpha1.LogicalClusterFinalizer)) - workspace.Finalizers = sets.List(finSet.Delete(corev1alpha1.LogicalClusterFinalizer)) + if finSet.Has(corev1alpha1.LogicalClusterFinalizerName) { + logger.Info(fmt.Sprintf("Removing finalizer %s", corev1alpha1.LogicalClusterFinalizerName)) + workspace.Finalizers = sets.List(finSet.Delete(corev1alpha1.LogicalClusterFinalizerName)) return reconcileStatusStopAndRequeue, nil // spec change } return reconcileStatusContinue, nil diff --git a/pkg/reconciler/tenancy/workspace/workspace_reconcile_deletion_test.go b/pkg/reconciler/tenancy/workspace/workspace_reconcile_deletion_test.go index 835a03c44b4..b8e771b414b 100644 --- a/pkg/reconciler/tenancy/workspace/workspace_reconcile_deletion_test.go +++ b/pkg/reconciler/tenancy/workspace/workspace_reconcile_deletion_test.go @@ -93,13 +93,13 @@ func TestReconcile(t *testing.T) { ObjectMeta: metav1.ObjectMeta{ DeletionTimestamp: ptr.To(metav1.Now()), Finalizers: []string{ - corev1alpha1.LogicalClusterFinalizer, + corev1alpha1.LogicalClusterFinalizerName, 
"other-finalizer", }, }, }, expFinalizers: []string{ - corev1alpha1.LogicalClusterFinalizer, + corev1alpha1.LogicalClusterFinalizerName, "other-finalizer", }, }, @@ -110,7 +110,7 @@ func TestReconcile(t *testing.T) { ObjectMeta: metav1.ObjectMeta{ DeletionTimestamp: ptr.To(metav1.Now()), Finalizers: []string{ - corev1alpha1.LogicalClusterFinalizer, + corev1alpha1.LogicalClusterFinalizerName, }, }, Status: tenancyv1alpha1.WorkspaceStatus{ @@ -128,7 +128,7 @@ func TestReconcile(t *testing.T) { ObjectMeta: metav1.ObjectMeta{ DeletionTimestamp: ptr.To(metav1.Now()), Finalizers: []string{ - corev1alpha1.LogicalClusterFinalizer, + corev1alpha1.LogicalClusterFinalizerName, }, Annotations: map[string]string{ workspaceClusterAnnotationKey: "test", @@ -144,7 +144,7 @@ func TestReconcile(t *testing.T) { // we do not remove our finalizers in this case, as we wait // for another reconciliation loop expFinalizers: []string{ - corev1alpha1.LogicalClusterFinalizer, + corev1alpha1.LogicalClusterFinalizerName, }, // we do expect our logicalCluster to be removed expLogicalClusters: map[string]*corev1alpha1.LogicalCluster{}, @@ -156,7 +156,7 @@ func TestReconcile(t *testing.T) { ObjectMeta: metav1.ObjectMeta{ DeletionTimestamp: ptr.To(metav1.Now()), Finalizers: []string{ - corev1alpha1.LogicalClusterFinalizer, + corev1alpha1.LogicalClusterFinalizerName, }, }, }, diff --git a/pkg/reconciler/tenancy/workspace/workspace_reconcile_phase.go b/pkg/reconciler/tenancy/workspace/workspace_reconcile_phase.go index 456b755add7..a55489da2b4 100644 --- a/pkg/reconciler/tenancy/workspace/workspace_reconcile_phase.go +++ b/pkg/reconciler/tenancy/workspace/workspace_reconcile_phase.go @@ -60,6 +60,9 @@ func (r *phaseReconciler) reconcile(ctx context.Context, workspace *tenancyv1alp return reconcileStatusContinue, nil } + // set terminators during initializiation, as we want to show them to the user already + workspace.Status.Terminators = logicalCluster.Status.Terminators + 
workspace.Status.Initializers = logicalCluster.Status.Initializers if initializers := workspace.Status.Initializers; len(initializers) > 0 { diff --git a/pkg/reconciler/tenancy/workspace/workspace_reconcile_scheduling.go b/pkg/reconciler/tenancy/workspace/workspace_reconcile_scheduling.go index 0c14248bcf5..e69d41ff73d 100644 --- a/pkg/reconciler/tenancy/workspace/workspace_reconcile_scheduling.go +++ b/pkg/reconciler/tenancy/workspace/workspace_reconcile_scheduling.go @@ -45,6 +45,7 @@ import ( "github.com/kcp-dev/kcp/sdk/apis/core" corev1alpha1 "github.com/kcp-dev/kcp/sdk/apis/core/v1alpha1" "github.com/kcp-dev/kcp/sdk/apis/tenancy/initialization" + "github.com/kcp-dev/kcp/sdk/apis/tenancy/termination" tenancyv1alpha1 "github.com/kcp-dev/kcp/sdk/apis/tenancy/v1alpha1" conditionsv1alpha1 "github.com/kcp-dev/kcp/sdk/apis/third_party/conditions/apis/conditions/v1alpha1" "github.com/kcp-dev/kcp/sdk/apis/third_party/conditions/util/conditions" @@ -141,7 +142,7 @@ func (r *schedulingReconciler) reconcile(ctx context.Context, workspace *tenancy shardNameHash, hasShard := workspace.Annotations[WorkspaceShardHashAnnotationKey] clusterNameString, hasCluster := workspace.Annotations[workspaceClusterAnnotationKey] clusterName := logicalcluster.Name(clusterNameString) - hasFinalizer := sets.New[string](workspace.Finalizers...).Has(corev1alpha1.LogicalClusterFinalizer) + hasFinalizer := sets.New[string](workspace.Finalizers...).Has(corev1alpha1.LogicalClusterFinalizerName) parentThis, err := r.getLogicalCluster(logicalcluster.From(workspace)) if err != nil && !apierrors.IsNotFound(err) { @@ -180,7 +181,7 @@ func (r *schedulingReconciler) reconcile(ctx context.Context, workspace *tenancy workspace.Annotations[workspaceClusterAnnotationKey] = cluster.String() } if !hasFinalizer { - workspace.Finalizers = append(workspace.Finalizers, corev1alpha1.LogicalClusterFinalizer) + workspace.Finalizers = append(workspace.Finalizers, corev1alpha1.LogicalClusterFinalizerName) } if !hasShard 
|| !hasCluster || !hasFinalizer { // this is the first part of our two-phase commit @@ -319,6 +320,12 @@ func (r *schedulingReconciler) createLogicalCluster(ctx context.Context, shard * return err } + // add terminators + logicalCluster.Spec.Terminators, err = LogicalClusterTerminators(r.transitiveTypeResolver, r.getWorkspaceType, logicalcluster.NewPath(workspace.Spec.Type.Path), string(workspace.Spec.Type.Name)) + if err != nil { + return err + } + logicalClusterAdminClient, err := r.kcpLogicalClusterAdminClientFor(shard) if err != nil { return err @@ -371,6 +378,33 @@ func LogicalClustersInitializers( return initializers, nil } +// LogicalClusterTerminators returns the terminators for a LogicalCluster of a given +// fully-qualified WorkspaceType reference. +func LogicalClusterTerminators( + resolver workspacetypeexists.TransitiveTypeResolver, + getWorkspaceType func(clusterName logicalcluster.Path, name string) (*tenancyv1alpha1.WorkspaceType, error), + typePath logicalcluster.Path, typeName string, +) ([]corev1alpha1.LogicalClusterTerminator, error) { + wt, err := getWorkspaceType(typePath, typeName) + if err != nil { + return nil, err + } + wtAliases, err := resolver.Resolve(wt) + if err != nil { + return nil, err + } + + terminators := make([]corev1alpha1.LogicalClusterTerminator, 0, len(wtAliases)) + + for _, alias := range wtAliases { + if alias.Spec.Terminator { + terminators = append(terminators, termination.TerminatorForType(alias)) + } + } + + return terminators, nil +} + func (r *schedulingReconciler) updateLogicalClusterPhase(ctx context.Context, shard *corev1alpha1.Shard, cluster logicalcluster.Path, phase corev1alpha1.LogicalClusterPhaseType) error { logicalClusterAdminClient, err := r.kcpLogicalClusterAdminClientFor(shard) if err != nil { diff --git a/pkg/reconciler/tenancy/workspacetype/workspacetype_controller_reconcile.go b/pkg/reconciler/tenancy/workspacetype/workspacetype_controller_reconcile.go index 1f211fcbe6b..3fcf45ca224 100644 --- 
a/pkg/reconciler/tenancy/workspacetype/workspacetype_controller_reconcile.go +++ b/pkg/reconciler/tenancy/workspacetype/workspacetype_controller_reconcile.go @@ -27,7 +27,9 @@ import ( virtualworkspacesoptions "github.com/kcp-dev/kcp/cmd/virtual-workspaces/options" "github.com/kcp-dev/kcp/pkg/virtual/initializingworkspaces" + "github.com/kcp-dev/kcp/pkg/virtual/terminatingworkspaces" "github.com/kcp-dev/kcp/sdk/apis/tenancy/initialization" + "github.com/kcp-dev/kcp/sdk/apis/tenancy/termination" tenancyv1alpha1 "github.com/kcp-dev/kcp/sdk/apis/tenancy/v1alpha1" conditionsv1alpha1 "github.com/kcp-dev/kcp/sdk/apis/third_party/conditions/apis/conditions/v1alpha1" "github.com/kcp-dev/kcp/sdk/apis/third_party/conditions/util/conditions" @@ -73,19 +75,31 @@ func (c *controller) updateVirtualWorkspaceURLs(ctx context.Context, wt *tenancy continue } + base := u.Path + // add initializing workspace URLs u.Path = path.Join( - u.Path, + base, virtualworkspacesoptions.DefaultRootPathPrefix, initializingworkspaces.VirtualWorkspaceName, string(initialization.InitializerForType(wt)), ) desiredURLs.Insert(u.String()) + + // add terminating workspace URLs + u.Path = path.Join( + base, + virtualworkspacesoptions.DefaultRootPathPrefix, + terminatingworkspaces.VirtualWorkspaceName, + string(termination.TerminatorForType(wt)), + ) + + desiredURLs.Insert(u.String()) } wt.Status.VirtualWorkspaces = nil - for _, u := range sets.List[string](desiredURLs) { + for _, u := range sets.List(desiredURLs) { wt.Status.VirtualWorkspaces = append(wt.Status.VirtualWorkspaces, tenancyv1alpha1.VirtualWorkspace{ URL: u, }) diff --git a/pkg/reconciler/tenancy/workspacetype/workspacetype_controller_reconcile_test.go b/pkg/reconciler/tenancy/workspacetype/workspacetype_controller_reconcile_test.go index def0e5937d3..56178cc5516 100644 --- a/pkg/reconciler/tenancy/workspacetype/workspacetype_controller_reconcile_test.go +++ b/pkg/reconciler/tenancy/workspacetype/workspacetype_controller_reconcile_test.go @@ 
-139,8 +139,11 @@ func TestReconcile(t *testing.T) { Status: tenancyv1alpha1.WorkspaceTypeStatus{ VirtualWorkspaces: []tenancyv1alpha1.VirtualWorkspace{ {URL: "https://item.com/services/initializingworkspaces/root:org:team:ws:sometype"}, + {URL: "https://item.com/services/terminatingworkspaces/root:org:team:ws:sometype"}, {URL: "https://something.com/services/initializingworkspaces/root:org:team:ws:sometype"}, + {URL: "https://something.com/services/terminatingworkspaces/root:org:team:ws:sometype"}, {URL: "https://whatever.com/services/initializingworkspaces/root:org:team:ws:sometype"}, + {URL: "https://whatever.com/services/terminatingworkspaces/root:org:team:ws:sometype"}, }, Conditions: conditionsv1alpha1.Conditions{ { @@ -195,8 +198,11 @@ func TestReconcile(t *testing.T) { Status: tenancyv1alpha1.WorkspaceTypeStatus{ VirtualWorkspaces: []tenancyv1alpha1.VirtualWorkspace{ {URL: "https://item.com/services/initializingworkspaces/root:org:team:ws:sometype"}, + {URL: "https://item.com/services/terminatingworkspaces/root:org:team:ws:sometype"}, {URL: "https://something.com/services/initializingworkspaces/root:org:team:ws:sometype"}, + {URL: "https://something.com/services/terminatingworkspaces/root:org:team:ws:sometype"}, {URL: "https://whatever.com/services/initializingworkspaces/root:org:team:ws:sometype"}, + {URL: "https://whatever.com/services/terminatingworkspaces/root:org:team:ws:sometype"}, }, Conditions: conditionsv1alpha1.Conditions{ { diff --git a/pkg/virtual/framework/forwardingregistry/wrappers.go b/pkg/virtual/framework/forwardingregistry/wrappers.go index d0e7e04502b..1ac31b78c79 100644 --- a/pkg/virtual/framework/forwardingregistry/wrappers.go +++ b/pkg/virtual/framework/forwardingregistry/wrappers.go @@ -23,6 +23,7 @@ import ( "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/apis/meta/internalversion" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/labels" 
"k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" @@ -87,3 +88,73 @@ func WithLabelSelector(labelSelectorFrom func(ctx context.Context) labels.Requir } }) } + +// WithDeletionTimestamp creates a StorageWrapper which only returns objects which are marked +// for deletion and have a deletion timestamp. +func WithDeletionTimestamp() StorageWrapper { + return StorageWrapperFunc(func(groupResource schema.GroupResource, storage *StoreFuncs) { + delegateGetter := storage.GetterFunc + storage.GetterFunc = func(ctx context.Context, name string, options *metav1.GetOptions) (runtime.Object, error) { + obj, err := delegateGetter.Get(ctx, name, options) + if err != nil { + return nil, err + } + + if !hasDeletionTimestamp(ctx, obj) { + return nil, errors.NewNotFound(groupResource, name) + } + + return obj, err + } + + delegateLister := storage.ListerFunc + storage.ListerFunc = func(ctx context.Context, options *internalversion.ListOptions) (runtime.Object, error) { + result, err := delegateLister.List(ctx, options) + if err != nil { + return nil, err + } + + ul, _ := result.(*unstructured.UnstructuredList) + + filtered := []unstructured.Unstructured{} + for i, item := range ul.Items { + if hasDeletionTimestamp(ctx, &item) { + filtered = append(filtered, ul.Items[i]) + } + } + + ul.Items = filtered + + return ul, nil + } + + delegateWatcher := storage.WatcherFunc + storage.WatcherFunc = func(ctx context.Context, options *internalversion.ListOptions) (watch.Interface, error) { + wi, err := delegateWatcher.Watch(ctx, options) + if err != nil { + return nil, err + } + + filtered := watch.Filter(wi, func(in watch.Event) (out watch.Event, keep bool) { + return in, hasDeletionTimestamp(ctx, in.Object) + }) + + return filtered, nil + } + }) +} + +// hasDeletionTimestamp returns whether a runtime.object has a deletion timestamp +// wrapping the meta.Objects deletiontimestamp functionality. 
+func hasDeletionTimestamp(ctx context.Context, obj runtime.Object) bool { + metaObj, ok := obj.(metav1.Object) + if !ok { + // should never happen + return false + } + + if !metaObj.GetDeletionTimestamp().IsZero() { + return true + } + return false +} diff --git a/pkg/virtual/framework/internalapis/fixtures/workspaces.yaml b/pkg/virtual/framework/internalapis/fixtures/workspaces.yaml index 238a02fe68a..4342e6bd8f0 100644 --- a/pkg/virtual/framework/internalapis/fixtures/workspaces.yaml +++ b/pkg/virtual/framework/internalapis/fixtures/workspaces.yaml @@ -203,6 +203,12 @@ spec: phase: description: Phase of the workspace (Scheduling, Initializing, Ready). type: string + terminators: + description: terminators must be cleared by a controller before the + workspace is being deleted. + items: + type: string + type: array type: object required: - spec diff --git a/pkg/virtual/options/options.go b/pkg/virtual/options/options.go index 7e42a5060ac..0d17b7d96ae 100644 --- a/pkg/virtual/options/options.go +++ b/pkg/virtual/options/options.go @@ -29,6 +29,7 @@ import ( "github.com/kcp-dev/kcp/pkg/virtual/framework/rootapiserver" initializingworkspacesoptions "github.com/kcp-dev/kcp/pkg/virtual/initializingworkspaces/options" replicationoptions "github.com/kcp-dev/kcp/pkg/virtual/replication/options" + terminatingworkspaceoptions "github.com/kcp-dev/kcp/pkg/virtual/terminatingworkspaces/options" kcpinformers "github.com/kcp-dev/kcp/sdk/client/informers/externalversions" ) @@ -37,12 +38,14 @@ const virtualWorkspacesFlagPrefix = "virtual-workspaces-" type Options struct { APIExport *apiexportoptions.APIExport InitializingWorkspaces *initializingworkspacesoptions.InitializingWorkspaces + TerminatingWorkspaces *terminatingworkspaceoptions.TerminatingWorkspaces } func NewOptions() *Options { return &Options{ APIExport: apiexportoptions.New(), InitializingWorkspaces: initializingworkspacesoptions.New(), + TerminatingWorkspaces: terminatingworkspaceoptions.New(), } } @@ -51,12 +54,14 
@@ func (o *Options) Validate() []error { errs = append(errs, o.APIExport.Validate(virtualWorkspacesFlagPrefix)...) errs = append(errs, o.InitializingWorkspaces.Validate(virtualWorkspacesFlagPrefix)...) + errs = append(errs, o.TerminatingWorkspaces.Validate(virtualWorkspacesFlagPrefix)...) return errs } func (o *Options) AddFlags(fs *pflag.FlagSet) { o.InitializingWorkspaces.AddFlags(fs, virtualWorkspacesFlagPrefix) + o.TerminatingWorkspaces.AddFlags(fs, virtualWorkspacesFlagPrefix) } func (o *Options) NewVirtualWorkspaces( @@ -86,10 +91,16 @@ func (o *Options) NewVirtualWorkspaces( return nil, err } - all, err := Merge(apiexports, initializingworkspaces, replications) + terminatingworkspaces, err := o.TerminatingWorkspaces.NewVirtualWorkspaces(rootPathPrefix, config, wildcardKcpInformers) if err != nil { return nil, err } + + all, err := Merge(apiexports, initializingworkspaces, replications, terminatingworkspaces) + if err != nil { + return nil, err + } + return all, nil } diff --git a/pkg/virtual/terminatingworkspaces/builder/build.go b/pkg/virtual/terminatingworkspaces/builder/build.go new file mode 100644 index 00000000000..63c8c5512c4 --- /dev/null +++ b/pkg/virtual/terminatingworkspaces/builder/build.go @@ -0,0 +1,317 @@ +/* +Copyright 2025 The KCP Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package builder + +import ( + "context" + "encoding/json" + "fmt" + "net/http" + "net/url" + "strings" + + apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apiserver/pkg/authorization/authorizer" + genericapirequest "k8s.io/apiserver/pkg/endpoints/request" + genericapiserver "k8s.io/apiserver/pkg/server" + "k8s.io/client-go/rest" + "k8s.io/klog/v2" + "k8s.io/utils/ptr" + + kcpdynamic "github.com/kcp-dev/client-go/dynamic" + kcpkubernetesclientset "github.com/kcp-dev/client-go/kubernetes" + "github.com/kcp-dev/logicalcluster/v3" + + rootphase0 "github.com/kcp-dev/kcp/config/root-phase0" + "github.com/kcp-dev/kcp/pkg/authorization/delegated" + "github.com/kcp-dev/kcp/pkg/server/requestinfo" + "github.com/kcp-dev/kcp/pkg/virtual/framework" + virtualworkspacesdynamic "github.com/kcp-dev/kcp/pkg/virtual/framework/dynamic" + "github.com/kcp-dev/kcp/pkg/virtual/framework/dynamic/apidefinition" + "github.com/kcp-dev/kcp/pkg/virtual/framework/dynamic/apiserver" + dynamiccontext "github.com/kcp-dev/kcp/pkg/virtual/framework/dynamic/context" + "github.com/kcp-dev/kcp/pkg/virtual/framework/rootapiserver" + "github.com/kcp-dev/kcp/pkg/virtual/terminatingworkspaces" + apisv1alpha1 "github.com/kcp-dev/kcp/sdk/apis/apis/v1alpha1" + corev1alpha1 "github.com/kcp-dev/kcp/sdk/apis/core/v1alpha1" + "github.com/kcp-dev/kcp/sdk/apis/tenancy/termination" + tenancyv1alpha1 "github.com/kcp-dev/kcp/sdk/apis/tenancy/v1alpha1" +) + +const ( + wildcardLogicalClustersName = terminatingworkspaces.VirtualWorkspaceName + "-wildcard-logicalclusters" + logicalClustersName = terminatingworkspaces.VirtualWorkspaceName + "-logicalclusters" +) + +func BuildVirtualWorkspace( + cfg *rest.Config, + rootPathPrefix string, + dynamicClusterClient kcpdynamic.ClusterInterface, + kubeClusterClient kcpkubernetesclientset.ClusterInterface, +) ([]rootapiserver.NamedVirtualWorkspace, error) { + if !strings.HasSuffix(rootPathPrefix, 
"/") { + rootPathPrefix += "/" + } + + logicalClusterResource := apisv1alpha1.APIResourceSchema{} + if err := rootphase0.Unmarshal("apiresourceschema-logicalclusters.core.kcp.io.yaml", &logicalClusterResource); err != nil { + return nil, fmt.Errorf("failed to unmarshal logicalclusters resource: %w", err) + } + bs, err := json.Marshal(&apiextensionsv1.JSONSchemaProps{ + Type: "object", + XPreserveUnknownFields: ptr.To(true), + }) + if err != nil { + return nil, err + } + for i := range logicalClusterResource.Spec.Versions { + v := &logicalClusterResource.Spec.Versions[i] + v.Schema.Raw = bs // wipe schemas. We don't want validation here. + } + + cachingAuthorizer := delegated.NewCachingAuthorizer(kubeClusterClient, authorizerWithCache, delegated.CachingOptions{}) + wildcardLogicalClusters := &virtualworkspacesdynamic.DynamicVirtualWorkspace{ + RootPathResolver: framework.RootPathResolverFunc(func(urlPath string, requestContext context.Context) (accepted bool, prefixToStrip string, completedContext context.Context) { + cluster, apiDomain, prefixToStrip, ok := digestUrl(urlPath, rootPathPrefix) + if !ok { + return false, "", requestContext + } + + if !cluster.Wildcard { + // this virtual workspace requires that a wildcard be provided + return false, "", requestContext + } + + completedContext = genericapirequest.WithCluster(requestContext, cluster) + completedContext = dynamiccontext.WithAPIDomainKey(completedContext, apiDomain) + return true, prefixToStrip, completedContext + }), + Authorizer: cachingAuthorizer, + ReadyChecker: framework.ReadyFunc(func() error { + return nil + }), + BootstrapAPISetManagement: func(mainConfig genericapiserver.CompletedConfig) (apidefinition.APIDefinitionSetGetter, error) { + return &singleResourceAPIDefinitionSetProvider{ + config: mainConfig, + dynamicClusterClient: dynamicClusterClient, + exposeSubresources: false, + resource: &logicalClusterResource, + storageProvider: filteredLogicalClusterReadOnlyRestStorage, + }, nil + }, + } + 
+ logicalClusters := &virtualworkspacesdynamic.DynamicVirtualWorkspace{ + RootPathResolver: framework.RootPathResolverFunc(func(urlPath string, requestContext context.Context) (accepted bool, prefixToStrip string, completedContext context.Context) { + cluster, apiDomain, prefixToStrip, ok := digestUrl(urlPath, rootPathPrefix) + if !ok { + return false, "", requestContext + } + + if cluster.Wildcard { + // this virtual workspace requires that a specific cluster be provided + return false, "", requestContext + } + + // this delegating server only works for logicalclusters.core.kcp.io and discovery calls + resourceURL := strings.TrimPrefix(urlPath, prefixToStrip) + if !isLogicalClusterRequest(resourceURL) && !isDiscoveryRequest(resourceURL) { + return false, "", requestContext + } + + completedContext = genericapirequest.WithCluster(requestContext, cluster) + completedContext = dynamiccontext.WithAPIDomainKey(completedContext, apiDomain) + return true, prefixToStrip, completedContext + }), + Authorizer: cachingAuthorizer, + ReadyChecker: framework.ReadyFunc(func() error { + return nil + }), + BootstrapAPISetManagement: func(mainConfig genericapiserver.CompletedConfig) (apidefinition.APIDefinitionSetGetter, error) { + return &singleResourceAPIDefinitionSetProvider{ + config: mainConfig, + dynamicClusterClient: dynamicClusterClient, + exposeSubresources: true, + resource: &logicalClusterResource, + storageProvider: filteredLogicalClusterStatusWriteOnly, + }, nil + }, + } + + return []rootapiserver.NamedVirtualWorkspace{ + {Name: wildcardLogicalClustersName, VirtualWorkspace: wildcardLogicalClusters}, + {Name: logicalClustersName, VirtualWorkspace: logicalClusters}, + }, nil +} + +var resolver = requestinfo.NewFactory() + +func isLogicalClusterRequest(path string) bool { + info, err := resolver.NewRequestInfo(&http.Request{URL: &url.URL{Path: path}}) + if err != nil { + return false + } + return info.IsResourceRequest && info.APIGroup == 
corev1alpha1.SchemeGroupVersion.Group && info.Resource == "logicalclusters" +} + +func digestUrl(urlPath, rootPathPrefix string) ( + cluster genericapirequest.Cluster, + key dynamiccontext.APIDomainKey, + logicalPath string, + accepted bool, +) { + if !strings.HasPrefix(urlPath, rootPathPrefix) { + return genericapirequest.Cluster{}, dynamiccontext.APIDomainKey(""), "", false + } + withoutRootPathPrefix := strings.TrimPrefix(urlPath, rootPathPrefix) + + // Incoming requests to this virtual workspace will look like: + // /services/terminatingworkspaces/<terminator>/clusters/<cluster>/apis/core.kcp.io/v1alpha1/logicalclusters + // └───────────┐ + // Where the withoutRootPathPrefix starts here: ┘ + parts := strings.SplitN(withoutRootPathPrefix, "/", 2) + if len(parts) < 2 { + return genericapirequest.Cluster{}, "", "", false + } + + terminatorName := parts[0] + if terminatorName == "" { + return genericapirequest.Cluster{}, "", "", false + } + + realPath := "/" + parts[1] + + // /services/terminatingworkspaces/<terminator>/clusters/<cluster>/apis/core.kcp.io/v1alpha1/logicalclusters + // ┌─────────────────────────────┘ + // We are now here: ┘ + // Now, we parse out the logical cluster. 
+ if !strings.HasPrefix(realPath, "/clusters/") { + return genericapirequest.Cluster{}, "", "", false // don't accept + } + + withoutClustersPrefix := strings.TrimPrefix(realPath, "/clusters/") + parts = strings.SplitN(withoutClustersPrefix, "/", 2) + logicalclusterPath := logicalcluster.NewPath(parts[0]) + realPath = "/" + if len(parts) > 1 { + realPath += parts[1] + } + + cluster = genericapirequest.Cluster{} + if logicalclusterPath == logicalcluster.Wildcard { + cluster.Wildcard = true + } else { + var ok bool + cluster.Name, ok = logicalclusterPath.Name() + if !ok { + return genericapirequest.Cluster{}, "", "", false + } + } + + return cluster, dynamiccontext.APIDomainKey(terminatorName), strings.TrimSuffix(urlPath, realPath), true +} + +type singleResourceAPIDefinitionSetProvider struct { + config genericapiserver.CompletedConfig + dynamicClusterClient kcpdynamic.ClusterInterface + resource *apisv1alpha1.APIResourceSchema + exposeSubresources bool + storageProvider func(ctx context.Context, clusterClient kcpdynamic.ClusterInterface, terminator corev1alpha1.LogicalClusterTerminator) (apiserver.RestProviderFunc, error) +} + +func (a *singleResourceAPIDefinitionSetProvider) GetAPIDefinitionSet(ctx context.Context, key dynamiccontext.APIDomainKey) (apis apidefinition.APIDefinitionSet, apisExist bool, err error) { + restProvider, err := a.storageProvider(ctx, a.dynamicClusterClient, corev1alpha1.LogicalClusterTerminator(key)) + if err != nil { + return nil, false, err + } + + apiDefinition, err := apiserver.CreateServingInfoFor( + a.config, + a.resource, + corev1alpha1.SchemeGroupVersion.Version, + restProvider, + ) + if err != nil { + return nil, false, fmt.Errorf("failed to create serving info: %w", err) + } + + apis = apidefinition.APIDefinitionSet{ + schema.GroupVersionResource{ + Group: corev1alpha1.SchemeGroupVersion.Group, + Version: corev1alpha1.SchemeGroupVersion.Version, + Resource: "logicalclusters", + }: apiDefinition, + } + + return apis, len(apis) > 
0, nil +} + +var _ apidefinition.APIDefinitionSetGetter = &singleResourceAPIDefinitionSetProvider{} + +func authorizerWithCache(ctx context.Context, cache delegated.Cache, attr authorizer.Attributes) (authorizer.Decision, string, error) { + clusterName, name, err := termination.TypeFrom(corev1alpha1.LogicalClusterTerminator(dynamiccontext.APIDomainKeyFrom(ctx))) + if err != nil { + klog.FromContext(ctx).V(2).Info(err.Error()) + return authorizer.DecisionNoOpinion, "unable to determine terminator", fmt.Errorf("access not permitted") + } + + authz, err := cache.Get(clusterName) + if err != nil { + return authorizer.DecisionNoOpinion, "error", err + } + + SARAttributes := authorizer.AttributesRecord{ + APIGroup: tenancyv1alpha1.SchemeGroupVersion.Group, + APIVersion: tenancyv1alpha1.SchemeGroupVersion.Version, + User: attr.GetUser(), + Verb: "terminate", + Name: name, + Resource: "workspacetypes", + ResourceRequest: true, + } + + return authz.Authorize(ctx, SARAttributes) +} + +// isDiscoveryRequest determines if a request is a discovery request or discovery +// caching request. Specifically, it allows: +// * /api +// * /api/{version} +// * /apis +// * /apis/{api-group} +// * /apis/{api-group}/{version} +// as long as they have no sub-paths. 
+func isDiscoveryRequest(path string) bool { + info, err := resolver.NewRequestInfo(&http.Request{URL: &url.URL{Path: path}}) + if err != nil { + return false + } + + // eliminate any resource paths directly + if info.IsResourceRequest { + return false + } + + // eliminate any non-resource paths which are not part + // of discovery or discovery caching + if info.Path == "/healthz" || info.Path == "/" { + return false + } + + return true +} diff --git a/pkg/virtual/terminatingworkspaces/builder/build_test.go b/pkg/virtual/terminatingworkspaces/builder/build_test.go new file mode 100644 index 00000000000..35b4f6f04a0 --- /dev/null +++ b/pkg/virtual/terminatingworkspaces/builder/build_test.go @@ -0,0 +1,46 @@ +/* +Copyright 2025 The KCP Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package builder + +import ( + "fmt" + "testing" +) + +func TestIsDiscoveryRequest(t *testing.T) { + tt := []struct { + path string + exp bool + }{ + {"/api", true}, + {"/api/v1", true}, + {"/api/somegroup", true}, + {"/apis/somegroup", true}, + {"/apis/somegroup/v1", true}, + {"/healthz", false}, + {"/", false}, + {"/api/v1/namespace", false}, + {"/apis/somegroup/v1/namespaces", false}, + } + for _, tc := range tt { + t.Run(fmt.Sprintf("%q", tc.path), func(t *testing.T) { + if res := isDiscoveryRequest(tc.path); res != tc.exp { + t.Errorf("Exp %t, got %t", tc.exp, res) + } + }) + } +} diff --git a/pkg/virtual/terminatingworkspaces/builder/forwarding.go b/pkg/virtual/terminatingworkspaces/builder/forwarding.go new file mode 100644 index 00000000000..174c8755695 --- /dev/null +++ b/pkg/virtual/terminatingworkspaces/builder/forwarding.go @@ -0,0 +1,249 @@ +/* +Copyright 2025 The KCP Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package builder + +import ( + "context" + "fmt" + "slices" + + "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions" + apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" + structuralschema "k8s.io/apiextensions-apiserver/pkg/apiserver/schema" + "k8s.io/apiextensions-apiserver/pkg/apiserver/validation" + "k8s.io/apiextensions-apiserver/pkg/registry/customresource" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/api/validation/path" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/util/validation/field" + "k8s.io/apiserver/pkg/registry/rest" + "k8s.io/klog/v2" + + kcpdynamic "github.com/kcp-dev/client-go/dynamic" + + "github.com/kcp-dev/kcp/pkg/virtual/framework/dynamic/apiserver" + registry "github.com/kcp-dev/kcp/pkg/virtual/framework/forwardingregistry" + corev1alpha1 "github.com/kcp-dev/kcp/sdk/apis/core/v1alpha1" + "github.com/kcp-dev/kcp/sdk/apis/tenancy/termination" +) + +// filteredLogicalClusterReadOnlyRestStorage creates a RestProvider which will +// return LogicalClusters marked for deletion. +func filteredLogicalClusterReadOnlyRestStorage( + ctx context.Context, + clusterClient kcpdynamic.ClusterInterface, + terminator corev1alpha1.LogicalClusterTerminator, +) (apiserver.RestProviderFunc, error) { + labelRequirement, err := terminatorLabelSetRequirement(terminator) + if err != nil { + return nil, err + } + + return registry.ProvideReadOnlyRestStorage( + ctx, + func(ctx context.Context) (kcpdynamic.ClusterInterface, error) { return clusterClient, nil }, + &registry.StorageWrappers{ + registry.WithDeletionTimestamp(), + registry.WithStaticLabelSelector(labelRequirement), + }, + nil, + ) +} + +// filteredLogicalClusterStatusWriteOnly creates a RestProvider which will +// return LogicalClusters marked for deletion. 
Updates can only be made against +// the supplied terminator. +func filteredLogicalClusterStatusWriteOnly( + ctx context.Context, + clusterclient kcpdynamic.ClusterInterface, + terminator corev1alpha1.LogicalClusterTerminator, +) (apiserver.RestProviderFunc, error) { + labelRequirement, err := terminatorLabelSetRequirement(terminator) + if err != nil { + return nil, err + } + + return func( + resource schema.GroupVersionResource, + kind schema.GroupVersionKind, + listKind schema.GroupVersionKind, + typer runtime.ObjectTyper, + tableConvertor rest.TableConvertor, + namespaceScoped bool, + schemaValidator validation.SchemaValidator, + subresourcesSchemaValidator map[string]validation.SchemaValidator, + structuralSchema *structuralschema.Structural, + ) (mainStorage rest.Storage, subresourceStorages map[string]rest.Storage) { + statusSchemaValidate, statusEnabled := subresourcesSchemaValidator["status"] + + var statusSpec *apiextensions.CustomResourceSubresourceStatus + if statusEnabled { + statusSpec = &apiextensions.CustomResourceSubresourceStatus{} + } + + strategy := customresource.NewStrategy( + typer, + namespaceScoped, + kind, + path.ValidatePathSegmentName, + schemaValidator, + statusSchemaValidate, + structuralSchema, + statusSpec, + nil, // no scale subresource needed + []apiextensionsv1.SelectableField{}, + ) + + storage, statusStorage := registry.NewStorage( + ctx, + resource, + "", // no hash, as this is not backed by an APIExport + kind, + listKind, + strategy, + nil, // currently we are not using any categories + tableConvertor, + nil, // we are not using any replicasPathMapping + func(ctx context.Context) (kcpdynamic.ClusterInterface, error) { return clusterclient, nil }, + nil, // use the default retryBackoff + ®istry.StorageWrappers{ + registry.WithDeletionTimestamp(), + registry.WithStaticLabelSelector(labelRequirement), + withUpdateValidation(terminator), + }, + ) + + // we want to expose some but not all the allowed endpoints, so filter by 
exposing just the funcs we need + subresourceStorages = make(map[string]rest.Storage) + if statusEnabled { + subresourceStorages["status"] = &struct { + registry.FactoryFunc + registry.DestroyerFunc + + registry.GetterFunc + registry.UpdaterFunc + // patch is implicit as we have get + update + + registry.TableConvertorFunc + registry.CategoriesProviderFunc + registry.ResetFieldsStrategyFunc + }{ + FactoryFunc: statusStorage.FactoryFunc, + DestroyerFunc: statusStorage.DestroyerFunc, + + GetterFunc: statusStorage.GetterFunc, + UpdaterFunc: statusStorage.UpdaterFunc, + + TableConvertorFunc: statusStorage.TableConvertorFunc, + CategoriesProviderFunc: statusStorage.CategoriesProviderFunc, + ResetFieldsStrategyFunc: statusStorage.ResetFieldsStrategyFunc, + } + } + + // only expose GET on the regular storage + storages := &struct { + registry.FactoryFunc + registry.ListFactoryFunc + registry.DestroyerFunc + + registry.GetterFunc + + registry.TableConvertorFunc + registry.CategoriesProviderFunc + registry.ResetFieldsStrategyFunc + }{ + FactoryFunc: storage.FactoryFunc, + ListFactoryFunc: storage.ListFactoryFunc, + DestroyerFunc: storage.DestroyerFunc, + + GetterFunc: storage.GetterFunc, + + TableConvertorFunc: storage.TableConvertorFunc, + CategoriesProviderFunc: storage.CategoriesProviderFunc, + ResetFieldsStrategyFunc: storage.ResetFieldsStrategyFunc, + } + return storages, subresourceStorages + }, nil +} + +// withUpdateValidation wraps validateTerminatorStatusUpdate. 
+func withUpdateValidation(terminator corev1alpha1.LogicalClusterTerminator) registry.StorageWrapper {
+	return registry.StorageWrapperFunc(func(groupResource schema.GroupResource, storage *registry.StoreFuncs) {
+		delegateUpdater := storage.UpdaterFunc
+		storage.UpdaterFunc = func(ctx context.Context, name string, objInfo rest.UpdatedObjectInfo, createValidation rest.ValidateObjectFunc, updateValidation rest.ValidateObjectUpdateFunc, forceAllowCreate bool, options *v1.UpdateOptions) (runtime.Object, bool, error) {
+			// we only need to validate the status sub-resource, as any other types of updates are not possible on a storage layer
+			validationFunc := validateTerminatorStatusUpdate(terminator, name)
+			return delegateUpdater.Update(ctx, name, objInfo, createValidation, validationFunc, forceAllowCreate, options)
+		}
+	})
+}
+
+// validateTerminatorStatusUpdate validates that an update to a LogicalCluster only removes the passed terminator and only that terminator.
+func validateTerminatorStatusUpdate(terminator corev1alpha1.LogicalClusterTerminator, name string) rest.ValidateObjectUpdateFunc {
+	return func(ctx context.Context, obj, old runtime.Object) error {
+		logger := klog.FromContext(ctx)
+		previous, _, err := unstructured.NestedStringSlice(old.(*unstructured.Unstructured).UnstructuredContent(), "status", "terminators")
+		if err != nil {
+			logger.Error(err, "error accessing terminators from old object")
+			return errors.NewInternalError(fmt.Errorf("error accessing terminators from old object: %w", err))
+		}
+		current, _, err := unstructured.NestedStringSlice(obj.(*unstructured.Unstructured).UnstructuredContent(), "status", "terminators")
+		if err != nil {
+			logger.Error(err, "error accessing terminators from new object")
+			return errors.NewInternalError(fmt.Errorf("error accessing terminators from new object: %w", err))
+		}
+
+		invalidUpdateErr := errors.NewInvalid(
+			corev1alpha1.Kind("LogicalCluster"),
+			name,
+			field.ErrorList{field.Invalid(
+				field.NewPath("status", "terminators"),
+				current,
+				fmt.Sprintf("only removing the %q terminator is supported", terminator),
+			)},
+		)
+
+		// exactly one entry must have been removed by this update ...
+		if len(previous)-len(current) != 1 {
+			return invalidUpdateErr
+		}
+		// ... and the removed entry must be the terminator owned by this virtual
+		// workspace: it has to be present in the old object and absent from the
+		// new one. Checking only the new object would let a controller remove a
+		// different terminator whenever its own is not present.
+		if !slices.Contains(previous, string(terminator)) || slices.Contains(current, string(terminator)) {
+			return invalidUpdateErr
+		}
+
+		return nil
+	}
+}
+
+// terminatorLabelSetRequirement creates a label requirement which requires the
+// terminator hashlabel to be set.
+func terminatorLabelSetRequirement(terminator corev1alpha1.LogicalClusterTerminator) (labels.Requirements, error) {
+	labelSelector := map[string]string{}
+
+	key, value := termination.TerminatorToLabel(terminator)
+	labelSelector[key] = value
+
+	requirements, selectable := labels.SelectorFromSet(labelSelector).Requirements()
+	if !selectable {
+		return nil, fmt.Errorf("unable to create a selector from the provided labels")
+	}
+
+	return requirements, nil
+}
+*/ + +package builder + +import ( + "context" + "testing" + + "github.com/go-logr/logr" + + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime" + + corev1alpha1 "github.com/kcp-dev/kcp/sdk/apis/core/v1alpha1" +) + +func TestValidateOnlyTerminatorChanged(t *testing.T) { + tests := []struct { + name string + expErr bool + terminator string + old runtime.Object + new runtime.Object + }{ + { + name: "remove owned terminator", + expErr: false, + terminator: "t1", + old: &corev1alpha1.LogicalCluster{ + Status: corev1alpha1.LogicalClusterStatus{ + Terminators: []corev1alpha1.LogicalClusterTerminator{ + "t1", + "t2", + }, + }, + }, + new: &corev1alpha1.LogicalCluster{ + Status: corev1alpha1.LogicalClusterStatus{ + Terminators: []corev1alpha1.LogicalClusterTerminator{ + "t2", + }, + }, + }, + }, + { + name: "remove non-owned terminator", + expErr: true, + terminator: "t1", + old: &corev1alpha1.LogicalCluster{ + Status: corev1alpha1.LogicalClusterStatus{ + Terminators: []corev1alpha1.LogicalClusterTerminator{ + "t1", + "t2", + }, + }, + }, + new: &corev1alpha1.LogicalCluster{ + Status: corev1alpha1.LogicalClusterStatus{ + Terminators: []corev1alpha1.LogicalClusterTerminator{ + "t1", + }, + }, + }, + }, + { + name: "no object changes", + expErr: true, // we expect an error here, as we always expect the number of terminators to decrease + old: &corev1alpha1.LogicalCluster{}, + new: &corev1alpha1.LogicalCluster{}, + }, + } + + // swallow any log output, so we don't pollute test results + ctx := logr.NewContext(context.Background(), logr.Discard()) + + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + // mimic the real unstructured.Unstructured objects which are coming into the validateTerminatorsUpdate funcs + oldMap, err := runtime.DefaultUnstructuredConverter.ToUnstructured(tc.old) + if err != nil { + t.Error(err) + } + newMap, err := runtime.DefaultUnstructuredConverter.ToUnstructured(tc.new) + if err != nil { + 
t.Error(err) + } + + err = validateTerminatorStatusUpdate(corev1alpha1.LogicalClusterTerminator(tc.terminator), "test-cluster")(ctx, &unstructured.Unstructured{Object: newMap}, &unstructured.Unstructured{Object: oldMap}) + if !tc.expErr && err != nil { + t.Errorf("expected no error, but got %q", err) + } else if tc.expErr && err == nil { + t.Errorf("expected an error, but got none") + } + }) + } +} diff --git a/pkg/virtual/terminatingworkspaces/doc.go b/pkg/virtual/terminatingworkspaces/doc.go new file mode 100644 index 00000000000..7e3aef64808 --- /dev/null +++ b/pkg/virtual/terminatingworkspaces/doc.go @@ -0,0 +1,43 @@ +/* +Copyright 2025 The KCP Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package terminatingworkspaces and its sub-packages provide the Terminating Workspace Virtual Workspace. +// +// It allows for cross-cluster LIST + WATCH of LogicalClusters which: +// - are marked for deletion via a DeletionTimestamp +// - request termination by a specific controller +// +// That is, a request for +// GET /services/terminatingworkspaces//clusters/*/apis/core.kcp.io/v1alpha1/logicalclusters +// will return a list of LogicalCluster objects which are Terminating and for which status.terminators contains the +// . 
+// WATCH semantics are similar to (and implemented by) timestamp and terminators selectors - a LogicalCluster that stops +// matching the requirements to be served (not being marked for deletion, not requesting termination by +// the controller) will be removed from the stream with a synthetic Deleted event. +/* + __ + _(\ |@@| +(__/\__ \--/ __ + \___|----| | __ + \ }{ /\ )_ / _\ + /\__/\ \__O (__ + (--/\--) \__/ + _)( )(_ + `---''---` +*/ +package terminatingworkspaces + +const VirtualWorkspaceName string = "terminatingworkspaces" diff --git a/pkg/virtual/terminatingworkspaces/options/options.go b/pkg/virtual/terminatingworkspaces/options/options.go new file mode 100644 index 00000000000..e726e7e8347 --- /dev/null +++ b/pkg/virtual/terminatingworkspaces/options/options.go @@ -0,0 +1,72 @@ +/* +Copyright 2025 The KCP Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package options + +import ( + "path" + + "github.com/spf13/pflag" + + "k8s.io/client-go/rest" + + kcpdynamic "github.com/kcp-dev/client-go/dynamic" + kcpkubernetesclientset "github.com/kcp-dev/client-go/kubernetes" + + "github.com/kcp-dev/kcp/pkg/virtual/framework/rootapiserver" + "github.com/kcp-dev/kcp/pkg/virtual/terminatingworkspaces" + "github.com/kcp-dev/kcp/pkg/virtual/terminatingworkspaces/builder" + kcpinformers "github.com/kcp-dev/kcp/sdk/client/informers/externalversions" +) + +type TerminatingWorkspaces struct{} + +func New() *TerminatingWorkspaces { + return &TerminatingWorkspaces{} +} + +func (o *TerminatingWorkspaces) AddFlags(flags *pflag.FlagSet, prefix string) { + if o == nil { + return + } +} + +func (o *TerminatingWorkspaces) Validate(flagPrefix string) []error { + if o == nil { + return nil + } + errs := []error{} + + return errs +} + +func (o *TerminatingWorkspaces) NewVirtualWorkspaces( + rootPathPrefix string, + config *rest.Config, + wildcardKcpInformers kcpinformers.SharedInformerFactory, +) (workspaces []rootapiserver.NamedVirtualWorkspace, err error) { + config = rest.AddUserAgent(rest.CopyConfig(config), "terminatingworkspaces-virtual-workspace") + kubeClusterClient, err := kcpkubernetesclientset.NewForConfig(config) + if err != nil { + return nil, err + } + dynamicClusterClient, err := kcpdynamic.NewForConfig(config) + if err != nil { + return nil, err + } + + return builder.BuildVirtualWorkspace(config, path.Join(rootPathPrefix, terminatingworkspaces.VirtualWorkspaceName), dynamicClusterClient, kubeClusterClient) +} diff --git a/sdk/apis/core/v1alpha1/logicalcluster_types.go b/sdk/apis/core/v1alpha1/logicalcluster_types.go index d26dad600b8..d007e751f03 100644 --- a/sdk/apis/core/v1alpha1/logicalcluster_types.go +++ b/sdk/apis/core/v1alpha1/logicalcluster_types.go @@ -52,9 +52,9 @@ const ( // LogicalClusterName is the name of the LogicalCluster singleton. 
LogicalClusterName = "cluster" - // LogicalClusterFinalizer attached to the owner of the LogicalCluster resource (usually a Workspace) so that we can control + // LogicalClusterFinalizerName attached to the owner of the LogicalCluster resource (usually a Workspace) so that we can control // deletion of LogicalCluster resources. - LogicalClusterFinalizer = "core.kcp.io/logicalcluster" + LogicalClusterFinalizerName = "core.kcp.io/logicalcluster" ) // LogicalClusterPhaseType is the type of the current phase of the logical cluster. @@ -72,6 +72,7 @@ const ( // This should be used when we really can't serve the logical cluster content and not some // temporary flakes, like readiness probe failing. LogicalClusterPhaseUnavailable LogicalClusterPhaseType = "Unavailable" + LogicalClusterPhaseDeleting LogicalClusterPhaseType = "Deleting" ) // LogicalClusterInitializer is a unique string corresponding to a logical cluster @@ -80,6 +81,12 @@ const ( // +kubebuilder:validation:Pattern:="^([a-z0-9]([-a-z0-9]*[a-z0-9])?(:[a-z0-9]([-a-z0-9]*[a-z0-9])?)*(:[a-z0-9][a-z0-9]([-a-z0-9]*[a-z0-9])?))|(system:.+)$" type LogicalClusterInitializer string +// LogicalClusterTerminator is a unique string corresponding to a logical cluster +// terminator controller. +// +// +kubebuilder:validation:Pattern:="^([a-z0-9]([-a-z0-9]*[a-z0-9])?(:[a-z0-9]([-a-z0-9]*[a-z0-9])?)*(:[a-z0-9][a-z0-9]([-a-z0-9]*[a-z0-9])?))|(system:.+)$" +type LogicalClusterTerminator string + // LogicalClusterSpec is the specification of the LogicalCluster resource. type LogicalClusterSpec struct { // DirectlyDeletable indicates that this logical cluster can be directly deleted by the user @@ -104,6 +111,12 @@ type LogicalClusterSpec struct { // // +optional Initializers []LogicalClusterInitializer `json:"initializers,omitempty"` + + // Terminators are set on creation by the system and copied to status when + // termination starts. 
+ // + // +optional + Terminators []LogicalClusterTerminator `json:"terminators,omitempty"` } // LogicalClusterOwner is a reference to a resource controlling the life-cycle of a LogicalCluster. @@ -172,6 +185,13 @@ type LogicalClusterStatus struct { // // +optional Initializers []LogicalClusterInitializer `json:"initializers,omitempty"` + + // Terminators are set on creation by the system and must be cleared + // by a controller before the logical cluster can be deleted. The LogicalCluster object + // will stay in the phase "Deleting" until all terminator are cleared. + // + // +optional + Terminators []LogicalClusterTerminator `json:"terminators,omitempty"` } func (in *LogicalCluster) SetConditions(c conditionsv1alpha1.Conditions) { diff --git a/sdk/apis/core/v1alpha1/zz_generated.deepcopy.go b/sdk/apis/core/v1alpha1/zz_generated.deepcopy.go index 90b206416e0..91707f543e0 100644 --- a/sdk/apis/core/v1alpha1/zz_generated.deepcopy.go +++ b/sdk/apis/core/v1alpha1/zz_generated.deepcopy.go @@ -118,6 +118,11 @@ func (in *LogicalClusterSpec) DeepCopyInto(out *LogicalClusterSpec) { *out = make([]LogicalClusterInitializer, len(*in)) copy(*out, *in) } + if in.Terminators != nil { + in, out := &in.Terminators, &out.Terminators + *out = make([]LogicalClusterTerminator, len(*in)) + copy(*out, *in) + } return } @@ -146,6 +151,11 @@ func (in *LogicalClusterStatus) DeepCopyInto(out *LogicalClusterStatus) { *out = make([]LogicalClusterInitializer, len(*in)) copy(*out, *in) } + if in.Terminators != nil { + in, out := &in.Terminators, &out.Terminators + *out = make([]LogicalClusterTerminator, len(*in)) + copy(*out, *in) + } return } diff --git a/sdk/apis/tenancy/termination/utils.go b/sdk/apis/tenancy/termination/utils.go new file mode 100644 index 00000000000..aa3aa234da7 --- /dev/null +++ b/sdk/apis/tenancy/termination/utils.go @@ -0,0 +1,64 @@ +/* +Copyright 2025 The KCP Authors. 
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package termination
+
+import (
+	"crypto/sha256"
+	"fmt"
+	"strings"
+
+	"k8s.io/apimachinery/pkg/util/validation"
+
+	"github.com/kcp-dev/logicalcluster/v3"
+
+	corev1alpha1 "github.com/kcp-dev/kcp/sdk/apis/core/v1alpha1"
+	tenancyv1alpha1 "github.com/kcp-dev/kcp/sdk/apis/tenancy/v1alpha1"
+)
+
+// TerminatorForType determines the identifier for the implicit terminator associated with the WorkspaceType.
+func TerminatorForType(wt *tenancyv1alpha1.WorkspaceType) corev1alpha1.LogicalClusterTerminator {
+	return corev1alpha1.LogicalClusterTerminator(logicalcluster.From(wt).Path().Join(wt.Name).String())
+}
+
+// TypeFrom determines the WorkspaceType workspace and name from a terminator name.
+func TypeFrom(terminator corev1alpha1.LogicalClusterTerminator) (logicalcluster.Name, string, error) {
+	separatorIndex := strings.LastIndex(string(terminator), ":")
+	switch separatorIndex {
+	case -1:
+		return "", "", fmt.Errorf("expected workspace terminator in form workspace:name, not %q", terminator)
+	default:
+		return logicalcluster.Name(terminator[:separatorIndex]), tenancyv1alpha1.ObjectName(tenancyv1alpha1.WorkspaceTypeName(terminator[separatorIndex+1:])), nil
+	}
+}
+
+// TerminatorToLabel transforms a terminator into a key-value pair to add to a label set.
We use a hash +// to create a unique identifier from this information, prefixing the hash in order to create a value which +// is unlikely to collide, and adding the full hash as a value in order to make it difficult to forge the pair. +func TerminatorToLabel(terminator corev1alpha1.LogicalClusterTerminator) (string, string) { + hash := fmt.Sprintf("%x", sha256.Sum224([]byte(terminator))) + labelKeyHashLength := validation.LabelValueMaxLength - len(tenancyv1alpha1.WorkspaceTerminatorLabelPrefix) + return tenancyv1alpha1.WorkspaceTerminatorLabelPrefix + hash[0:labelKeyHashLength], hash +} + +// TerminatorsToStrings converts a list of terminators into a list of strings. +func TerminatorsToStrings(terminator []corev1alpha1.LogicalClusterTerminator) []string { + s := make([]string, 0, len(terminator)) + for _, f := range terminator { + s = append(s, string(f)) + } + return s +} diff --git a/sdk/apis/tenancy/v1alpha1/types_workspace.go b/sdk/apis/tenancy/v1alpha1/types_workspace.go index 175898f60af..d2f1eca3b42 100644 --- a/sdk/apis/tenancy/v1alpha1/types_workspace.go +++ b/sdk/apis/tenancy/v1alpha1/types_workspace.go @@ -268,6 +268,12 @@ type WorkspaceStatus struct { // // +optional Initializers []corev1alpha1.LogicalClusterInitializer `json:"initializers,omitempty"` + + // terminators must be cleared by a controller before the workspace is being + // deleted. 
+ // + // +optional + Terminators []corev1alpha1.LogicalClusterTerminator `json:"terminators,omitempty"` } func (in *Workspace) SetConditions(c conditionsv1alpha1.Conditions) { diff --git a/sdk/apis/tenancy/v1alpha1/types_workspacetype.go b/sdk/apis/tenancy/v1alpha1/types_workspacetype.go index 21e9be2e380..49bbff0570c 100644 --- a/sdk/apis/tenancy/v1alpha1/types_workspacetype.go +++ b/sdk/apis/tenancy/v1alpha1/types_workspacetype.go @@ -74,6 +74,19 @@ type WorkspaceTypeSpec struct { // +optional Initializer bool `json:"initializer,omitempty"` + // Terminator determines if this WorkspaceType has an associated terminating + // controller. These controllers are used to add functionality to a Workspace; + // all controllers must finish their work before the Workspace is being deleted. + // + // One terminating controller is supported per WorkspaceType; the identifier + // for this terminator will be a colon-delimited string using the workspace in which + // the WorkspaceType is defined, and the type's name. For example, if a + // WorkspaceType `example` is created in the `root:org` workspace, the implicit + // terminator name is `root:org:example`. + // + // +optional + Terminator bool `json:"terminator,omitempty"` + // extend is a list of other WorkspaceTypes whose initializers and limitAllowedChildren // and limitAllowedParents this WorkspaceType is inheriting. By (transitively) extending // another WorkspaceType, this WorkspaceType will be considered as that @@ -263,6 +276,10 @@ const ( // and the set of labels with this prefix is enforced to match the set of initializers by a mutating admission // webhook. WorkspaceInitializerLabelPrefix = "initializer.internal.kcp.io/" + // WorkspaceTerminatorLabelPrefix is the prefix for labels which match Workspace.Status.Terminators, + // and the set of labels with this prefix is enforced to match the set of terminator by a mutating admission + // webhook. 
+ WorkspaceTerminatorLabelPrefix = "terminator.internal.kcp.io/" ) const ( diff --git a/sdk/apis/tenancy/v1alpha1/zz_generated.deepcopy.go b/sdk/apis/tenancy/v1alpha1/zz_generated.deepcopy.go index d3bf3f7d9ad..45923fbd953 100644 --- a/sdk/apis/tenancy/v1alpha1/zz_generated.deepcopy.go +++ b/sdk/apis/tenancy/v1alpha1/zz_generated.deepcopy.go @@ -479,6 +479,11 @@ func (in *WorkspaceStatus) DeepCopyInto(out *WorkspaceStatus) { *out = make([]corev1alpha1.LogicalClusterInitializer, len(*in)) copy(*out, *in) } + if in.Terminators != nil { + in, out := &in.Terminators, &out.Terminators + *out = make([]corev1alpha1.LogicalClusterTerminator, len(*in)) + copy(*out, *in) + } return } diff --git a/sdk/client/applyconfiguration/core/v1alpha1/logicalclusterspec.go b/sdk/client/applyconfiguration/core/v1alpha1/logicalclusterspec.go index 0f2ba8db573..8fa21402035 100644 --- a/sdk/client/applyconfiguration/core/v1alpha1/logicalclusterspec.go +++ b/sdk/client/applyconfiguration/core/v1alpha1/logicalclusterspec.go @@ -28,6 +28,7 @@ type LogicalClusterSpecApplyConfiguration struct { DirectlyDeletable *bool `json:"directlyDeletable,omitempty"` Owner *LogicalClusterOwnerApplyConfiguration `json:"owner,omitempty"` Initializers []corev1alpha1.LogicalClusterInitializer `json:"initializers,omitempty"` + Terminators []corev1alpha1.LogicalClusterTerminator `json:"terminators,omitempty"` } // LogicalClusterSpecApplyConfiguration constructs a declarative configuration of the LogicalClusterSpec type for use with @@ -61,3 +62,13 @@ func (b *LogicalClusterSpecApplyConfiguration) WithInitializers(values ...corev1 } return b } + +// WithTerminators adds the given value to the Terminators field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Terminators field. 
+func (b *LogicalClusterSpecApplyConfiguration) WithTerminators(values ...corev1alpha1.LogicalClusterTerminator) *LogicalClusterSpecApplyConfiguration { + for i := range values { + b.Terminators = append(b.Terminators, values[i]) + } + return b +} diff --git a/sdk/client/applyconfiguration/core/v1alpha1/logicalclusterstatus.go b/sdk/client/applyconfiguration/core/v1alpha1/logicalclusterstatus.go index d1a17a65be3..8bc7ba4713a 100644 --- a/sdk/client/applyconfiguration/core/v1alpha1/logicalclusterstatus.go +++ b/sdk/client/applyconfiguration/core/v1alpha1/logicalclusterstatus.go @@ -30,6 +30,7 @@ type LogicalClusterStatusApplyConfiguration struct { Phase *corev1alpha1.LogicalClusterPhaseType `json:"phase,omitempty"` Conditions *conditionsv1alpha1.Conditions `json:"conditions,omitempty"` Initializers []corev1alpha1.LogicalClusterInitializer `json:"initializers,omitempty"` + Terminators []corev1alpha1.LogicalClusterTerminator `json:"terminators,omitempty"` } // LogicalClusterStatusApplyConfiguration constructs a declarative configuration of the LogicalClusterStatus type for use with @@ -71,3 +72,13 @@ func (b *LogicalClusterStatusApplyConfiguration) WithInitializers(values ...core } return b } + +// WithTerminators adds the given value to the Terminators field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Terminators field. 
+func (b *LogicalClusterStatusApplyConfiguration) WithTerminators(values ...corev1alpha1.LogicalClusterTerminator) *LogicalClusterStatusApplyConfiguration { + for i := range values { + b.Terminators = append(b.Terminators, values[i]) + } + return b +} diff --git a/sdk/client/applyconfiguration/tenancy/v1alpha1/workspacestatus.go b/sdk/client/applyconfiguration/tenancy/v1alpha1/workspacestatus.go index 8d4bc898bcd..ad651609b57 100644 --- a/sdk/client/applyconfiguration/tenancy/v1alpha1/workspacestatus.go +++ b/sdk/client/applyconfiguration/tenancy/v1alpha1/workspacestatus.go @@ -29,6 +29,7 @@ type WorkspaceStatusApplyConfiguration struct { Phase *corev1alpha1.LogicalClusterPhaseType `json:"phase,omitempty"` Conditions *conditionsv1alpha1.Conditions `json:"conditions,omitempty"` Initializers []corev1alpha1.LogicalClusterInitializer `json:"initializers,omitempty"` + Terminators []corev1alpha1.LogicalClusterTerminator `json:"terminators,omitempty"` } // WorkspaceStatusApplyConfiguration constructs a declarative configuration of the WorkspaceStatus type for use with @@ -62,3 +63,13 @@ func (b *WorkspaceStatusApplyConfiguration) WithInitializers(values ...corev1alp } return b } + +// WithTerminators adds the given value to the Terminators field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Terminators field. 
+func (b *WorkspaceStatusApplyConfiguration) WithTerminators(values ...corev1alpha1.LogicalClusterTerminator) *WorkspaceStatusApplyConfiguration { + for i := range values { + b.Terminators = append(b.Terminators, values[i]) + } + return b +} diff --git a/sdk/client/applyconfiguration/tenancy/v1alpha1/workspacetypespec.go b/sdk/client/applyconfiguration/tenancy/v1alpha1/workspacetypespec.go index 46d112cc239..0ac26768d13 100644 --- a/sdk/client/applyconfiguration/tenancy/v1alpha1/workspacetypespec.go +++ b/sdk/client/applyconfiguration/tenancy/v1alpha1/workspacetypespec.go @@ -26,6 +26,7 @@ import ( // with apply. type WorkspaceTypeSpecApplyConfiguration struct { Initializer *bool `json:"initializer,omitempty"` + Terminator *bool `json:"terminator,omitempty"` Extend *WorkspaceTypeExtensionApplyConfiguration `json:"extend,omitempty"` AdditionalWorkspaceLabels map[string]string `json:"additionalWorkspaceLabels,omitempty"` DefaultChildWorkspaceType *WorkspaceTypeReferenceApplyConfiguration `json:"defaultChildWorkspaceType,omitempty"` @@ -50,6 +51,14 @@ func (b *WorkspaceTypeSpecApplyConfiguration) WithInitializer(value bool) *Works return b } +// WithTerminator sets the Terminator field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Terminator field is set to the value of the last call. +func (b *WorkspaceTypeSpecApplyConfiguration) WithTerminator(value bool) *WorkspaceTypeSpecApplyConfiguration { + b.Terminator = &value + return b +} + // WithExtend sets the Extend field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Extend field is set to the value of the last call. 
diff --git a/sdk/testing/server/fixture.go b/sdk/testing/server/fixture.go index 54e62bc22d6..61580da2c4f 100644 --- a/sdk/testing/server/fixture.go +++ b/sdk/testing/server/fixture.go @@ -37,6 +37,7 @@ import ( "golang.org/x/sync/errgroup" "sigs.k8s.io/yaml" + "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/util/wait" @@ -527,6 +528,15 @@ func artifact(t TestingT, server RunningServer, producer func() (runtime.Object, // the test requesting retention regardless of server's scope. t.Cleanup(func() { data, err := producer() + // Do not fail the test if the source object does not exist anymore. + // Required for tests which create objects and delete them later. + // By making this exception we will create artifacts if the test fails + // prematurely before the deletion for debugging, but doesn't fail if + // the test succeeds in deleting them. + if errors.IsNotFound(err) { + t.Log("Skipping artifact creation, as object does not exist") + return + } require.NoError(t, err, "error fetching artifact") accessor, ok := data.(metav1.Object) diff --git a/test/e2e/virtual/initializingworkspaces/virtualworkspace_test.go b/test/e2e/virtual/initializingworkspaces/virtualworkspace_test.go index e8c0b70fdc2..0816a4ac0a1 100644 --- a/test/e2e/virtual/initializingworkspaces/virtualworkspace_test.go +++ b/test/e2e/virtual/initializingworkspaces/virtualworkspace_test.go @@ -27,6 +27,7 @@ import ( jsonpatch "github.com/evanphx/json-patch" "github.com/google/go-cmp/cmp" + "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" corev1 "k8s.io/api/core/v1" @@ -46,6 +47,7 @@ import ( kcpkubernetesclientset "github.com/kcp-dev/client-go/kubernetes" "github.com/kcp-dev/logicalcluster/v3" + "github.com/kcp-dev/kcp/pkg/virtual/initializingworkspaces" "github.com/kcp-dev/kcp/sdk/apis/core" corev1alpha1 "github.com/kcp-dev/kcp/sdk/apis/core/v1alpha1" 
"github.com/kcp-dev/kcp/sdk/apis/tenancy/initialization" @@ -261,7 +263,10 @@ func TestInitializingWorkspacesVirtualWorkspaceAccess(t *testing.T) { require.NoError(t, err) vwURLs := []string{} for _, vwURL := range workspacetypes[initializer].Status.VirtualWorkspaces { - vwURLs = append(vwURLs, vwURL.URL) + // only add urls belonging to initializing workspaces + if strings.Contains(vwURL.URL, initializingworkspaces.VirtualWorkspaceName) { + vwURLs = append(vwURLs, vwURL.URL) + } } targetVwURL, foundTargetVwURL, err := framework.VirtualWorkspaceURL(ctx, sourceKcpClusterClient, ws, vwURLs) @@ -356,10 +361,9 @@ func TestInitializingWorkspacesVirtualWorkspaceAccess(t *testing.T) { t.Log("Ensure that LIST calls through the virtual workspace eventually show the correct values") for _, wsName := range wsNames { - require.Eventually(t, func() bool { + require.EventuallyWithT(t, func(c *assert.CollectT) { _, err := sourceKcpClusterClient.CoreV1alpha1().Cluster(wsPath.Join(wsName)).LogicalClusters().Get(ctx, corev1alpha1.LogicalClusterName, metav1.GetOptions{}) - require.True(t, err == nil || errors.IsForbidden(err), "got %#v error getting logicalcluster %q, expected unauthorized or success", err, wsPath.Join(wsName)) - return err == nil + require.NoError(c, err, "got %#v error getting logicalcluster %q, expected success", err, wsPath.Join(wsName)) }, wait.ForeverTestTimeout, 100*time.Millisecond) } diff --git a/test/e2e/virtual/terminatingworkspaces/virtualworkspace_test.go b/test/e2e/virtual/terminatingworkspaces/virtualworkspace_test.go new file mode 100644 index 00000000000..a669096cfdc --- /dev/null +++ b/test/e2e/virtual/terminatingworkspaces/virtualworkspace_test.go @@ -0,0 +1,748 @@ +/* +Copyright 2025 The KCP Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package terminatingworkspaces + +import ( + "context" + "encoding/json" + "fmt" + "math/rand" + "path" + "slices" + "strings" + "testing" + "time" + + jsonpatch "github.com/evanphx/json-patch" + "github.com/google/go-cmp/cmp" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + rbacv1 "k8s.io/api/rbac/v1" + "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/types" + "k8s.io/apimachinery/pkg/util/wait" + "k8s.io/apimachinery/pkg/watch" + "k8s.io/apiserver/pkg/endpoints/discovery" + "k8s.io/client-go/rest" + + kcpdiscovery "github.com/kcp-dev/client-go/discovery" + kcpkubernetesclientset "github.com/kcp-dev/client-go/kubernetes" + "github.com/kcp-dev/logicalcluster/v3" + + "github.com/kcp-dev/kcp/cmd/virtual-workspaces/options" + "github.com/kcp-dev/kcp/pkg/virtual/terminatingworkspaces" + "github.com/kcp-dev/kcp/sdk/apis/core" + corev1alpha1 "github.com/kcp-dev/kcp/sdk/apis/core/v1alpha1" + "github.com/kcp-dev/kcp/sdk/apis/tenancy/termination" + tenancyv1alpha1 "github.com/kcp-dev/kcp/sdk/apis/tenancy/v1alpha1" + "github.com/kcp-dev/kcp/sdk/apis/third_party/conditions/util/conditions" + kcpclientset "github.com/kcp-dev/kcp/sdk/client/clientset/versioned/cluster" + kcptesting "github.com/kcp-dev/kcp/sdk/testing" + kcptestinghelpers "github.com/kcp-dev/kcp/sdk/testing/helpers" + "github.com/kcp-dev/kcp/test/e2e/framework" +) + +func TestTerminatingWorkspacesVirtualWorkspaceDiscovery(t *testing.T) { + t.Parallel() + framework.Suite(t, 
"control-plane") + + source := kcptesting.SharedKcpServer(t) + rootShardCfg := source.RootShardSystemMasterBaseConfig(t) + rootShardCfg.Host += path.Join(options.DefaultRootPathPrefix, terminatingworkspaces.VirtualWorkspaceName, "something") + + virtualWorkspaceDiscoveryClient, err := kcpdiscovery.NewForConfig(rootShardCfg) + require.NoError(t, err) + + _, apiResourceLists, err := virtualWorkspaceDiscoveryClient.ServerGroupsAndResources() + require.NoError(t, err) + + require.Empty(t, cmp.Diff([]*metav1.APIResourceList{{ + GroupVersion: "v1", + }, { + TypeMeta: metav1.TypeMeta{ + Kind: "APIResourceList", + APIVersion: "v1", + }, + GroupVersion: "core.kcp.io/v1alpha1", + APIResources: []metav1.APIResource{ + { + Kind: "LogicalCluster", + Name: "logicalclusters", + SingularName: "logicalcluster", + Categories: []string{"kcp"}, + Verbs: metav1.Verbs{"get", "list", "watch"}, + StorageVersionHash: discovery.StorageVersionHash("", "core.kcp.io", "v1alpha1", "LogicalCluster"), + }, + { + Kind: "LogicalCluster", + Name: "logicalclusters/status", + }, + }, + }}, apiResourceLists)) +} + +func TestTerminatingWorkspacesVirtualWorkspaceAccess(t *testing.T) { + t.Parallel() + framework.Suite(t, "control-plane") + + source := kcptesting.SharedKcpServer(t) + wsPath, _ := kcptesting.NewWorkspaceFixture(t, source, core.RootCluster.Path()) + ctx, cancelFunc := context.WithCancel(context.Background()) + t.Cleanup(cancelFunc) + + sourceConfig := source.BaseConfig(t) + + sourceKcpClusterClient, err := kcpclientset.NewForConfig(sourceConfig) + require.NoError(t, err) + + kubeClusterClient, err := kcpkubernetesclientset.NewForConfig(sourceConfig) + require.NoError(t, err) + + username := "user-1" + framework.AdmitWorkspaceAccess(ctx, t, kubeClusterClient, wsPath, []string{username}, nil, false) + + testLabelSelector := map[string]string{ + "internal.kcp.io/e2e-test": t.Name(), + } + + t.Log("Create workspacetypes with terminators") + // store workspaces and their randomized names under
easy to find aliases + workspaceTypes := map[string]*tenancyv1alpha1.WorkspaceType{ + "alpha": {}, + "beta": {}, + "gamma": {}, + } + for name := range workspaceTypes { + wst := &tenancyv1alpha1.WorkspaceType{ + ObjectMeta: metav1.ObjectMeta{ + // WorkspaceTypes and the terminator names will have to be globally unique, so we add some suffix here + // to ensure that parallel test runs do not impact our ability to verify this behavior. + Name: name + "-" + randSuffix(), + }, + Spec: tenancyv1alpha1.WorkspaceTypeSpec{ + Terminator: true, + }, + } + workspaceTypes[name] = wst + } + // make gamma extend alpha and beta + workspaceTypes["gamma"].Spec.Extend.With = []tenancyv1alpha1.WorkspaceTypeReference{ + {Path: wsPath.String(), Name: tenancyv1alpha1.TypeName(workspaceTypes["alpha"].Name)}, + {Path: wsPath.String(), Name: tenancyv1alpha1.TypeName(workspaceTypes["beta"].Name)}, + } + + // create workspacetypes + for _, wst := range workspaceTypes { + _, err := sourceKcpClusterClient.TenancyV1alpha1().Cluster(wsPath).WorkspaceTypes().Create(ctx, wst, metav1.CreateOptions{}) + require.NoError(t, err) + source.Artifact(t, func() (runtime.Object, error) { + return sourceKcpClusterClient.TenancyV1alpha1().Cluster(wsPath).WorkspaceTypes().Get(ctx, wst.Name, metav1.GetOptions{}) + }) + } + + t.Log("Wait for WorkspaceTypes to have their type extensions resolved") + for _, wst := range workspaceTypes { + name := wst.Name + kcptestinghelpers.EventuallyReady(t, func() (conditions.Getter, error) { + return sourceKcpClusterClient.TenancyV1alpha1().Cluster(wsPath).WorkspaceTypes().Get(ctx, name, metav1.GetOptions{}) + }, "could not wait for readiness on WorkspaceType %s|%s", wsPath.String(), name) + } + + t.Log("Create workspaces using the new types") + // store workspaces and their randomized names under easy to find aliases + workspaces := map[string]*tenancyv1alpha1.Workspace{ + "alpha": {}, + "beta": {}, + "gamma": {}, + } + for name, wst := range workspaceTypes { + var ws 
*tenancyv1alpha1.Workspace + require.EventuallyWithT(t, func(c *assert.CollectT) { + ws, err = sourceKcpClusterClient.TenancyV1alpha1().Cluster(wsPath).Workspaces().Create(ctx, workspaceForType(wst, testLabelSelector), metav1.CreateOptions{}) + require.NoError(c, err) + }, wait.ForeverTestTimeout, time.Millisecond*100) + source.Artifact(t, func() (runtime.Object, error) { + return sourceKcpClusterClient.TenancyV1alpha1().Cluster(wsPath).Workspaces().Get(ctx, ws.Name, metav1.GetOptions{}) + }) + workspaces[name] = ws + } + + t.Log("Wait until workspaces are assigned to a shard, in phase ready, and have terminators") + for name, ws := range workspaces { + var w *tenancyv1alpha1.Workspace + require.EventuallyWithT(t, func(c *assert.CollectT) { + w, err = sourceKcpClusterClient.TenancyV1alpha1().Cluster(wsPath).Workspaces().Get(ctx, ws.Name, metav1.GetOptions{}) + require.NoError(c, err) + require.Contains(c, w.Annotations, "internal.tenancy.kcp.io/shard") + require.Equal(c, w.Status.Phase, corev1alpha1.LogicalClusterPhaseReady) + require.NotEmpty(c, w.Status.Terminators) + }, wait.ForeverTestTimeout, time.Millisecond*100) + workspaces[name] = w + } + + t.Log("Send workspaces into deleting state") + for _, ws := range workspaces { + require.EventuallyWithT(t, func(c *assert.CollectT) { + err = sourceKcpClusterClient.TenancyV1alpha1().Cluster(wsPath).Workspaces().Delete(ctx, ws.Name, metav1.DeleteOptions{}) + require.NoError(c, err) + }, wait.ForeverTestTimeout, time.Millisecond*100) + } + + t.Log("Ensure workspaces still exist and have a deletion timestamp") + for _, ws := range workspaces { + require.EventuallyWithT(t, func(c *assert.CollectT) { + ws, err := sourceKcpClusterClient.TenancyV1alpha1().Cluster(wsPath).Workspaces().Get(ctx, ws.Name, metav1.GetOptions{}) + require.NoError(c, err) + require.False(c, ws.GetDeletionTimestamp().IsZero()) + }, wait.ForeverTestTimeout, time.Millisecond*100) + } + + t.Log("Wait for workspace types to have virtual workspace URLs 
published") + for name, wst := range workspaceTypes { + var wt *tenancyv1alpha1.WorkspaceType + require.EventuallyWithT(t, func(c *assert.CollectT) { + wt, err = sourceKcpClusterClient.TenancyV1alpha1().Cluster(wsPath).WorkspaceTypes().Get(ctx, wst.Name, metav1.GetOptions{}) + require.NoError(c, err) + require.NotEmpty(c, wt.Status.VirtualWorkspaces) + }, wait.ForeverTestTimeout, time.Millisecond*100) + workspaceTypes[name] = wt + } + + t.Log("Create clients through the virtual workspace") + // We need to track all combinations of virtualWorkspaces (defined by + // workspacetypes) and targetWorkspace (defined by the shard it's on). This is + // required because in multi-shard setups, the gamma workspace can be on a + // different shard than alpha and beta. As a result, when using alpha's or + // beta's virtual workspace, we need to use gammas shard url when we want to + // access gamma. + // first key is workspaceType, second key is targetWorkspace + user1VwKcpClusterClients := make(map[string]map[string]kcpclientset.ClusterInterface) + adminVwKcpClusterClients := make(map[string]map[string]kcpclientset.ClusterInterface) + + for wstName, wst := range workspaceTypes { + vwURLs := []string{} + for _, vwURL := range wst.Status.VirtualWorkspaces { + // filter out any URLs not belonging to terminating virtual workspace + if strings.Contains(vwURL.URL, terminatingworkspaces.VirtualWorkspaceName) { + vwURLs = append(vwURLs, vwURL.URL) + } + } + + for targetWsName, targetWorkspace := range workspaces { + // only if our workspacetype is part of the terminators of the target workspace, we need to build + // the clientset + if slices.Contains(targetWorkspace.Status.Terminators, termination.TerminatorForType(wst)) { + // skip any combinations, which are not able to see any workspaces. + // Namely these are alpha&beta clients, which use the shard url that + // gamma is not scheduled on. 
We cannot simplify this filtering, because + // at the moment the workspaces get assigned to shards randomly, meaning + // all combinations of alpha/beta/gamma are possible. Additionally by + // doing this filtering now, we can simplify the testing below, because + // we only have clients that yield usable results. + if targetWorkspace.Name != workspaces[wstName].Name && targetWorkspace.Annotations["internal.tenancy.kcp.io/shard"] == workspaces[wstName].Annotations["internal.tenancy.kcp.io/shard"] { + continue + } + + targetVwURL, foundTargetVwURL, err := framework.VirtualWorkspaceURL(ctx, sourceKcpClusterClient, targetWorkspace, vwURLs) + require.NoError(t, err) + require.True(t, foundTargetVwURL) + + // build the clientsets + virtualWorkspaceConfig := rest.AddUserAgent(rest.CopyConfig(sourceConfig), t.Name()+"-virtual") + virtualWorkspaceConfig.Host = targetVwURL + + user1Config := framework.StaticTokenUserConfig("user-1", virtualWorkspaceConfig) + + // make sure the outer map exists, before adding to the inner map + if _, ok := user1VwKcpClusterClients[wstName]; !ok { + user1VwKcpClusterClients[wstName] = make(map[string]kcpclientset.ClusterInterface) + } + virtualKcpClusterClient, err := kcpclientset.NewForConfig(user1Config) + require.NoError(t, err) + user1VwKcpClusterClients[wstName][targetWsName] = virtualKcpClusterClient + + // make sure the outer map exists, before adding to the inner map + if _, ok := adminVwKcpClusterClients[wstName]; !ok { + adminVwKcpClusterClients[wstName] = make(map[string]kcpclientset.ClusterInterface) + } + adminVirtualKcpClusterClient, err := kcpclientset.NewForConfig(virtualWorkspaceConfig) + require.NoError(t, err) + adminVwKcpClusterClients[wstName][targetWsName] = adminVirtualKcpClusterClient + } + } + } + + t.Log("Ensure that LIST calls through the virtual workspace as admin succeed") + for name := range workspaces { + require.EventuallyWithT(t, func(c *assert.CollectT) { + for _, client := range 
adminVwKcpClusterClients[name] { + _, err := client.CoreV1alpha1().LogicalClusters().List(ctx, metav1.ListOptions{}) + require.NoError(c, err) + } + }, wait.ForeverTestTimeout, 100*time.Millisecond) + } + + t.Log("Ensure that LIST calls through the virtual workspace fail authorization") + for name := range workspaces { + require.EventuallyWithT(t, func(c *assert.CollectT) { + for _, client := range user1VwKcpClusterClients[name] { + _, err := client.CoreV1alpha1().LogicalClusters().List(ctx, metav1.ListOptions{}) + require.True(c, errors.IsForbidden(err)) + } + }, wait.ForeverTestTimeout, 100*time.Millisecond) + } + + t.Log("Set up RBAC to allow future calls to succeed") + for _, wt := range workspaceTypes { + require.EventuallyWithT(t, func(c *assert.CollectT) { + role, err := kubeClusterClient.Cluster(wsPath).RbacV1().ClusterRoles().Create(ctx, &rbacv1.ClusterRole{ + ObjectMeta: metav1.ObjectMeta{ + Name: string(termination.TerminatorForType(wt)) + "-terminator", + }, + Rules: []rbacv1.PolicyRule{ + { + Verbs: []string{"terminate"}, + Resources: []string{"workspacetypes"}, + ResourceNames: []string{wt.Name}, + APIGroups: []string{"tenancy.kcp.io"}, + }, + }, + }, metav1.CreateOptions{}) + require.NoError(c, err) + source.Artifact(t, func() (runtime.Object, error) { + return kubeClusterClient.Cluster(wsPath).RbacV1().ClusterRoles().Get(ctx, role.Name, metav1.GetOptions{}) + }) + binding, err := kubeClusterClient.Cluster(wsPath).RbacV1().ClusterRoleBindings().Create(ctx, &rbacv1.ClusterRoleBinding{ + ObjectMeta: metav1.ObjectMeta{ + Name: role.Name, + }, + RoleRef: rbacv1.RoleRef{ + Kind: "ClusterRole", + APIGroup: "rbac.authorization.k8s.io", + Name: role.Name, + }, + Subjects: []rbacv1.Subject{{ + APIGroup: "rbac.authorization.k8s.io", + Kind: "User", + Name: "user-1", + }}, + }, metav1.CreateOptions{}) + require.NoError(c, err) + source.Artifact(t, func() (runtime.Object, error) { + return kubeClusterClient.Cluster(wsPath).RbacV1().ClusterRoleBindings().Get(ctx, 
binding.Name, metav1.GetOptions{}) + }) + }, wait.ForeverTestTimeout, 100*time.Millisecond) + } + + t.Log("Ensure that LIST calls through the virtual workspace eventually show the correct values") + for name := range workspaces { + require.EventuallyWithT(t, func(c *assert.CollectT) { + for _, client := range user1VwKcpClusterClients[name] { + _, err := client.CoreV1alpha1().LogicalClusters().List(ctx, metav1.ListOptions{}) + require.NoError(c, err) + } + }, wait.ForeverTestTimeout, 100*time.Millisecond) + } + + alphaTerminator := string(termination.TerminatorForType(workspaceTypes["alpha"])) + betaTerminator := string(termination.TerminatorForType(workspaceTypes["beta"])) + gammaTerminator := string(termination.TerminatorForType(workspaceTypes["gamma"])) + // expect alpha and beta to see two logicalclusters each: the one of their own + // respective workspacetype and the one from workspacetype gamma since it + // inherits both alpha and beta + expLogicalClusters := map[string][][]string{ + "alpha": { + {alphaTerminator}, + {alphaTerminator, betaTerminator, gammaTerminator}, + }, + "beta": { + {betaTerminator}, + {alphaTerminator, betaTerminator, gammaTerminator}, + }, + "gamma": { + {alphaTerminator, betaTerminator, gammaTerminator}, + }, + } + + t.Log("Ensure that Logicalclusters have the expected terminators set") + for name := range workspaces { + var clusters []corev1alpha1.LogicalCluster + require.EventuallyWithT(t, func(c *assert.CollectT) { + for _, client := range user1VwKcpClusterClients[name] { + cls, err := client.CoreV1alpha1().LogicalClusters().List(ctx, metav1.ListOptions{}) + require.NoError(c, err) + clusters = append(clusters, cls.Items...) 
+ } + }, wait.ForeverTestTimeout, 100*time.Millisecond) + + // check that the number of logical clusters matches + require.Equal(t, len(expLogicalClusters[name]), len(clusters)) + + for _, cluster := range clusters { + // check that spec terminators are set correctly + st := termination.TerminatorsToStrings(cluster.Spec.Terminators) + require.Contains(t, expLogicalClusters[name], st) // contains compares the terminators with all objects in the exp [][]string and does the heavy lifting for us + // NOTE(review): the next assertion re-checks the spec terminators (st) a second time; to actually verify status, it should compare termination.TerminatorsToStrings(cluster.Status.Terminators) — confirm intent + require.Contains(t, expLogicalClusters[name], st) + } + } + + t.Log("Testing Modifications through virtual workspace") + for name, wst := range workspaceTypes { + t.Logf("For workspacetype/virtual-workspace %q", name) + + for _, client := range user1VwKcpClusterClients[name] { + terminator := termination.TerminatorForType(wst) + + origLcs := []corev1alpha1.LogicalCluster{} + require.EventuallyWithT(t, func(c *assert.CollectT) { + cls, err := client.CoreV1alpha1().LogicalClusters().List(ctx, metav1.ListOptions{}) + require.NoError(c, err) + origLcs = append(origLcs, cls.Items...)
+ }, wait.ForeverTestTimeout, 100*time.Millisecond) + + for _, origLc := range origLcs { + lcPath := logicalcluster.NewPath(origLc.Annotations["kcp.io/cluster"]) + if lcPath.Empty() { + t.Errorf("could not find logicalcluster path for %v", origLc) + } + + t.Log("\tModifying a non-terminator field should not be supported") + mod := origLc.DeepCopy() + mod.Annotations["wrong"] = "wrong" + patch, err := generatePatchBytes(mod, &origLc) + require.NoError(t, err) + require.EventuallyWithT(t, func(c *assert.CollectT) { + _, err = client.Cluster(lcPath).CoreV1alpha1().LogicalClusters().Patch(ctx, corev1alpha1.LogicalClusterName, types.MergePatchType, patch, metav1.PatchOptions{}) + // we expect MethodNotSupported here as the storage layer denies any non-status field updates + require.True(c, errors.IsMethodNotSupported(err)) + }, wait.ForeverTestTimeout, 100*time.Millisecond) + + t.Log("\tModifying a terminator which is not ours should be denied") + mod = origLc.DeepCopy() + mod.Status.Terminators = []corev1alpha1.LogicalClusterTerminator{"wrong:wrong"} + patch, err = generatePatchBytes(&origLc, mod) + require.NoError(t, err) + require.EventuallyWithT(t, func(c *assert.CollectT) { + _, err = client.Cluster(lcPath).CoreV1alpha1().LogicalClusters().Patch(ctx, corev1alpha1.LogicalClusterName, types.MergePatchType, patch, metav1.PatchOptions{}, "status") + require.True(c, errors.IsInvalid(err)) + // Since Invalid is a generic error, which is not exclusive to a + // terminator failing our custom updateValidation, we need to check for it + // as well. + // Unfortunately, it is not possible to make use of + // field.Error.Origin to do so, as we convert our field.ErrorList into an + // errors.StatusError, thus losing this information. As a result, our only + // option is to reconstruct the expected error message.
+ expErrMsg := fmt.Sprintf("only removing the %q terminator is supported", terminator) + // for now using contains seems to strike the best balance between + // identifying the error, while not making the test too brittle as + // kubernetes statusError creation uses a lot of squashing and string + // manipulation to create the final exact message. + require.Contains(t, err.Error(), expErrMsg) + }, wait.ForeverTestTimeout, 100*time.Millisecond) + } + } + } + + t.Log("Removing our terminator should work") + for name := range workspaceTypes { + terminator := termination.TerminatorForType(workspaceTypes[name]) + + for _, client := range user1VwKcpClusterClients[name] { + origLcs := []corev1alpha1.LogicalCluster{} + require.EventuallyWithT(t, func(c *assert.CollectT) { + cls, err := client.CoreV1alpha1().LogicalClusters().List(ctx, metav1.ListOptions{}) + require.NoError(c, err) + origLcs = append(origLcs, cls.Items...) + }, wait.ForeverTestTimeout, 100*time.Millisecond) + + for _, origLc := range origLcs { + lcPath := logicalcluster.NewPath(origLc.Annotations["kcp.io/cluster"]) + mod := origLc.DeepCopy() + mod.Status.Terminators = removeByValue(mod.Status.Terminators, terminator) + patch, err := generatePatchBytes(&origLc, mod) + require.NoError(t, err) + require.EventuallyWithT(t, func(c *assert.CollectT) { + _, err = client.Cluster(lcPath).CoreV1alpha1().LogicalClusters().Patch(ctx, corev1alpha1.LogicalClusterName, types.MergePatchType, patch, metav1.PatchOptions{}, "status") + require.NoError(c, err) + }, wait.ForeverTestTimeout, 100*time.Millisecond) + } + } + } + + t.Log("Check that the workspace is deleted") + for _, ws := range workspaces { + require.EventuallyWithT(t, func(c *assert.CollectT) { + _, err := sourceKcpClusterClient.TenancyV1alpha1().Cluster(wsPath).Workspaces().Get(ctx, ws.Name, metav1.GetOptions{}) + require.True(c, errors.IsNotFound(err)) + }, wait.ForeverTestTimeout, 100*time.Millisecond) + } +} + +func
TestTerminatingWorkspacesVirtualWorkspaceWatch(t *testing.T) { + t.Parallel() + framework.Suite(t, "control-plane") + + source := kcptesting.SharedKcpServer(t) + wsPath, _ := kcptesting.NewWorkspaceFixture(t, source, core.RootCluster.Path()) + ctx, cancelFunc := context.WithCancel(context.Background()) + t.Cleanup(cancelFunc) + + sourceConfig := source.BaseConfig(t) + + sourceKcpClusterClient, err := kcpclientset.NewForConfig(sourceConfig) + require.NoError(t, err) + + testLabelSelector := map[string]string{ + "internal.kcp.io/e2e-test": t.Name(), + } + + t.Log("Create workspacetypes with terminators") + workspaceTypes := map[string]*tenancyv1alpha1.WorkspaceType{ + "parent": {}, + "child": {}, + } + for name := range workspaceTypes { + wst := &tenancyv1alpha1.WorkspaceType{ + ObjectMeta: metav1.ObjectMeta{ + Name: name + "-" + randSuffix(), + }, + Spec: tenancyv1alpha1.WorkspaceTypeSpec{ + Terminator: true, + }, + } + workspaceTypes[name] = wst + } + // make child extend the parent + workspaceTypes["child"].Spec.Extend.With = []tenancyv1alpha1.WorkspaceTypeReference{ + {Path: wsPath.String(), Name: tenancyv1alpha1.TypeName(workspaceTypes["parent"].Name)}, + } + + // create workspacetypes + for _, wst := range workspaceTypes { + _, err := sourceKcpClusterClient.TenancyV1alpha1().Cluster(wsPath).WorkspaceTypes().Create(ctx, wst, metav1.CreateOptions{}) + require.NoError(t, err) + source.Artifact(t, func() (runtime.Object, error) { + return sourceKcpClusterClient.TenancyV1alpha1().Cluster(wsPath).WorkspaceTypes().Get(ctx, wst.Name, metav1.GetOptions{}) + }) + } + + t.Log("Wait for WorkspaceTypes to have their type extensions resolved and vw URLs published") + for name, wst := range workspaceTypes { + wt := &tenancyv1alpha1.WorkspaceType{} + require.EventuallyWithT(t, func(c *assert.CollectT) { + wt, err = sourceKcpClusterClient.TenancyV1alpha1().Cluster(wsPath).WorkspaceTypes().Get(ctx, wst.Name, metav1.GetOptions{}) + require.NoError(c, err) + require.NotEmpty(c, 
wt.Status.VirtualWorkspaces) + }, wait.ForeverTestTimeout, 100*time.Millisecond) + workspaceTypes[name] = wt + } + + shards := []corev1alpha1.Shard{} + require.EventuallyWithT(t, func(c *assert.CollectT) { + s, err := sourceKcpClusterClient.Cluster(core.RootCluster.Path()).CoreV1alpha1().Shards().List(ctx, metav1.ListOptions{}) + require.NoError(c, err) + shards = s.Items + }, wait.ForeverTestTimeout, time.Millisecond*100) + + shardForWorkspace := map[string]*struct { + shard *corev1alpha1.Shard + }{ + "parent": {}, + "child": {}, + } + + t.Log("Create clients through the virtual workspace") + // pre-decide which workspace will be scheduled to which shard, so we can start the watchers beforehand. + // For multi-sharded setups, make sure that parent and child workspaces will be on different shards + isMultishard := len(shards) > 1 + if isMultishard { + shardForWorkspace["parent"].shard = &shards[0] + shardForWorkspace["child"].shard = &shards[1] + } else { + shardForWorkspace["parent"].shard = &shards[0] + shardForWorkspace["child"].shard = &shards[0] + } + + type connection struct { + watcher watch.Interface + clientset kcpclientset.ClusterInterface + workspace *tenancyv1alpha1.Workspace + } + // watchConnections is a collection of all clientset combinations which we expect to return results: + // first key is WorkspaceType, second key TargetWorkspace + watchConnections := map[string]map[string]*connection{ + "parent": make(map[string]*connection), + "child": make(map[string]*connection), + } + + t.Log("Start watchers for virtual workspace combinations") + // create clients for all suitable connections + watchConnections["parent"]["parent"] = &connection{} + watchConnections["child"]["child"] = &connection{} + watchConnections["parent"]["child"] = &connection{} + for wstName, targetCon := range watchConnections { + for targetWsName, con := range targetCon { + config := rest.AddUserAgent(rest.CopyConfig(sourceConfig), t.Name()+"-virtual") + url := 
terminatorUrlFromWorkspaceTypeAndShard(workspaceTypes[wstName], shardForWorkspace[targetWsName].shard) + require.NotEmpty(t, url) + config.Host = url + clientset, err := kcpclientset.NewForConfig(config) + require.NoError(t, err) + con.clientset = clientset + + var watcher watch.Interface + require.EventuallyWithT(t, func(c *assert.CollectT) { + watcher, err = clientset.CoreV1alpha1().LogicalClusters().Watch(ctx, metav1.ListOptions{}) + require.NoError(c, err) + require.NotNil(c, watcher) // if we are too fast, it is possible for .Watch to return no error but a nil watcher + }, wait.ForeverTestTimeout, 100*time.Millisecond) + con.watcher = watcher + } + } + + t.Log("Create workspaces and put them into deletion") + for name, wst := range workspaceTypes { + ws := workspaceForType(wst, testLabelSelector) + kcptesting.WithShard(shardForWorkspace[name].shard.Name)(ws) + require.EventuallyWithT(t, func(c *assert.CollectT) { + ws, err = sourceKcpClusterClient.TenancyV1alpha1().Cluster(wsPath).Workspaces().Create(ctx, ws, metav1.CreateOptions{}) + require.NoError(c, err) + }, wait.ForeverTestTimeout, 100*time.Millisecond) + require.EventuallyWithT(t, func(c *assert.CollectT) { + ws, err = sourceKcpClusterClient.TenancyV1alpha1().Cluster(wsPath).Workspaces().Get(ctx, ws.Name, metav1.GetOptions{}) + require.NoError(c, err) + require.Contains(c, ws.Annotations, "internal.tenancy.kcp.io/shard") + require.Equal(c, ws.Status.Phase, corev1alpha1.LogicalClusterPhaseReady) + require.NotEmpty(c, ws.Status.Terminators) + }, wait.ForeverTestTimeout, 100*time.Millisecond) + require.EventuallyWithT(t, func(c *assert.CollectT) { + err = sourceKcpClusterClient.TenancyV1alpha1().Cluster(wsPath).Workspaces().Delete(ctx, ws.Name, metav1.DeleteOptions{}) + require.NoError(c, err) + }, wait.ForeverTestTimeout, 100*time.Millisecond) + // add all the details of the workspace into watchConnections, so we can use them later + for _, targetCon := range watchConnections { + for targetWs, con := 
range targetCon { + if name == targetWs { + con.workspace = ws + } + } + } + } + + t.Log("Check that watchers have received events for workspaces") + for name, targetCon := range watchConnections { + for _, con := range targetCon { + for { + select { + case evt := <-con.watcher.ResultChan(): + obj, ok := evt.Object.(metav1.Object) + if !ok { + continue + } + // there might be other actors doing who-knows-what on the workspaces, so we need to specifically + // look for the first event *relating to the new workspace* that we get + cluster, ok := obj.GetAnnotations()["kcp.io/cluster"] + if !ok { + continue + } + if cluster != con.workspace.Spec.Cluster { + continue + } + // we are searching for a modified event where the object is marked for deletion + if evt.Type != watch.Modified || obj.GetDeletionTimestamp().IsZero() { + continue + } + case <-time.Tick(wait.ForeverTestTimeout): + t.Fatalf("never saw a watch modified event for vw %q and targetWs %q", name, con.workspace.Name) + } + break + } + } + } +} + +func workspaceForType(workspaceType *tenancyv1alpha1.WorkspaceType, testLabelSelector map[string]string) *tenancyv1alpha1.Workspace { + return &tenancyv1alpha1.Workspace{ + ObjectMeta: metav1.ObjectMeta{ + GenerateName: "e2e-workspace-", + Labels: testLabelSelector, + }, + Spec: tenancyv1alpha1.WorkspaceSpec{ + Type: &tenancyv1alpha1.WorkspaceTypeReference{ + Name: tenancyv1alpha1.WorkspaceTypeName(workspaceType.Name), + Path: logicalcluster.From(workspaceType).String(), + }, + }, + } +} + +func generatePatchBytes[t any](original, modified *t) ([]byte, error) { + // since we don't have access to controllerruntime clients MergeFrom(), use jsonpatch + origData, err := json.Marshal(original) + if err != nil { + return nil, err + } + modData, err := json.Marshal(modified) + if err != nil { + return nil, err + } + + patch, err := jsonpatch.CreateMergePatch(origData, modData) + if err != nil { + return nil, err + } + return patch, nil +} + +func 
terminatorUrlFromWorkspaceTypeAndShard(wst *tenancyv1alpha1.WorkspaceType, shard *corev1alpha1.Shard) string { + vwURLs := []string{} + for _, vwURL := range wst.Status.VirtualWorkspaces { + // filter out any URLs not belonging to terminating virtual workspace + if strings.Contains(vwURL.URL, terminatingworkspaces.VirtualWorkspaceName) { + vwURLs = append(vwURLs, vwURL.URL) + } + } + for _, vwURL := range vwURLs { + if strings.HasPrefix(vwURL, shard.Spec.VirtualWorkspaceURL) { + return vwURL + } + } + return "" +} + +func removeByValue[T comparable](l []T, item T) []T { + for i, other := range l { + if other == item { + return append(l[:i], l[i+1:]...) + } + } + return l +} + +func randSuffix() string { + // WorkspaceType names are pretty locked down, using this regex: '^[A-Z0-9][a-zA-Z0-9]+$' - so we just add some simple lowercase suffix. + const characters = "abcdefghijklmnopqrstuvwxyz" + b := make([]byte, 10) + for i := range b { + b[i] = characters[rand.Intn(len(characters))] + } + return string(b) +} diff --git a/test/e2e/workspace/deletion_test.go b/test/e2e/workspace/deletion_test.go index eba9992dfe6..e1bcdeb20d6 100644 --- a/test/e2e/workspace/deletion_test.go +++ b/test/e2e/workspace/deletion_test.go @@ -60,12 +60,11 @@ func TestWorkspaceLogicalClusterRelationship(t *testing.T) { clientset, err := kcpclientset.NewForConfig(cfg) require.NoError(t, err, "error creating kube cluster client set") - lc, err := clientset.Cluster(testPath).CoreV1alpha1().LogicalClusters().Get(ctx, corev1alpha1.LogicalClusterName, v1.GetOptions{}) - require.NoError(t, err, "error getting logicalcluster") - // add a finalizer to the cluster, mimicking an external finalizer customFinalizer := "example.com/test" require.EventuallyWithT(t, func(c *assert.CollectT) { + lc, err := clientset.Cluster(testPath).CoreV1alpha1().LogicalClusters().Get(ctx, corev1alpha1.LogicalClusterName, v1.GetOptions{}) + require.NoError(t, err, "error getting logicalcluster") lcsUpd := lc.DeepCopy() 
lcsUpd.Finalizers = append(lcsUpd.Finalizers, customFinalizer) _, err = clientset.Cluster(testPath).CoreV1alpha1().LogicalClusters().Update(ctx, lcsUpd, v1.UpdateOptions{}) @@ -96,7 +95,7 @@ func TestWorkspaceLogicalClusterRelationship(t *testing.T) { ws, err := clientset.Cluster(fixtureRoot).TenancyV1alpha1().Workspaces().Get(ctx, wsName, v1.GetOptions{}) require.NoError(c, err, "error getting workspace") require.NotEqual(c, nil, ws.DeletionTimestamp) - require.Contains(c, ws.Finalizers, corev1alpha1.LogicalClusterFinalizer) + require.Contains(c, ws.Finalizers, corev1alpha1.LogicalClusterFinalizerName) }, wait.ForeverTestTimeout, 100*time.Millisecond, "waiting for workspace to be marked for deletion") // remove the custom finalizer from the logicalcluster object