diff --git a/Makefile b/Makefile
index fafcaab18e..2fddafcd3e 100644
--- a/Makefile
+++ b/Makefile
@@ -65,6 +65,10 @@ IMG ?= quay.io/konveyor/oadp-operator:latest
 # You can override this with environment variable (e.g., export TTL_DURATION=4h)
 TTL_DURATION ?= 1h
 
+# HC_NAME is the name of the HostedCluster to use for HCP tests when
+# hc_backup_restore_mode is set to external. Otherwise, HC_NAME is ignored.
+HC_NAME ?= ""
+
 # Get the currently used golang install path (in GOPATH/bin, unless GOBIN is set)
 ifeq (,$(shell go env GOBIN))
 GOBIN=$(shell go env GOPATH)/bin
@@ -807,6 +811,8 @@ ARTIFACT_DIR ?= /tmp
 HCO_UPSTREAM ?= false
 TEST_VIRT ?= false
 TEST_HCP ?= false
+TEST_HCP_EXTERNAL ?= false
+HCP_EXTERNAL_ARGS ?= ""
 TEST_CLI ?= false
 SKIP_MUST_GATHER ?= false
 TEST_UPGRADE ?= false
@@ -828,6 +834,12 @@ ifeq ($(TEST_HCP),true)
 else
 	TEST_FILTER += && (! hcp)
 endif
+ifeq ($(TEST_HCP_EXTERNAL),true)
+	TEST_FILTER += && (hcp_external)
+	HCP_EXTERNAL_ARGS = -hc_backup_restore_mode=external -hc_name=$(HC_NAME)
+else
+	TEST_FILTER += && (! hcp_external)
+endif
 ifeq ($(TEST_CLI),true)
 	TEST_FILTER += && (cli)
 else
@@ -852,6 +864,7 @@ test-e2e: test-e2e-setup install-ginkgo ## Run E2E tests against OADP operator i
 		--ginkgo.label-filter="$(TEST_FILTER)" \
 		--ginkgo.junit-report="$(ARTIFACT_DIR)/junit_report.xml" \
 		--ginkgo.timeout=2h \
+		$(HCP_EXTERNAL_ARGS) \
 		$(GINKGO_ARGS)
 
 .PHONY: test-e2e-cleanup
@@ -868,7 +881,6 @@ test-e2e-cleanup: login-required
 	for restore_name in $(shell $(OC_CLI) get restore -n $(OADP_TEST_NAMESPACE) -o name);do $(OC_CLI) patch "$$restore_name" -n $(OADP_TEST_NAMESPACE) -p '{"metadata":{"finalizers":null}}' --type=merge;done
 	rm -rf $(SETTINGS_TMP)
 
-
 .PHONY: update-non-admin-manifests
 update-non-admin-manifests: NON_ADMIN_CONTROLLER_IMG?=quay.io/konveyor/oadp-non-admin:latest
 update-non-admin-manifests: yq ## Update Non Admin Controller (NAC) manifests shipped with OADP, from NON_ADMIN_CONTROLLER_PATH
@@ -892,4 +904,8 @@ endif
 
 .PHONY: build-must-gather
 build-must-gather: check-go ## Build OADP Must-gather binary must-gather/oadp-must-gather
+ifeq ($(SKIP_MUST_GATHER),true)
+	echo "Skipping must-gather build"
+else
 	cd must-gather && go build -mod=mod -a -o oadp-must-gather cmd/main.go
+endif
diff --git a/docs/developer/testing/TESTING.md b/docs/developer/testing/TESTING.md
index c78772db81..9f2427802b 100644
--- a/docs/developer/testing/TESTING.md
+++ b/docs/developer/testing/TESTING.md
@@ -100,6 +100,16 @@ You can also execute make test-e2e with a $GINKGO_ARGS variable set.
 Example: make test-e2e GINKGO_ARGS="--ginkgo.focus='MySQL application DATAMOVER'"
 ```
 
+### Run selected tests for HCP against an external HostedControlPlane
+
+Set the common environment variables as described above, then run:
+
+```bash
+TEST_HCP_EXTERNAL=true \
+HC_NAME=hc1 \
+make test-e2e
+```
+
 ### Run tests with custom images
 
 You can run tests with custom images by setting the following environment variables:
diff --git a/tests/e2e/backup_restore_suite_test.go b/tests/e2e/backup_restore_suite_test.go
index c28cc85d18..32b327259f 100644
--- a/tests/e2e/backup_restore_suite_test.go
+++ b/tests/e2e/backup_restore_suite_test.go
@@ -237,6 +237,7 @@ func runRestore(brCase BackupRestoreCase, backupName, restoreName string, nsRequ
 
 func getFailedTestLogs(oadpNamespace string, appNamespace string, installTime time.Time, report ginkgo.SpecReport) {
 	baseReportDir := artifact_dir + "/" + report.LeafNodeText
+	log.Println("Storing failed test logs in: ", baseReportDir)
 	err := os.MkdirAll(baseReportDir, 0755)
 	gomega.Expect(err).NotTo(gomega.HaveOccurred())
 
@@ -255,12 +256,12 @@ func getFailedTestLogs(oadpNamespace string, appNamespace string, installTime ti
 
 func tearDownBackupAndRestore(brCase BackupRestoreCase, installTime time.Time, report ginkgo.SpecReport) {
 	log.Println("Post backup and restore state: ", report.State.String())
+	gatherLogs(brCase, installTime, report)
+	tearDownDPAResources(brCase)
+	deleteNamespace(brCase.Namespace)
+}
 
-	if report.Failed() {
-		knownFlake = lib.CheckIfFlakeOccurred(accumulatedTestLogs)
-		accumulatedTestLogs = nil
-		getFailedTestLogs(namespace, brCase.Namespace, installTime, report)
-	}
+func tearDownDPAResources(brCase BackupRestoreCase) {
 	if brCase.BackupRestoreType == lib.CSI || brCase.BackupRestoreType == lib.CSIDataMover {
 		log.Printf("Deleting VolumeSnapshot for CSI backuprestore of %s", brCase.Name)
 		snapshotClassPath := fmt.Sprintf("./sample-applications/snapclass-csi/%s.yaml", provider)
@@ -270,10 +271,20 @@ func tearDownBackupAndRestore(brCase BackupRestoreCase, installTime time.Time, r
 
 	err := dpaCR.Delete()
 	gomega.Expect(err).ToNot(gomega.HaveOccurred())
+}
+
+func gatherLogs(brCase BackupRestoreCase, installTime time.Time, report ginkgo.SpecReport) {
+	if report.Failed() {
+		knownFlake = lib.CheckIfFlakeOccurred(accumulatedTestLogs)
+		accumulatedTestLogs = nil
+		getFailedTestLogs(namespace, brCase.Namespace, installTime, report)
+	}
+}
 
-	err = lib.DeleteNamespace(kubernetesClientForSuiteRun, brCase.Namespace)
+func deleteNamespace(namespace string) {
+	err := lib.DeleteNamespace(kubernetesClientForSuiteRun, namespace)
 	gomega.Expect(err).ToNot(gomega.HaveOccurred())
-	gomega.Eventually(lib.IsNamespaceDeleted(kubernetesClientForSuiteRun, brCase.Namespace), time.Minute*5, time.Second*5).Should(gomega.BeTrue())
+	gomega.Eventually(lib.IsNamespaceDeleted(kubernetesClientForSuiteRun, namespace), time.Minute*5, time.Second*5).Should(gomega.BeTrue())
 }
 
 var _ = ginkgo.Describe("Backup and restore tests", ginkgo.Ordered, func() {
diff --git a/tests/e2e/e2e_suite_test.go b/tests/e2e/e2e_suite_test.go
index 41305bbaed..7a10553852 100644
--- a/tests/e2e/e2e_suite_test.go
+++ b/tests/e2e/e2e_suite_test.go
@@ -40,9 +40,11 @@ var (
 	knownFlake          bool
 	accumulatedTestLogs []string
 
-	kvmEmulation   bool
-	useUpstreamHco bool
-	skipMustGather bool
+	kvmEmulation        bool
+	useUpstreamHco      bool
+	skipMustGather      bool
+	hcBackupRestoreMode string
+	hcName              string
 )
 
 func init() {
@@ -59,6 +61,8 @@ func init() {
 	flag.BoolVar(&kvmEmulation, "kvm_emulation", true, "Enable or disable KVM emulation for virtualization testing")
 	flag.BoolVar(&useUpstreamHco, "hco_upstream", false, "Force use of upstream virtualization operator")
 	flag.BoolVar(&skipMustGather, "skipMustGather", false, "avoid errors with local execution and cluster architecture")
+	flag.StringVar(&hcBackupRestoreMode, "hc_backup_restore_mode", string(HCModeCreate), "Type of HC test to run: create or external")
+	flag.StringVar(&hcName, "hc_name", "", "Name of the HostedCluster to use for HCP tests")
 
 	// helps with launching debug sessions from IDE
 	if os.Getenv("E2E_USE_ENV_FLAGS") == "true" {
@@ -115,14 +119,22 @@ func init() {
 				log.Println("Error parsing SKIP_MUST_GATHER, must-gather will be enabled by default: ", err)
 			}
 		}
+		if os.Getenv("HC_BACKUP_RESTORE_MODE") != "" {
+			hcBackupRestoreMode = os.Getenv("HC_BACKUP_RESTORE_MODE")
+		} else {
+			hcBackupRestoreMode = string(HCModeCreate)
+		}
+		if os.Getenv("HC_NAME") != "" {
+			hcName = os.Getenv("HC_NAME")
+		}
 	}
-
 }
 
 func TestOADPE2E(t *testing.T) {
 	flag.Parse()
 
 	var err error
+
 	kubeConfig = config.GetConfigOrDie()
 	kubeConfig.QPS = 50
 	kubeConfig.Burst = 100
@@ -200,7 +212,6 @@ var _ = ginkgo.AfterSuite(func() {
 	gomega.Expect(err).ToNot(gomega.HaveOccurred())
 	err = lib.DeleteSecret(kubernetesClientForSuiteRun, namespace, bslSecretNameWithCarriageReturn)
 	gomega.Expect(err).ToNot(gomega.HaveOccurred())
-
 	log.Printf("Deleting DPA")
 	err = dpaCR.Delete()
 	gomega.Expect(err).ToNot(gomega.HaveOccurred())
diff --git a/tests/e2e/hcp_backup_restore_suite_test.go b/tests/e2e/hcp_backup_restore_suite_test.go
index 736ca090e2..a90c63d65a 100644
--- a/tests/e2e/hcp_backup_restore_suite_test.go
+++ b/tests/e2e/hcp_backup_restore_suite_test.go
@@ -8,19 +8,29 @@ import (
 
 	"github.com/onsi/ginkgo/v2"
 	"github.com/onsi/gomega"
+	"sigs.k8s.io/controller-runtime/pkg/client"
 
 	"github.com/openshift/oadp-operator/tests/e2e/lib"
 	libhcp "github.com/openshift/oadp-operator/tests/e2e/lib/hcp"
 )
 
-type HCPBackupRestoreCase struct {
-	BackupRestoreCase
-	Template string
-	Provider string
-}
+type HCBackupRestoreMode string
 
-func runHCPBackupAndRestore(brCase HCPBackupRestoreCase, updateLastBRcase func(brCase HCPBackupRestoreCase), h *libhcp.HCHandler) {
+const (
+	HCModeCreate   HCBackupRestoreMode = "create"   // Create new HostedCluster for test
+	HCModeExternal HCBackupRestoreMode = "external" // Get external HostedCluster
+	// TODO: Add HCModeExternalROSA for ROSA where DPA and some other resources are already installed
+)
+
+// runHCPBackupAndRestore is the unified function that handles both create and external HC modes
+func runHCPBackupAndRestore(
+	brCase HCPBackupRestoreCase,
+	updateLastBRcase func(HCPBackupRestoreCase),
+	updateLastInstallTime func(),
+	h *libhcp.HCHandler,
+) {
 	updateLastBRcase(brCase)
+	updateLastInstallTime()
 
 	log.Printf("Preparing backup and restore")
 	backupName, restoreName := prepareBackupAndRestore(brCase.BackupRestoreCase, func() {})
@@ -29,19 +39,46 @@ func runHCPBackupAndRestore(brCase HCPBackupRestoreCase, updateLastBRcase func(b
 	gomega.Expect(err).ToNot(gomega.HaveOccurred(), "failed to add HCP plugin to DPA: %v", err)
 
 	// TODO: move the wait for HC just after the DPA modification to allow reconciliation to go ahead without waiting for the HC to be created
-	//Wait for HCP plugin to be added
+	// Wait for HCP plugin to be added
 	gomega.Eventually(libhcp.IsHCPPluginAdded(h.Client, dpaCR.Namespace, dpaCR.Name), 3*time.Minute, 1*time.Second).Should(gomega.BeTrue())
 
-	// Create the HostedCluster for the test
 	h.HCPNamespace = libhcp.GetHCPNamespace(brCase.BackupRestoreCase.Name, libhcp.ClustersNamespace)
-	h.HostedCluster, err = h.DeployHCManifest(brCase.Template, brCase.Provider, brCase.BackupRestoreCase.Name)
-	gomega.Expect(err).ToNot(gomega.HaveOccurred())
 
+	// Unified HostedCluster setup
+	switch brCase.Mode {
+	case HCModeCreate:
+		// Create new HostedCluster for test
+		h.HostedCluster, err = h.DeployHCManifest(brCase.Template, brCase.Provider, brCase.BackupRestoreCase.Name)
+		gomega.Expect(err).ToNot(gomega.HaveOccurred())
+	case HCModeExternal:
+		// Get external HostedCluster
+		h.HostedCluster, err = h.GetHostedCluster(brCase.BackupRestoreCase.Name, libhcp.ClustersNamespace)
+		gomega.Expect(err).ToNot(gomega.HaveOccurred())
+	default:
+		ginkgo.Fail(fmt.Sprintf("unknown HCP mode: %s", brCase.Mode))
+	}
+
+	// Pre-backup verification
 	if brCase.PreBackupVerify != nil {
-		err := brCase.PreBackupVerify(runTimeClientForSuiteRun, brCase.Namespace)
+		log.Printf("Validating HC pre-backup")
+		err := brCase.PreBackupVerify(runTimeClientForSuiteRun, "" /*unused*/)
 		gomega.Expect(err).ToNot(gomega.HaveOccurred(), "failed to run HCP pre-backup verification: %v", err)
 	}
 
+	if brCase.Mode == HCModeExternal {
+		// Pre-backup verification for guest cluster
+		if brCase.PreBackupVerifyGuest != nil {
+			log.Printf("Validating guest cluster pre-backup")
+			hcKubeconfig, err := h.GetHostedClusterKubeconfig(h.HostedCluster)
+			gomega.Expect(err).ToNot(gomega.HaveOccurred())
+			crClientForHC, err := client.New(hcKubeconfig, client.Options{Scheme: lib.Scheme})
+			gomega.Expect(err).ToNot(gomega.HaveOccurred())
+			gomega.Eventually(h.ValidateClient(crClientForHC), 5*time.Minute, 2*time.Second).Should(gomega.BeTrue())
+			err = brCase.PreBackupVerifyGuest(crClientForHC, "" /*unused*/)
+			gomega.Expect(err).ToNot(gomega.HaveOccurred(), "failed to run pre-backup verification for guest cluster: %v", err)
+		}
+	}
+
 	// Backup HCP & HC
 	log.Printf("Backing up HC")
 	includedResources := libhcp.HCPIncludedResources
@@ -59,10 +96,37 @@ func runHCPBackupAndRestore(brCase HCPBackupRestoreCase, updateLastBRcase func(b
 	log.Printf("Restoring HC")
 	runHCPRestore(brCase.BackupRestoreCase, backupName, restoreName, nsRequiresResticDCWorkaround)
 
-	// Wait for HCP to be restored
-	log.Printf("Validating HC")
-	err = libhcp.ValidateHCP(libhcp.ValidateHCPTimeout, libhcp.Wait10Min, []string{}, h.HCPNamespace)(h.Client, libhcp.ClustersNamespace)
-	gomega.Expect(err).ToNot(gomega.HaveOccurred(), "failed to run HCP post-restore verification: %v", err)
+	// Unified post-restore verification
+	if brCase.PostRestoreVerify != nil {
+		log.Printf("Validating HC post-restore")
+		err = brCase.PostRestoreVerify(runTimeClientForSuiteRun, "" /*unused*/)
+		gomega.Expect(err).ToNot(gomega.HaveOccurred(), "failed to run HCP post-restore verification: %v", err)
+	}
+
+	if brCase.Mode == HCModeExternal {
+		// Post-restore verification for guest cluster
+		if brCase.PostRestoreVerifyGuest != nil {
+			log.Printf("Validating guest cluster post-restore")
+			hcKubeconfig, err := h.GetHostedClusterKubeconfig(h.HostedCluster)
+			gomega.Expect(err).ToNot(gomega.HaveOccurred())
+			crClientForHC, err := client.New(hcKubeconfig, client.Options{Scheme: lib.Scheme})
+			gomega.Expect(err).ToNot(gomega.HaveOccurred())
+			gomega.Eventually(h.ValidateClient(crClientForHC), 5*time.Minute, 2*time.Second).Should(gomega.BeTrue())
+			err = brCase.PostRestoreVerifyGuest(crClientForHC, "" /*unused*/)
+			gomega.Expect(err).ToNot(gomega.HaveOccurred(), "failed to run post-restore verification for guest cluster: %v", err)
+		}
+	}
+}
+
+type VerificationFunctionGuest func(client.Client, string) error
+
+type HCPBackupRestoreCase struct {
+	BackupRestoreCase
+	Mode                   HCBackupRestoreMode
+	PreBackupVerifyGuest   VerificationFunctionGuest
+	PostRestoreVerifyGuest VerificationFunctionGuest
+	Template               string // Optional: only used when Mode == HCModeCreate
+	Provider               string // Optional: only used when Mode == HCModeCreate
 }
 
 var _ = ginkgo.Describe("HCP Backup and Restore tests", ginkgo.Ordered, func() {
@@ -77,6 +141,10 @@ var _ = ginkgo.Describe("HCP Backup and Restore tests", ginkgo.Ordered, func() {
 		lastBRCase = brCase
 	}
 
+	updateLastInstallTime := func() {
+		lastInstallTime = time.Now()
+	}
+
 	// Before All
 	var _ = ginkgo.BeforeAll(func() {
 		// Wait for CatalogSource to be ready
@@ -153,11 +221,12 @@ var _ = ginkgo.Describe("HCP Backup and Restore tests", ginkgo.Ordered, func() {
 		if ginkgo.CurrentSpecReport().NumAttempts > 1 && !knownFlake {
 			ginkgo.Fail("No known FLAKE found in a previous run, marking test as failed.")
 		}
-		runHCPBackupAndRestore(brCase, updateLastBRcase, h)
+		runHCPBackupAndRestore(brCase, updateLastBRcase, updateLastInstallTime, h)
 	},
 
 	// Test Cases
 	ginkgo.Entry("None HostedCluster backup and restore", ginkgo.Label("hcp"), HCPBackupRestoreCase{
+		Mode:     HCModeCreate,
 		Template: libhcp.HCPNoneManifest,
 		Provider: "None",
 		BackupRestoreCase: BackupRestoreCase{
@@ -171,6 +240,7 @@ var _ = ginkgo.Describe("HCP Backup and Restore tests", ginkgo.Ordered, func() {
 	}, nil),
 
 	ginkgo.Entry("Agent HostedCluster backup and restore", ginkgo.Label("hcp"), HCPBackupRestoreCase{
+		Mode:     HCModeCreate,
 		Template: libhcp.HCPAgentManifest,
 		Provider: "Agent",
 		BackupRestoreCase: BackupRestoreCase{
diff --git a/tests/e2e/hcp_external_cluster_backup_restore_suite_test.go b/tests/e2e/hcp_external_cluster_backup_restore_suite_test.go
new file mode 100644
index 0000000000..65182c3bc7
--- /dev/null
+++ b/tests/e2e/hcp_external_cluster_backup_restore_suite_test.go
@@ -0,0 +1,93 @@
+package e2e_test
+
+import (
+	"context"
+	"time"
+
+	"github.com/onsi/ginkgo/v2"
+	corev1 "k8s.io/api/core/v1"
+	apierrors "k8s.io/apimachinery/pkg/api/errors"
+	"sigs.k8s.io/controller-runtime/pkg/client"
+
+	"github.com/openshift/oadp-operator/tests/e2e/lib"
+	libhcp "github.com/openshift/oadp-operator/tests/e2e/lib/hcp"
+)
+
+// External cluster backup and restore tests skip creating a HostedCluster resource. They expect the cluster
+// to already have a HostedCluster with a data plane.
+// The tests are skipped unless the hc_backup_restore_mode flag is set to external.
+var _ = ginkgo.Describe("HCP external cluster Backup and Restore tests", ginkgo.Ordered, func() {
+	var (
+		lastInstallTime time.Time
+		lastBRCase      HCPBackupRestoreCase
+		h               *libhcp.HCHandler
+	)
+
+	updateLastBRcase := func(brCase HCPBackupRestoreCase) {
+		lastBRCase = brCase
+	}
+
+	updateLastInstallTime := func() {
+		lastInstallTime = time.Now()
+	}
+
+	var _ = ginkgo.BeforeAll(func() {
+		if hcBackupRestoreMode != string(HCModeExternal) {
+			ginkgo.Skip("Skipping HCP external cluster backup and restore tests: hc_backup_restore_mode is not set to external")
+		}
+
+		h = &libhcp.HCHandler{
+			Ctx:            context.Background(),
+			Client:         runTimeClientForSuiteRun,
+			HCOCPTestImage: libhcp.HCOCPTestImage,
+		}
+	})
+
+	// After Each
+	var _ = ginkgo.AfterEach(func(ctx ginkgo.SpecContext) {
+		gatherLogs(lastBRCase.BackupRestoreCase, lastInstallTime, ctx.SpecReport())
+		tearDownDPAResources(lastBRCase.BackupRestoreCase)
+	})
+
+	ginkgo.It("HCP external cluster backup and restore test", ginkgo.Label("hcp_external"), func() {
+		if ginkgo.CurrentSpecReport().NumAttempts > 1 && !knownFlake {
+			ginkgo.Fail("No known FLAKE found in a previous run, marking test as failed.")
+		}
+
+		runHCPBackupAndRestore(HCPBackupRestoreCase{
+			Mode:                   HCModeExternal,
+			PreBackupVerifyGuest:   preBackupVerifyGuest(),
+			PostRestoreVerifyGuest: postRestoreVerifyGuest(),
+			BackupRestoreCase: BackupRestoreCase{
+				Name:              hcName,
+				BackupRestoreType: lib.CSIDataMover,
+				PreBackupVerify:   libhcp.ValidateHCP(libhcp.ValidateHCPTimeout, libhcp.Wait10Min, []string{}, libhcp.GetHCPNamespace(hcName, libhcp.ClustersNamespace)),
+				PostRestoreVerify: libhcp.ValidateHCP(libhcp.ValidateHCPTimeout, libhcp.Wait10Min, []string{}, libhcp.GetHCPNamespace(hcName, libhcp.ClustersNamespace)),
+				BackupTimeout:     libhcp.HCPBackupTimeout,
+			},
+		}, updateLastBRcase, updateLastInstallTime, h)
+	})
+})
+
+func preBackupVerifyGuest() VerificationFunctionGuest {
+	return func(crClientGuest client.Client, namespace string) error {
+		ns := &corev1.Namespace{}
+		ns.Name = "test"
+		err := crClientGuest.Create(context.Background(), ns)
+		if err != nil && !apierrors.IsAlreadyExists(err) {
+			return err
+		}
+		return nil
+	}
+}
+
+func postRestoreVerifyGuest() VerificationFunctionGuest {
+	return func(crClientGuest client.Client, namespace string) error {
+		ns := &corev1.Namespace{}
+		err := crClientGuest.Get(context.Background(), client.ObjectKey{Name: "test"}, ns)
+		if err != nil {
+			return err
+		}
+		return nil
+	}
+}
diff --git a/tests/e2e/lib/dpa_helpers.go b/tests/e2e/lib/dpa_helpers.go
index 71c4c8c008..0d1e21f932 100644
--- a/tests/e2e/lib/dpa_helpers.go
+++ b/tests/e2e/lib/dpa_helpers.go
@@ -49,12 +49,12 @@ type DpaCustomResource struct {
 func LoadDpaSettingsFromJson(settings string) (*oadpv1alpha1.DataProtectionApplication, error) {
 	file, err := ReadFile(settings)
 	if err != nil {
-		return nil, fmt.Errorf("Error getting settings json file: %v", err)
+		return nil, fmt.Errorf("error getting settings json file: %v", err)
 	}
 	dpa := &oadpv1alpha1.DataProtectionApplication{}
 	err = json.Unmarshal(file, &dpa)
 	if err != nil {
-		return nil, fmt.Errorf("Error decoding json file: %v", err)
+		return nil, fmt.Errorf("error decoding json file: %v", err)
 	}
 	return dpa, nil
 }
diff --git a/tests/e2e/lib/hcp/hcp.go b/tests/e2e/lib/hcp/hcp.go
index 6577287044..2dbdef40e4 100644
--- a/tests/e2e/lib/hcp/hcp.go
+++ b/tests/e2e/lib/hcp/hcp.go
@@ -7,6 +7,7 @@ import (
 	"log"
 	"time"
 
+	configv1 "github.com/openshift/api/config/v1"
 	hypershiftv1 "github.com/openshift/hypershift/api/hypershift/v1beta1"
 	appsv1 "k8s.io/api/apps/v1"
 	corev1 "k8s.io/api/core/v1"
@@ -16,6 +17,8 @@ import (
 	"k8s.io/apimachinery/pkg/runtime/schema"
 	"k8s.io/apimachinery/pkg/types"
 	"k8s.io/apimachinery/pkg/util/wait"
+	"k8s.io/client-go/rest"
+	"k8s.io/client-go/tools/clientcmd"
 	"sigs.k8s.io/controller-runtime/pkg/client"
 	"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
 
@@ -295,6 +298,19 @@ func (h *HCHandler) WaitForHCPDeletion(hcp *hypershiftv1.HostedControlPlane) err
 	})
 }
 
+// GetHostedCluster returns the HostedCluster object
+func (h *HCHandler) GetHostedCluster(hcName, hcNamespace string) (*hypershiftv1.HostedCluster, error) {
+	hc := &hypershiftv1.HostedCluster{}
+	err := h.Client.Get(h.Ctx, types.NamespacedName{
+		Name:      hcName,
+		Namespace: hcNamespace,
+	}, hc)
+	if err != nil {
+		return nil, fmt.Errorf("failed to get HostedCluster: %v", err)
+	}
+	return hc, nil
+}
+
 // NukeHostedCluster removes all resources associated with a HostedCluster
 func (h *HCHandler) NukeHostedCluster() error {
 	// List of resource types to check
@@ -672,3 +688,40 @@ func RestartHCPPods(HCPNamespace string, c client.Client) error {
 	}
 	return nil
 }
+
+func buildConfigFromBytes(kubeconfigData []byte) (*rest.Config, error) {
+	clientConfig, err := clientcmd.NewClientConfigFromBytes(kubeconfigData)
+	if err != nil {
+		return nil, fmt.Errorf("failed to load client config from bytes: %v", err)
+	}
+	config, err := clientConfig.ClientConfig()
+	if err != nil {
+		return nil, fmt.Errorf("failed to build complete client config: %v", err)
+	}
+	return config, nil
+}
+
+func (h *HCHandler) GetHostedClusterKubeconfig(hc *hypershiftv1.HostedCluster) (*rest.Config, error) {
+	kubeconfigSecret := &corev1.Secret{}
+	err := h.Client.Get(h.Ctx,
+		types.NamespacedName{
+			Namespace: hc.Namespace,
+			Name:      hc.Status.KubeConfig.Name},
+		kubeconfigSecret)
+	if err != nil {
+		return nil, err
+	}
+	kubeconfigData := kubeconfigSecret.Data["kubeconfig"]
+	return buildConfigFromBytes(kubeconfigData)
+}
+
+func (h *HCHandler) ValidateClient(c client.Client) wait.ConditionFunc {
+	return func() (bool, error) {
+		clusterVersion := &configv1.ClusterVersion{}
+		if err := c.Get(h.Ctx, client.ObjectKey{Name: "version"}, clusterVersion); err != nil {
+			log.Printf("Error getting cluster version: %v", err)
+			return false, nil
+		}
+		return true, nil
+	}
+}