diff --git a/cmd/thv/app/list.go b/cmd/thv/app/list.go index 7fe7bc34d..71c02f505 100644 --- a/cmd/thv/app/list.go +++ b/cmd/thv/app/list.go @@ -7,6 +7,7 @@ import ( "text/tabwriter" "github.com/spf13/cobra" + "github.com/spf13/viper" "github.com/stacklok/toolhive/pkg/core" "github.com/stacklok/toolhive/pkg/logger" @@ -26,12 +27,14 @@ var ( listFormat string listLabelFilter []string listGroupFilter string + listRuntime string ) func init() { listCmd.Flags().BoolVarP(&listAll, "all", "a", false, "Show all workloads (default shows just running)") listCmd.Flags().StringVar(&listFormat, "format", FormatText, "Output format (json, text, or mcpservers)") listCmd.Flags().StringArrayVarP(&listLabelFilter, "label", "l", []string{}, "Filter workloads by labels (format: key=value)") + listCmd.Flags().StringVar(&listRuntime, "runtime", "", "Container runtime to use (docker, kubernetes)") // TODO: Re-enable when group functionality is complete // listCmd.Flags().StringVar(&listGroupFilter, "group", "", "Filter workloads by group") @@ -41,6 +44,11 @@ func init() { func listCmdFunc(cmd *cobra.Command, _ []string) error { ctx := cmd.Context() + // Set runtime flag in viper if specified for this command + if listRuntime != "" { + viper.Set("runtime", listRuntime) + } + // Instantiate the status manager. 
manager, err := workloads.NewManager(ctx) if err != nil { diff --git a/docs/cli/thv_list.md b/docs/cli/thv_list.md index d76ac61a2..5c6b6307a 100644 --- a/docs/cli/thv_list.md +++ b/docs/cli/thv_list.md @@ -28,6 +28,7 @@ thv list [flags] --format string Output format (json, text, or mcpservers) (default "text") -h, --help help for list -l, --label stringArray Filter workloads by labels (format: key=value) + --runtime string Container runtime to use (docker, kubernetes) ``` ### Options inherited from parent commands diff --git a/pkg/container/kubernetes/client.go b/pkg/container/kubernetes/client.go index a9f6af049..bb2563eec 100644 --- a/pkg/container/kubernetes/client.go +++ b/pkg/container/kubernetes/client.go @@ -25,6 +25,7 @@ import ( "k8s.io/client-go/kubernetes" "k8s.io/client-go/kubernetes/scheme" "k8s.io/client-go/rest" + "k8s.io/client-go/tools/clientcmd" "k8s.io/client-go/tools/remotecommand" "k8s.io/client-go/tools/watch" @@ -46,17 +47,31 @@ const ( type Client struct { runtimeType runtime.Type client kubernetes.Interface + config *rest.Config // waitForStatefulSetReadyFunc is used for testing to mock the waitForStatefulSetReady function waitForStatefulSetReadyFunc func(ctx context.Context, clientset kubernetes.Interface, namespace, name string) error } // NewClient creates a new container client func NewClient(_ context.Context) (*Client, error) { - // creates the in-cluster config - config, err := rest.InClusterConfig() + var config *rest.Config + var err error + + // First, try to create an in-cluster config + config, err = rest.InClusterConfig() if err != nil { - return nil, fmt.Errorf("failed to create in-cluster config: %v", err) + logger.Debugf("Failed to create in-cluster config: %v, trying remote cluster configuration", err) + + // Fall back to remote cluster configuration + config, err = createRemoteClusterConfig() + if err != nil { + return nil, fmt.Errorf("failed to create both in-cluster and remote cluster config: %v", err) + } + 
logger.Info("Successfully created remote cluster configuration") + } else { + logger.Info("Successfully created in-cluster configuration") } + // creates the clientset clientset, err := kubernetes.NewForConfig(config) if err != nil { @@ -66,9 +81,77 @@ func NewClient(_ context.Context) (*Client, error) { return &Client{ runtimeType: runtime.TypeKubernetes, client: clientset, + config: config, }, nil } +// createRemoteClusterConfig creates a Kubernetes config for remote cluster access +func createRemoteClusterConfig() (*rest.Config, error) { + // Get the kubeconfig path from environment variable or use default + kubeconfigPath := os.Getenv("KUBECONFIG") + if kubeconfigPath == "" { + // Use default kubeconfig path + homeDir, err := os.UserHomeDir() + if err != nil { + return nil, fmt.Errorf("failed to get user home directory: %w", err) + } + kubeconfigPath = homeDir + "/.kube/config" + } + + // Check if the kubeconfig file exists + if _, err := os.Stat(kubeconfigPath); os.IsNotExist(err) { + return nil, fmt.Errorf("kubeconfig file not found at %s", kubeconfigPath) + } + + // Load the kubeconfig + config, err := clientcmd.BuildConfigFromFlags("", kubeconfigPath) + if err != nil { + return nil, fmt.Errorf("failed to build config from kubeconfig file %s: %w", kubeconfigPath, err) + } + + return config, nil +} + +// getNamespaceFromServiceAccount attempts to read the namespace from the service account token file +func getNamespaceFromServiceAccount() (string, error) { + data, err := os.ReadFile("/var/run/secrets/kubernetes.io/serviceaccount/namespace") + if err != nil { + return "", fmt.Errorf("failed to read namespace file: %w", err) + } + return string(data), nil +} + +// getNamespaceFromEnv attempts to get the namespace from environment variables +func getNamespaceFromEnv() (string, error) { + ns := os.Getenv("POD_NAMESPACE") + if ns == "" { + return "", fmt.Errorf("POD_NAMESPACE environment variable not set") + } + return ns, nil +} + +// getCurrentNamespace returns 
the namespace the pod is running in. +// It tries multiple methods in order: +// 1. Reading from the service account token file +// 2. Getting the namespace from environment variables +// 3. Falling back to "default" if both methods fail +func getCurrentNamespace() string { + // Method 1: Try to read from the service account namespace file + ns, err := getNamespaceFromServiceAccount() + if err == nil { + return ns + } + + // Method 2: Try to get the namespace from environment variables + ns, err = getNamespaceFromEnv() + if err == nil { + return ns + } + + // Method 3: Fall back to default + return "default" +} + // AttachToWorkload implements runtime.Runtime. func (c *Client) AttachToWorkload(ctx context.Context, workloadName string) (io.WriteCloser, io.ReadCloser, error) { // AttachToWorkload attaches to a workload in Kubernetes @@ -107,12 +190,8 @@ func (c *Client) AttachToWorkload(ctx context.Context, workloadName string) (io. SubResource("attach"). VersionedParams(attachOpts, scheme.ParameterCodec) - config, err := rest.InClusterConfig() - if err != nil { - panic(fmt.Errorf("failed to create k8s config: %v", err)) - } - // Create a SPDY executor - exec, err := remotecommand.NewSPDYExecutor(config, "POST", req.URL()) + // Create a SPDY executor using the stored config + exec, err := remotecommand.NewSPDYExecutor(c.config, "POST", req.URL()) if err != nil { return nil, nil, fmt.Errorf("failed to create SPDY executor: %v", err) } @@ -401,6 +480,7 @@ func (c *Client) ListWorkloads(ctx context.Context) ([]runtime.ContainerInfo, er // List pods with the toolhive label namespace := getCurrentNamespace() + logger.Debugf("listing pods in namespace %s", namespace) pods, err := c.client.CoreV1().Pods(namespace).List(ctx, metav1.ListOptions{ LabelSelector: labelSelector, }) @@ -1078,43 +1158,3 @@ func configureMCPContainer( return nil } - -// getNamespaceFromServiceAccount attempts to read the namespace from the service account token file -func
getNamespaceFromServiceAccount() (string, error) { - data, err := os.ReadFile("/var/run/secrets/kubernetes.io/serviceaccount/namespace") - if err != nil { - return "", fmt.Errorf("failed to read namespace file: %w", err) - } - return string(data), nil -} - -// getNamespaceFromEnv attempts to get the namespace from environment variables -func getNamespaceFromEnv() (string, error) { - ns := os.Getenv("POD_NAMESPACE") - if ns == "" { - return "", fmt.Errorf("POD_NAMESPACE environment variable not set") - } - return ns, nil -} - -// getCurrentNamespace returns the namespace the pod is running in. -// It tries multiple methods in order: -// 1. Reading from the service account token file -// 2. Getting the namespace from environment variables -// 3. Falling back to "default" if both methods fail -func getCurrentNamespace() string { - // Method 1: Try to read from the service account namespace file - ns, err := getNamespaceFromServiceAccount() - if err == nil { - return ns - } - - // Method 2: Try to get the namespace from environment variables - ns, err = getNamespaceFromEnv() - if err == nil { - return ns - } - - // Method 3: Fall back to default - return "default" -} diff --git a/pkg/container/kubernetes/client_test.go b/pkg/container/kubernetes/client_test.go index 784b063a2..b8ae3bac4 100644 --- a/pkg/container/kubernetes/client_test.go +++ b/pkg/container/kubernetes/client_test.go @@ -3,6 +3,7 @@ package kubernetes import ( "context" "encoding/json" + "os" "testing" "github.com/stretchr/testify/assert" @@ -13,6 +14,7 @@ import ( corev1apply "k8s.io/client-go/applyconfigurations/core/v1" "k8s.io/client-go/kubernetes" "k8s.io/client-go/kubernetes/fake" + "k8s.io/client-go/rest" "github.com/stacklok/toolhive/pkg/container/runtime" "github.com/stacklok/toolhive/pkg/logger" @@ -164,6 +166,7 @@ func TestCreateContainerWithPodTemplatePatch(t *testing.T) { client := &Client{ runtimeType: runtime.TypeKubernetes, client: clientset, + config: &rest.Config{}, 
waitForStatefulSetReadyFunc: mockWaitForStatefulSetReady, } // Create workload options with the pod template patch @@ -662,6 +665,7 @@ func TestCreateContainerWithMCP(t *testing.T) { client := &Client{ runtimeType: runtime.TypeKubernetes, client: clientset, + config: &rest.Config{}, waitForStatefulSetReadyFunc: mockWaitForStatefulSetReady, } @@ -717,3 +721,33 @@ func TestCreateContainerWithMCP(t *testing.T) { }) } } + +// TestNewClientConfigFallback tests that NewClient properly falls back to remote config when in-cluster config fails +func TestNewClientConfigFallback(t *testing.T) { + // Not parallel: this test mutates the process-wide KUBECONFIG environment variable. + + // Test that NewClient handles the fallback gracefully + // This test will fail to create in-cluster config (expected) and should fall back to remote config + // Since we're not in a real cluster, both should fail, but we can verify the error messages + + // Save original environment variables + originalKubeconfig := os.Getenv("KUBECONFIG") + defer func() { + if originalKubeconfig != "" { + os.Setenv("KUBECONFIG", originalKubeconfig) + } else { + os.Unsetenv("KUBECONFIG") + } + }() + + // Set a non-existent kubeconfig to ensure remote config fails + os.Setenv("KUBECONFIG", "/non/existent/path") + + // Try to create a client - this should fail with both in-cluster and remote config + _, err := NewClient(context.Background()) + + // The error should indicate that both in-cluster and remote config failed + require.Error(t, err) + assert.Contains(t, err.Error(), "failed to create both in-cluster and remote cluster config") + assert.Contains(t, err.Error(), "kubeconfig file not found") +} diff --git a/pkg/container/runtime/types.go b/pkg/container/runtime/types.go index 18b2fa4ac..6d004cac4 100644 --- a/pkg/container/runtime/types.go +++ b/pkg/container/runtime/types.go @@ -9,6 +9,7 @@ import ( "os" "time" + "github.com/spf13/viper" "github.com/stacklok/toolhive/pkg/ignore" "github.com/stacklok/toolhive/pkg/permissions" ) @@ -285,6 +286,11 @@ type Mount struct { //
IsKubernetesRuntime returns true if the runtime is Kubernetes // isn't the best way to do this, but for now it's good enough func IsKubernetesRuntime() bool { + // Check explicit flag first + if runtimeFlag := viper.GetString("runtime"); runtimeFlag != "" { + return runtimeFlag == "kubernetes" + } + // Fall back to environment detection (original logic) return os.Getenv("KUBERNETES_SERVICE_HOST") != "" }