diff --git a/terraform/aws/aws-eks-tailscale-operator/README.md b/terraform/aws/aws-eks-tailscale-operator/README.md
new file mode 100644
index 0000000..7ed2ecf
--- /dev/null
+++ b/terraform/aws/aws-eks-tailscale-operator/README.md
@@ -0,0 +1,79 @@
+# aws-eks-tailscale-operator
+
+This example creates the following:
+
+- a VPC and related resources, including a NAT gateway
+- an EKS cluster with a managed node group
+- a Kubernetes namespace for the Tailscale operator with privileged pod security enforcement
+- the Tailscale Kubernetes operator, deployed via Helm
+- the IAM roles and security groups needed for EKS and Tailscale connectivity
+
+## Considerations
+
+- The EKS cluster API server allows both public and private access, so you can reach it while bootstrapping and over the tailnet afterwards
+- The Tailscale operator is deployed in a dedicated `tailscale` namespace with privileged pod security
+- OAuth credentials are passed to the Helm chart as sensitive Terraform values; the chart stores them in a Kubernetes Secret in the operator's namespace
+- The operator registers a Tailscale device that also serves as the Kubernetes API server proxy
+- Any additional Tailscale resources (such as ingress proxies) created by the operator will appear in your tailnet
+
+## Prerequisites
+
+- Create a [Tailscale OAuth client](https://tailscale.com/kb/1215/oauth-clients#setting-up-an-oauth-client) with the scopes the operator requires (typically write access for Devices Core and Auth Keys, owning the `tag:k8s-operator` tag)
+- Define the tags used in this example (`tag:k8s-operator`, `tag:k8s-api-server`, and by default `tag:k8s` for proxies) in your tailnet policy file
+- Configure the AWS CLI with permissions to manage EKS and related resources
+- Install `kubectl` for cluster access after deployment
+
+## To use
+
+Follow the documentation to configure the Terraform providers:
+
+- [AWS](https://registry.terraform.io/providers/hashicorp/aws/latest/docs)
+- [Kubernetes](https://registry.terraform.io/providers/hashicorp/kubernetes/latest/docs)
+- [Helm](https://registry.terraform.io/providers/hashicorp/helm/latest/docs)
+
+### Configure variables
+
+Create a `terraform.tfvars` file with your Tailscale OAuth credentials:
+
+```hcl
+tailscale_oauth_client_id     = "your-oauth-client-id"
+tailscale_oauth_client_secret = "your-oauth-client-secret"
+```
+
+### Deploy
+
+```shell
+terraform init
+terraform apply
+```
+
+After deployment, configure kubectl to access your cluster:
+
+```shell
+aws eks update-kubeconfig --region us-east-1 --name $(terraform output -raw cluster_name)
+```
+
+### Verify deployment
+
+Check that the Tailscale operator is running:
+
+```shell
+kubectl get pods -n tailscale
+kubectl logs -n tailscale deployment/operator
+```
+
+The operator should also appear as a device in the Tailscale admin console, using the hostname set in `main.tf`.
+
+## To destroy
+
+```shell
+terraform destroy
+```
+
+## Customization
+
+You can customize the EKS cluster configuration by modifying the locals in `main.tf`:
+
+- `cluster_version`: EKS Kubernetes version
+- `node_instance_type`: EC2 instance type for worker nodes
+- `desired_size`, `max_size`, `min_size`: node group scaling configuration
+- VPC CIDR blocks and subnet configurations
+
+The Tailscale operator itself is configured through the `helm_release` resource values in `main.tf`.
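+
+## Exposing a Service over Tailscale
+
+Once the operator is running, it can also expose in-cluster workloads to your tailnet. As a minimal sketch (the `hello` Service name, namespace, selector, and ports are placeholders for your own workload), annotating a Service with `tailscale.com/expose` asks the operator to create a Tailscale proxy for it:
+
+```hcl
+resource "kubernetes_service" "hello" {
+  metadata {
+    name      = "hello" # placeholder; assumes a matching Deployment exists
+    namespace = "default"
+    annotations = {
+      # Ask the Tailscale operator to expose this Service to the tailnet.
+      "tailscale.com/expose" = "true"
+    }
+  }
+
+  spec {
+    selector = {
+      app = "hello"
+    }
+    port {
+      port        = 80
+      target_port = 8080
+    }
+  }
+}
+```
+
+After `terraform apply`, the proxy should appear as a new device in your tailnet, subject to your tailnet policy file.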
\ No newline at end of file
diff --git a/terraform/aws/aws-eks-tailscale-operator/main.tf b/terraform/aws/aws-eks-tailscale-operator/main.tf
new file mode 100644
index 0000000..26d2ff2
--- /dev/null
+++ b/terraform/aws/aws-eks-tailscale-operator/main.tf
@@ -0,0 +1,286 @@
+locals {
+  name = "example-${basename(path.cwd)}"
+
+  aws_tags = {
+    Name = local.name
+  }
+
+  // Modify these to use your own VPC
+  vpc_cidr_block = module.vpc.vpc_cidr_block
+  vpc_id         = module.vpc.vpc_id
+  subnet_ids     = module.vpc.private_subnets
+  // Not attached to the managed node group by default; attach to instances
+  // that should accept direct Tailscale connections.
+  security_group_ids = [aws_security_group.tailscale.id]
+
+  # EKS cluster configuration
+  cluster_version    = "1.30"
+  node_instance_type = "t3.medium"
+  node_capacity_type = "ON_DEMAND"
+  node_ami_type      = "AL2_x86_64"
+  desired_size       = 2
+  max_size           = 4
+  min_size           = 1
+
+  # Tailscale configuration
+  tailscale_oauth_client_id     = var.tailscale_oauth_client_id
+  tailscale_oauth_client_secret = var.tailscale_oauth_client_secret
+}
+
+// Remove this to use your own VPC.
+module "vpc" {
+  source = "../internal-modules/aws-vpc"
+
+  name = local.name
+  tags = local.aws_tags
+
+  cidr = "10.0.0.0/16"
+
+  public_subnets  = ["10.0.1.0/24", "10.0.2.0/24"]
+  private_subnets = ["10.0.10.0/24", "10.0.20.0/24"]
+}
+
+# EKS Cluster
+resource "aws_eks_cluster" "main" {
+  name     = local.name
+  role_arn = aws_iam_role.cluster.arn
+  version  = local.cluster_version
+
+  vpc_config {
+    subnet_ids              = local.subnet_ids
+    endpoint_private_access = true
+    endpoint_public_access  = true
+    public_access_cidrs     = ["0.0.0.0/0"]
+    security_group_ids      = [aws_security_group.cluster.id]
+  }
+
+  # Enable control plane logging
+  enabled_cluster_log_types = ["api", "audit", "authenticator", "controllerManager", "scheduler"]
+
+  tags = local.aws_tags
+
+  depends_on = [
+    aws_iam_role_policy_attachment.cluster_AmazonEKSClusterPolicy,
+    aws_cloudwatch_log_group.eks_cluster,
+    module.vpc.nat_ids, # remove if using your own VPC
+  ]
+}
+
+# EKS Node Group
+resource "aws_eks_node_group" "main" {
+  cluster_name    = aws_eks_cluster.main.name
+  node_group_name = "${local.name}-nodes"
+  node_role_arn   = aws_iam_role.node_group.arn
+  subnet_ids      = local.subnet_ids
+
+  capacity_type  = local.node_capacity_type
+  ami_type       = local.node_ami_type
+  instance_types = [local.node_instance_type]
+
+  scaling_config {
+    desired_size = local.desired_size
+    max_size     = local.max_size
+    min_size     = local.min_size
+  }
+
+  update_config {
+    max_unavailable = 1
+  }
+
+  tags = local.aws_tags
+
+  depends_on = [
+    aws_iam_role_policy_attachment.node_group_AmazonEKSWorkerNodePolicy,
+    aws_iam_role_policy_attachment.node_group_AmazonEKS_CNI_Policy,
+    aws_iam_role_policy_attachment.node_group_AmazonEC2ContainerRegistryReadOnly,
+  ]
+}
+
+# CloudWatch Log Group for EKS Cluster
+resource "aws_cloudwatch_log_group" "eks_cluster" {
+  name              = "/aws/eks/${local.name}/cluster"
+  retention_in_days = 7
+  tags              = local.aws_tags
+}
+
+# Kubernetes namespace for the Tailscale operator
+resource "kubernetes_namespace" "tailscale_operator" {
+  metadata {
+    name = "tailscale"
+    labels = {
+      # The operator and its proxies require privileged pod security.
+      "pod-security.kubernetes.io/enforce" = "privileged"
+    }
+  }
+
+  depends_on = [aws_eks_node_group.main]
+}
+
+# Deploy the Tailscale operator using Helm
+resource "helm_release" "tailscale_operator" {
+  name       = "tailscale-operator"
+  repository = "https://pkgs.tailscale.com/helmcharts"
+  chart      = "tailscale-operator"
+  version    = "1.84.0"
+  namespace  = kubernetes_namespace.tailscale_operator.metadata[0].name
+
+  values = [
+    yamlencode({
+      operatorConfig = {
+        image = {
+          repo = "tailscale/k8s-operator"
+          tag  = "v1.84.0"
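+          # Pinned to match the chart version above; keep the two in sync when upgrading.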
+        }
+        hostname = "${local.name}-operator"
+        # The OAuth client must be permitted to issue these tags.
+        defaultTags = [
+          "tag:k8s-operator",
+          "tag:k8s-api-server",
+        ]
+      }
+      apiServerProxyConfig = {
+        # Expose the Kubernetes API server to the tailnet through the operator.
+        mode = "true"
+      }
+    })
+  ]
+
+  # OAuth credentials are passed with set_sensitive, rather than inline in
+  # `values`, so they are redacted from plan and apply output.
+  set_sensitive {
+    name  = "oauth.clientId"
+    value = local.tailscale_oauth_client_id
+  }
+
+  set_sensitive {
+    name  = "oauth.clientSecret"
+    value = local.tailscale_oauth_client_secret
+  }
+
+  depends_on = [
+    kubernetes_namespace.tailscale_operator,
+    aws_eks_node_group.main,
+  ]
+}
+
+# Security group for the EKS cluster
+resource "aws_security_group" "cluster" {
+  name_prefix = "${local.name}-cluster-"
+  vpc_id      = local.vpc_id
+
+  tags = merge(
+    local.aws_tags,
+    {
+      Name = "${local.name}-cluster"
+    }
+  )
+}
+
+resource "aws_security_group_rule" "cluster_egress" {
+  security_group_id = aws_security_group.cluster.id
+  type              = "egress"
+  from_port         = 0
+  to_port           = 0
+  protocol          = "-1"
+  cidr_blocks       = ["0.0.0.0/0"]
+  ipv6_cidr_blocks  = ["::/0"]
+}
+
+# Security group for Tailscale traffic
+resource "aws_security_group" "tailscale" {
+  vpc_id = local.vpc_id
+  name   = "${local.name}-tailscale"
+
+  tags = merge(
+    local.aws_tags,
+    {
+      Name = "${local.name}-tailscale"
+    }
+  )
+}
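+# Tailscale uses UDP 41641 for direct (peer-to-peer) connections; if this port
+# is unreachable, traffic falls back to relayed DERP connections.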
+resource "aws_security_group_rule" "tailscale_ingress" {
+  security_group_id = aws_security_group.tailscale.id
+  type              = "ingress"
+  from_port         = 41641
+  to_port           = 41641
+  protocol          = "udp"
+  cidr_blocks       = ["0.0.0.0/0"]
+  ipv6_cidr_blocks  = ["::/0"]
+}
+
+resource "aws_security_group_rule" "tailscale_egress" {
+  security_group_id = aws_security_group.tailscale.id
+  type              = "egress"
+  from_port         = 0
+  to_port           = 0
+  protocol          = "-1"
+  cidr_blocks       = ["0.0.0.0/0"]
+  ipv6_cidr_blocks  = ["::/0"]
+}
+
+resource "aws_security_group_rule" "internal_vpc_ingress_ipv4" {
+  security_group_id = aws_security_group.tailscale.id
+  type              = "ingress"
+  from_port         = 0
+  to_port           = 0
+  protocol          = "-1"
+  cidr_blocks       = [local.vpc_cidr_block]
+}
+
+# IAM Role for EKS Cluster
+resource "aws_iam_role" "cluster" {
+  name = "${local.name}-cluster-role"
+
+  assume_role_policy = jsonencode({
+    Version = "2012-10-17"
+    Statement = [
+      {
+        Action = "sts:AssumeRole"
+        Effect = "Allow"
+        Principal = {
+          Service = "eks.amazonaws.com"
+        }
+      }
+    ]
+  })
+
+  tags = local.aws_tags
+}
+
+resource "aws_iam_role_policy_attachment" "cluster_AmazonEKSClusterPolicy" {
+  policy_arn = "arn:aws:iam::aws:policy/AmazonEKSClusterPolicy"
+  role       = aws_iam_role.cluster.name
+}
+
+# IAM Role for EKS Node Group
+resource "aws_iam_role" "node_group" {
+  name = "${local.name}-node-group-role"
+
+  assume_role_policy = jsonencode({
+    Version = "2012-10-17"
+    Statement = [
+      {
+        Action = "sts:AssumeRole"
+        Effect = "Allow"
+        Principal = {
+          Service = "ec2.amazonaws.com"
+        }
+      }
+    ]
+  })
+
+  tags = local.aws_tags
+}
+
+resource "aws_iam_role_policy_attachment" "node_group_AmazonEKSWorkerNodePolicy" {
+  policy_arn = "arn:aws:iam::aws:policy/AmazonEKSWorkerNodePolicy"
+  role       = aws_iam_role.node_group.name
+}
+
+resource "aws_iam_role_policy_attachment" "node_group_AmazonEKS_CNI_Policy" {
+  policy_arn = "arn:aws:iam::aws:policy/AmazonEKS_CNI_Policy"
+  role       = aws_iam_role.node_group.name
+}
+
+resource "aws_iam_role_policy_attachment" "node_group_AmazonEC2ContainerRegistryReadOnly" {
+  policy_arn = "arn:aws:iam::aws:policy/AmazonEC2ContainerRegistryReadOnly"
+  role       = aws_iam_role.node_group.name
+}
\ No newline at end of file
diff --git a/terraform/aws/aws-eks-tailscale-operator/outputs.tf b/terraform/aws/aws-eks-tailscale-operator/outputs.tf
new file mode 100644
index 0000000..9f4b2f5
--- /dev/null
+++ b/terraform/aws/aws-eks-tailscale-operator/outputs.tf
@@ -0,0 +1,39 @@
+output "vpc_id" {
+  description = "VPC ID where the EKS cluster is deployed"
+  value       = module.vpc.vpc_id
+}
+
+output "nat_public_ips" {
+  description = "Public IPs of the NAT Gateways"
+  value       = module.vpc.nat_public_ips
+}
+
+output "cluster_endpoint" {
+  description = "EKS cluster endpoint"
+  value       = aws_eks_cluster.main.endpoint
+}
+
+output "cluster_name" {
+  description = "EKS cluster name"
+  value       = aws_eks_cluster.main.name
+}
+
+output "cluster_arn" {
+  description = "EKS cluster ARN"
+  value       = aws_eks_cluster.main.arn
+}
+
+output "cluster_certificate_authority_data" {
+  description = "EKS cluster certificate authority data"
+  value       = aws_eks_cluster.main.certificate_authority[0].data
+}
+
+output "node_group_arn" {
+  description = "EKS node group ARN"
+  value       = aws_eks_node_group.main.arn
+}
+
+output "tailscale_operator_namespace" {
+  description = "Kubernetes namespace where the Tailscale operator is deployed"
+  value       = kubernetes_namespace.tailscale_operator.metadata[0].name
+}
\ No newline at end of file
diff --git a/terraform/aws/aws-eks-tailscale-operator/variables.tf b/terraform/aws/aws-eks-tailscale-operator/variables.tf
new file mode 100644
index 0000000..f4d85ca
--- /dev/null
+++ b/terraform/aws/aws-eks-tailscale-operator/variables.tf
@@ -0,0 +1,23 @@
+variable "tailscale_oauth_client_id" {
+  description = "Tailscale OAuth client ID"
+  type        = string
+  sensitive   = true
+
+  validation {
+    condition     = length(var.tailscale_oauth_client_id) > 0
+    error_message = "Tailscale OAuth client ID must not be empty."
+  }
+}
+
+variable "tailscale_oauth_client_secret" {
+  description = "Tailscale OAuth client secret"
+  type        = string
+  sensitive   = true
+
+  validation {
+    condition     = length(var.tailscale_oauth_client_secret) > 0
+    error_message = "Tailscale OAuth client secret must not be empty."
+  }
+}
\ No newline at end of file
diff --git a/terraform/aws/aws-eks-tailscale-operator/versions.tf b/terraform/aws/aws-eks-tailscale-operator/versions.tf
new file mode 100644
index 0000000..d7525b8
--- /dev/null
+++ b/terraform/aws/aws-eks-tailscale-operator/versions.tf
@@ -0,0 +1,49 @@
+terraform {
+  required_version = ">= 1.0"
+
+  required_providers {
+    aws = {
+      source  = "hashicorp/aws"
+      version = ">= 5.0, < 6.0"
+    }
+    kubernetes = {
+      source  = "hashicorp/kubernetes"
+      version = ">= 2.20"
+    }
+    helm = {
+      source = "hashicorp/helm"
+      # The helm provider 3.x series changed the `kubernetes` and
+      # `set_sensitive` block syntax used here, so stay on 2.x.
+      version = ">= 2.10, < 3.0"
+    }
+  }
+}
+
+# Configure the AWS Provider
+provider "aws" {
+  region = "us-east-1"
+}
+
+# Configure the Kubernetes Provider
+provider "kubernetes" {
+  host                   = aws_eks_cluster.main.endpoint
+  cluster_ca_certificate = base64decode(aws_eks_cluster.main.certificate_authority[0].data)
+
+  exec {
+    api_version = "client.authentication.k8s.io/v1beta1"
+    command     = "aws"
+    args        = ["eks", "get-token", "--cluster-name", aws_eks_cluster.main.name]
+  }
+}
+
+# Configure the Helm Provider
+provider "helm" {
+  kubernetes {
+    host                   = aws_eks_cluster.main.endpoint
+    cluster_ca_certificate = base64decode(aws_eks_cluster.main.certificate_authority[0].data)
+
+    exec {
+      api_version = "client.authentication.k8s.io/v1beta1"
+      command     = "aws"
+      args        = ["eks", "get-token", "--cluster-name", aws_eks_cluster.main.name]
+    }
+  }
+}
\ No newline at end of file