diff --git a/teams/team-riker/dev/templates/2048.yaml b/teams/team-riker/dev/templates/2048.yaml deleted file mode 100644 index 17dda8484..000000000 --- a/teams/team-riker/dev/templates/2048.yaml +++ /dev/null @@ -1,125 +0,0 @@ -apiVersion: apps/v1 -kind: Deployment -metadata: - name: deployment-2048 - labels: - app: "2048" - {{- toYaml .Values.labels | nindent 4 }} -spec: - selector: - matchLabels: - app.kubernetes.io/name: app-2048 - replicas: 3 - strategy: - rollingUpdate: - maxSurge: 25% - maxUnavailable: 25% - type: RollingUpdate - template: - metadata: - labels: - app.kubernetes.io/name: app-2048 - spec: - automountServiceAccountToken: false - containers: - - image: public.ecr.aws/l6m2t8p7/docker-2048:latest - imagePullPolicy: Always - name: app-2048 - ports: - - containerPort: 80 - protocol: TCP - resources: - limits: - cpu: 500m - memory: 512Mi - requests: - cpu: 250m - memory: 50Mi - dnsPolicy: ClusterFirst - restartPolicy: Always - schedulerName: default-scheduler - securityContext: {} - shareProcessNamespace: false - terminationGracePeriodSeconds: 30 - {{ if .Values.spec.karpenterInstanceProfile }} - nodeSelector: - team: default - type: karpenter - tolerations: - - key: 'karpenter' - operator: 'Exists' - effect: 'NoSchedule' - {{ end }} - topologySpreadConstraints: - - maxSkew: 1 - topologyKey: topology.kubernetes.io/zone - whenUnsatisfiable: DoNotSchedule - labelSelector: - matchLabels: - app.kubernetes.io/name: app-2048 ---- -apiVersion: v1 -kind: Service -metadata: - namespace: team-riker - name: service-2048 - labels: - app: "2048" - {{- toYaml .Values.labels | nindent 4 }} -spec: - ports: - - port: 80 - targetPort: 80 - protocol: TCP - type: NodePort - selector: - app.kubernetes.io/name: app-2048 ---- -apiVersion: networking.k8s.io/v1 -kind: Ingress -metadata: - namespace: team-riker - name: ingress-2048 - labels: - app: "2048" - {{- toYaml .Values.labels | nindent 4 }} - annotations: -{{ if eq .Values.spec.ingress.type "nginx" }} - 
kubernetes.io/ingress.class: "nginx" -{{ else }} - alb.ingress.kubernetes.io/scheme: internet-facing - alb.ingress.kubernetes.io/target-type: ip - {{ if .Values.spec.ingress.host }} - alb.ingress.kubernetes.io/listen-ports: '[{"HTTP":80,"HTTPS": 443}]' - alb.ingress.kubernetes.io/ssl-redirect: '443' - {{ else }} - alb.ingress.kubernetes.io/listen-ports: '[{"HTTP": 80}]' - {{ end }} - alb.ingress.kubernetes.io/tags: Environment={{ .Values.labels.env }},Team=Riker -{{ end }} - {{ if .Values.spec.ingress.host }} - external-dns.alpha.kubernetes.io/set-identifier: {{ .Values.spec.clusterName }} - external-dns.alpha.kubernetes.io/aws-weight: '{{ .Values.spec.ingress.route53_weight }}' - external-dns.alpha.kubernetes.io/ttl: "10" - {{ end }} -spec: -{{ if eq .Values.spec.ingress.type "nginx" }} - kubernetes.io/ingress.class: "nginx" -{{ else }} - ingressClassName: alb -{{ end }} - rules: -{{ if .Values.spec.ingress.host }} - - host: 2048.{{ .Values.spec.ingress.host }} -{{ else }} - - host: -{{ end }} - http: - paths: - - path: / - pathType: Prefix - backend: - service: - name: service-2048 - port: - number: 80 diff --git a/teams/team-riker/dev/templates/alb-skiapp/deployment.yaml b/teams/team-riker/dev/templates/alb-skiapp/deployment.yaml new file mode 100644 index 000000000..e4f8887e8 --- /dev/null +++ b/teams/team-riker/dev/templates/alb-skiapp/deployment.yaml @@ -0,0 +1,50 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: skiapp-deployment + namespace: team-riker +spec: + selector: + matchLabels: + app: skiapp + replicas: 3 + template: + metadata: + labels: + app: skiapp + spec: + containers: + - name: skiapp + image: sharepointoscar/skiapp:v1 + env: + - name: CLUSTER_NAME + value: {{.Values.spec.clusterName}} + ports: + - containerPort: 8080 + resources: + requests: + memory: '64Mi' + cpu: '250m' + limits: + memory: '128Mi' + cpu: '500m' + {{ if .Values.spec.karpenterInstanceProfile }} + nodeSelector: # <- add nodeselector, toleration and spread 
constraints + team: default + type: karpenter + tolerations: + - key: 'karpenter' + operator: 'Exists' + effect: 'NoSchedule' + {{ end }} + topologySpreadConstraints: + - maxSkew: 1 + topologyKey: topology.kubernetes.io/zone + whenUnsatisfiable: DoNotSchedule + labelSelector: + matchLabels: + app: skiapp + tolerations: + - key: 'karpenter' + operator: 'Exists' + effect: 'NoSchedule' \ No newline at end of file diff --git a/teams/team-riker/dev/templates/alb-skiapp/ingress.yaml b/teams/team-riker/dev/templates/alb-skiapp/ingress.yaml new file mode 100644 index 000000000..ba87560e4 --- /dev/null +++ b/teams/team-riker/dev/templates/alb-skiapp/ingress.yaml @@ -0,0 +1,26 @@ +apiVersion: networking.k8s.io/v1 +kind: Ingress +metadata: + name: skiapp-ingress + namespace: team-riker + annotations: + alb.ingress.kubernetes.io/group.name: riker + alb.ingress.kubernetes.io/scheme: internet-facing + alb.ingress.kubernetes.io/target-type: ip + alb.ingress.kubernetes.io/listen-ports: '[{"HTTP": 80}]' + #alb.ingress.kubernetes.io/listen-ports: '[{"HTTP": 80}, {"HTTPS":443}]' + #alb.ingress.kubernetes.io/actions.ssl-redirect: '{"Type": "redirect", "RedirectConfig": { "Protocol": "HTTPS", "Port": "443", "StatusCode": "HTTP_301"}}' + alb.ingress.kubernetes.io/tags: Environment=dev,Team=Riker +spec: + ingressClassName: alb + rules: + - host: #skiapp.{{ .Values.spec.ingress.host }} + http: + paths: + - path: / + pathType: Prefix + backend: + service: + name: skiapp-service + port: + number: 80 diff --git a/teams/team-riker/dev/templates/alb-skiapp/service.yaml b/teams/team-riker/dev/templates/alb-skiapp/service.yaml new file mode 100644 index 000000000..727b1bc18 --- /dev/null +++ b/teams/team-riker/dev/templates/alb-skiapp/service.yaml @@ -0,0 +1,13 @@ +apiVersion: v1 +kind: Service +metadata: + name: skiapp-service + namespace: team-riker +spec: + ports: + - port: 80 + targetPort: 8080 + protocol: TCP + type: NodePort + selector: + app: skiapp diff --git 
a/teams/team-riker/dev/templates/karpenter.yaml b/teams/team-riker/dev/templates/karpenter.yaml new file mode 100644 index 000000000..662749006 --- /dev/null +++ b/teams/team-riker/dev/templates/karpenter.yaml @@ -0,0 +1,84 @@ +{{ if .Values.spec.karpenterInstanceProfile }} +apiVersion: karpenter.k8s.aws/v1alpha1 +kind: AWSNodeTemplate +metadata: + name: karpenter-default + labels: + {{- toYaml .Values.labels | nindent 4 }} +spec: + instanceProfile: '{{ .Values.spec.karpenterInstanceProfile }}' + subnetSelector: + kubernetes.io/cluster/{{ .Values.spec.clusterName }}: '*' + kubernetes.io/role/internal-elb: '1' # to select only private subnets + securityGroupSelector: + aws:eks:cluster-name: '{{ .Values.spec.clusterName }}' # Choose only security groups of nodes + tags: + karpenter.sh/cluster_name: {{.Values.spec.clusterName}} + karpenter.sh/provisioner: default + metadataOptions: + httpEndpoint: enabled + httpProtocolIPv6: disabled + httpPutResponseHopLimit: 2 + httpTokens: required +--- +apiVersion: karpenter.sh/v1alpha5 +kind: Provisioner +metadata: + name: default + labels: + {{- toYaml .Values.labels | nindent 4 }} +spec: + consolidation: + enabled: true + #ttlSecondsAfterEmpty: 60 # mutually exclusive with consolidation + requirements: + - key: "karpenter.k8s.aws/instance-category" + operator: In + values: ["c", "m"] + - key: karpenter.k8s.aws/instance-cpu + operator: Lt + values: + - '33' + - key: 'kubernetes.io/arch' + operator: In + values: ['amd64'] + - key: karpenter.sh/capacity-type + operator: In + values: ['on-demand'] + - key: kubernetes.io/os + operator: In + values: + - linux + providerRef: + name: karpenter-default + + ttlSecondsUntilExpired: 2592000 # 30 Days = 60 * 60 * 24 * 30 Seconds; + + # Priority given to the provisioner when the scheduler considers which provisioner + # to select. Higher weights indicate higher priority when comparing provisioners. + # Specifying no weight is equivalent to specifying a weight of 0. 
+ weight: 1 + limits: + resources: + cpu: '2k' + labels: + billing-team: default + team: default + type: karpenter + + # Do we want to apply some taints on the nodes ? + # taints: + # - key: karpenter + # value: 'true' + # effect: NoSchedule + + # Karpenter provides the ability to specify a few additional Kubelet args. + # These are all optional and provide support for additional customization and use cases. + kubeletConfiguration: + containerRuntime: containerd + maxPods: 110 + systemReserved: + cpu: '1' + memory: 5Gi + ephemeral-storage: 2Gi +{{ end }}