diff --git a/.github/workflows/lint.yaml b/.github/workflows/lint.yaml
index f48dd5a..d72da38 100644
--- a/.github/workflows/lint.yaml
+++ b/.github/workflows/lint.yaml
@@ -27,5 +27,8 @@ jobs:
       - name: Set up chart-testing
         uses: helm/chart-testing-action@v2.6.1
 
+      - name: Update Helm dependencies
+        run: helm dependency update charts/glassflow-etl
+
       - name: Run chart-testing (lint)
         run: ct lint --config ct.yaml
diff --git a/charts/glassflow-etl/Chart.lock b/charts/glassflow-etl/Chart.lock
deleted file mode 100644
index c97134c..0000000
--- a/charts/glassflow-etl/Chart.lock
+++ /dev/null
@@ -1,9 +0,0 @@
-dependencies:
-- name: glassflow-operator
-  repository: https://glassflow.github.io/glassflow-etl-k8s-operator
-  version: 0.5.0
-- name: nats
-  repository: https://nats-io.github.io/k8s/helm/charts/
-  version: 1.3.6
-digest: sha256:67090c7537c70c84e6b2dd22a378ccc340990d0920d19cd948d9013e6d39a2e7
-generated: "2025-09-30T13:37:26.828781+02:00"
diff --git a/charts/glassflow-etl/Chart.yaml b/charts/glassflow-etl/Chart.yaml
index 61aaf2f..b27261b 100644
--- a/charts/glassflow-etl/Chart.yaml
+++ b/charts/glassflow-etl/Chart.yaml
@@ -15,17 +15,17 @@ type: application
 # This is the chart version. This version number should be incremented each time you make changes
 # to the chart and its templates, including the app version.
 # Versions are expected to follow Semantic Versioning (https://semver.org/)
-version: 0.2.8
+version: 0.3.0
 
 # This is the version number of the application being deployed. This version number should be
 # incremented each time you make changes to the application. Versions are not expected to
 # follow Semantic Versioning. They should reflect the version the application is using.
 # It is recommended to use it with quotes.
-appVersion: "2.1.2"
+appVersion: "2.1.3"
 
 dependencies:
 - name: glassflow-operator
-  version: "0.5.0"
+  version: "0.5.3"
   repository: https://glassflow.github.io/glassflow-etl-k8s-operator
 - name: nats
   version: "1.3.6"
diff --git a/charts/glassflow-etl/templates/deployment.yaml b/charts/glassflow-etl/templates/deployment.yaml
index db63c1e..1a40e94 100644
--- a/charts/glassflow-etl/templates/deployment.yaml
+++ b/charts/glassflow-etl/templates/deployment.yaml
@@ -112,11 +112,23 @@ spec:
             - name: GLASSFLOW_NATS_SERVER
              value: "nats://{{ .Release.Name }}-nats.{{ .Release.Namespace }}.svc.cluster.local:4222"
             - name: GLASSFLOW_LOG_LEVEL
-              value: "INFO"
+              value: "{{ .Values.api.logLevel }}"
             - name: GLASSFLOW_RUN_LOCAL
              value: "false"
             - name: GLASSFLOW_K8S_NAMESPACE
              value: {{ .Release.Namespace }}
+            - name: GLASSFLOW_OTEL_SERVICE_NAME
+              value: "glassflow-api"
+            - name: GLASSFLOW_OTEL_SERVICE_NAMESPACE
+              value: "{{ .Release.Namespace }}"
+            - name: GLASSFLOW_OTEL_SERVICE_VERSION
+              value: "{{ .Values.api.image.tag }}"
+            - name: GLASSFLOW_OTEL_LOGS_ENABLED
+              value: "{{ .Values.global.observability.logs.enabled }}"
+            - name: GLASSFLOW_OTEL_METRICS_ENABLED
+              value: "{{ .Values.global.observability.metrics.enabled }}"
+            - name: OTEL_EXPORTER_OTLP_ENDPOINT
+              value: "http://{{ .Release.Name }}-otel-collector.{{ .Release.Namespace }}.svc.cluster.local:4318"
             {{- range .Values.api.env }}
             - name: {{ .name | quote }}
              value: {{ .value | quote }}
diff --git a/charts/glassflow-etl/templates/otel-collector-configmap.yaml b/charts/glassflow-etl/templates/otel-collector-configmap.yaml
new file mode 100644
index 0000000..8ac67b5
--- /dev/null
+++ b/charts/glassflow-etl/templates/otel-collector-configmap.yaml
@@ -0,0 +1,64 @@
+{{- if or .Values.global.observability.metrics.enabled .Values.global.observability.logs.enabled }}
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: {{ .Release.Name }}-otel-collector-config
+  namespace: {{ .Release.Namespace }}
+data:
+  otel-collector-config.yaml: |
+    receivers:
+      otlp:
+        protocols:
+          grpc:
+            endpoint: 0.0.0.0:4317
+          http:
+            endpoint: 0.0.0.0:4318
+
+    processors:
+      batch:
+        timeout: 1s
+        send_batch_size: 1024
+      resource:
+        attributes:
+          - key: service.name
+            from_attribute: service.name
+            action: insert
+          - key: service.version
+            from_attribute: service.version
+            action: insert
+          - key: service.namespace
+            from_attribute: service.namespace
+            action: insert
+
+    exporters:
+{{- if .Values.global.observability.logs.enabled }}
+      otlp: {{ toYaml .Values.global.observability.logs.exporter.otlp | nindent 8 }}
+{{- end }}
+{{- if .Values.global.observability.metrics.enabled }}
+      prometheus:
+        endpoint: "0.0.0.0:9090"
+        namespace: {{ .Release.Namespace | replace "-" "_" }}
+        send_timestamps: true
+        enable_open_metrics: true
+{{- end }}
+
+    extensions:
+      health_check:
+        endpoint: 0.0.0.0:13133
+
+    service:
+      extensions: [health_check]
+      pipelines:
+{{- if .Values.global.observability.logs.enabled }}
+        logs:
+          receivers: [otlp]
+          processors: [batch, resource]
+          exporters: [otlp]
+{{- end }}
+{{- if .Values.global.observability.metrics.enabled }}
+        metrics:
+          receivers: [otlp]
+          processors: [batch, resource]
+          exporters: [prometheus]
+{{- end }}
+{{- end }}
diff --git a/charts/glassflow-etl/templates/otel-collector-deployment.yaml b/charts/glassflow-etl/templates/otel-collector-deployment.yaml
new file mode 100644
index 0000000..261a512
--- /dev/null
+++ b/charts/glassflow-etl/templates/otel-collector-deployment.yaml
@@ -0,0 +1,63 @@
+{{- if or .Values.global.observability.metrics.enabled .Values.global.observability.logs.enabled }}
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: {{ .Release.Name }}-otel-collector
+  namespace: {{ .Release.Namespace }}
+  labels:
+    app: {{ .Release.Name }}-otel-collector
+spec:
+  replicas: 1
+  selector:
+    matchLabels:
+      app: {{ .Release.Name }}-otel-collector
+  template:
+    metadata:
+      labels:
+        app: {{ .Release.Name }}-otel-collector
+    spec:
+      containers:
+        - name: {{ .Release.Name }}-otel-collector
+          image: otel/opentelemetry-collector-contrib:0.108.0
+          command:
+            - "/otelcol-contrib"
+            - "--config=/etc/otel-collector-config.yaml"
+          resources:
+            requests:
+              cpu: 100m
+              memory: 128Mi
+            limits:
+              cpu: 500m
+              memory: 512Mi
+          ports:
+            - containerPort: 4317
+              name: otlp-grpc
+            - containerPort: 4318
+              name: otlp-http
+{{- if .Values.global.observability.metrics.enabled }}
+            - containerPort: 9090
+              name: prometheus
+{{- end }}
+            - containerPort: 13133
+              name: health-check
+          volumeMounts:
+            - name: config
+              mountPath: /etc/otel-collector-config.yaml
+              subPath: otel-collector-config.yaml
+          livenessProbe:
+            httpGet:
+              path: /
+              port: 13133
+            initialDelaySeconds: 30
+            periodSeconds: 30
+          readinessProbe:
+            httpGet:
+              path: /
+              port: 13133
+            initialDelaySeconds: 5
+            periodSeconds: 10
+      volumes:
+        - name: config
+          configMap:
+            name: {{ .Release.Name }}-otel-collector-config
+{{- end }}
diff --git a/charts/glassflow-etl/templates/otel-collector-service.yaml b/charts/glassflow-etl/templates/otel-collector-service.yaml
new file mode 100644
index 0000000..2b14c5f
--- /dev/null
+++ b/charts/glassflow-etl/templates/otel-collector-service.yaml
@@ -0,0 +1,24 @@
+{{- if or .Values.global.observability.metrics.enabled .Values.global.observability.logs.enabled }}
+apiVersion: v1
+kind: Service
+metadata:
+  name: {{ .Release.Name }}-otel-collector
+  namespace: {{ .Release.Namespace }}
+  labels:
+    app: {{ .Release.Name }}-otel-collector
+spec:
+  ports:
+    - name: otlp-grpc
+      port: 4317
+      targetPort: 4317
+    - name: otlp-http
+      port: 4318
+      targetPort: 4318
+{{- if .Values.global.observability.metrics.enabled }}
+    - name: prometheus
+      port: 9090
+      targetPort: 9090
+{{- end }}
+  selector:
+    app: {{ .Release.Name }}-otel-collector
+{{- end }}
diff --git a/charts/glassflow-etl/values.ingress.tls.yaml b/charts/glassflow-etl/values.ingress.tls.yaml
index b27443e..f7f9853 100644
--- a/charts/glassflow-etl/values.ingress.tls.yaml
+++ b/charts/glassflow-etl/values.ingress.tls.yaml
@@ -1,239 +1,3
@@ -# Default values for glassflow-etl with TLS configuration. -# This is a YAML-formatted file. -# Declare variables to be passed into your templates. - -# ============================================================================= -# GLOBAL SETTINGS -# ============================================================================= -# Global settings that apply across all components of the chart -global: - # Global image registry - if set, will be prepended to all image repositories - imageRegistry: "" - -# ============================================================================= -# API COMPONENT CONFIGURATION -# ============================================================================= -# Backend API service configuration for the GlassFlow ETL application -api: - # Number of API replicas for high availability - replicas: 1 - - # Container image configuration for the API component - image: - # Docker image repository for the GlassFlow ETL backend - repository: ghcr.io/glassflow/glassflow-etl-be - # Image tag/version to deploy - tag: stable - # Image pull policy: Always, IfNotPresent, or Never - pullPolicy: Always - - # Resource requirements and limits for the API pods - resources: - # Minimum resources required by the API pods - requests: - memory: "100Mi" # Memory request - adjust based on your workload - cpu: "100m" # CPU request - 250 millicores = 0.25 CPU cores - # Maximum resources the API pods can use - limits: - memory: "200Mi" # Memory limit - adjust based on your workload - cpu: "250m" # CPU limit - 500 millicores = 0.5 CPU cores - - service: - type: ClusterIP - port: 8081 - targetPort: 8081 - - # Environment variables for the API service - # Supported variables: - # - GLASSFLOW_LOG_FILE_PATH: Path to the log file (Default: /tmp/logs/glassflow) - # - GLASSFLOW_NATS_SERVER: NATS server address (Default: nats://{{ .Release.Name }}-nats.{{ .Release.Namespace }}.svc.cluster.local:4222) - # - GLASSFLOW_LOG_LEVEL: Log level (Default: INFO) - # Example: - 
# env: - # - name: GLASSFLOW_LOG_LEVEL - # value: "DEBUG" - env: {} - -# ============================================================================= -# UI COMPONENT CONFIGURATION -# ============================================================================= -# Frontend UI service configuration for the GlassFlow ETL application -ui: - # Number of UI replicas for high availability - replicas: 1 - - # Container image configuration for the UI component - image: - # Docker image repository for the GlassFlow ETL frontend - repository: ghcr.io/glassflow/glassflow-etl-fe - # Image tag/version to deploy - tag: stable - # Image pull policy: Always, IfNotPresent, or Never - pullPolicy: Always - - # Resource requirements and limits for the UI pods - resources: - # Minimum resources required by the UI pods - requests: - memory: "512Mi" # Memory request - adjust based on your workload - cpu: "100m" # CPU request - 100 millicores = 0.1 CPU cores - # Maximum resources the UI pods can use - limits: - memory: "1Gi" # Memory limit - adjust based on your workload - cpu: "200m" # CPU limit - 200 millicores = 0.2 CPU cores - - service: - type: ClusterIP - port: 8080 - targetPort: 8080 - - # Environment variables for the UI service - # Supported variables: - # - NEXT_PUBLIC_API_URL: API URL for the application - # Example: - # env: - # - name: NEXT_PUBLIC_API_URL - # value: "http://{{ .Release.Name }}-api.{{ .Release.Namespace }}.svc.cluster.local:8080" - env: {} - -# ============================================================================= -# GLASSFLOW OPERATOR CONFIGURATION -# ============================================================================= -# Kubernetes operator for managing ETL pipelines and related resources -glassflow-operator: - # Operator deployment configuration - controllerManager: - # Number of operator replicas for high availability - replicas: 1 - - # Operator manager configuration - manager: - # Container image configuration for the operator - image: - 
# Docker image repository for the GlassFlow ETL Kubernetes operator - repository: ghcr.io/glassflow/glassflow-etl-k8s-operator - # Image tag/version to deploy - tag: main - # Image pull policy: Always, IfNotPresent, or Never - pullPolicy: Always - - # Resource requirements and limits for the operator pods - resources: - # Maximum resources the operator pods can use - limits: - cpu: 500m # CPU limit - 500 millicores = 0.5 CPU cores - memory: 128Mi # Memory limit - adjust based on your workload - # Minimum resources required by the operator pods - requests: - cpu: 10m # CPU request - 10 millicores = 0.01 CPU cores - memory: 64Mi # Memory request - adjust based on your workload - - # Service account configuration for the operator - serviceAccount: - # Annotations to add to the operator service account - # Useful for IAM roles, OIDC providers, etc. - annotations: {} - - # Component image configurations - componentImages: - # Ingestor component docker image - ingestor: - repository: ghcr.io/glassflow/glassflow-etl-ingestor - tag: stable - - # Join component docker image - join: - repository: ghcr.io/glassflow/glassflow-etl-join - tag: stable - - # Sink component docker image - sink: - repository: ghcr.io/glassflow/glassflow-etl-sink - tag: stable - - # NATS configuration for the operator - # These settings are used when the operator needs to connect to NATS - # You can ignore this section if nats.enabled is true (default) - nats: - # NATS address for the operator to connect to - # Defaults to {{ .Release.Name }}-nats.{{ .Release.Namespace }}.svc.cluster.local if not specified - address: "" - - # NATS component address for the operator to connect to - # Defaults to the nats address if not specified - componentAddress: "" - - # Metrics service for the operator - metricsService: - # Service ports configuration - ports: - - name: https # Port name for service discovery - port: 8443 # Service port (external) - protocol: TCP # Protocol type - targetPort: 8443 # Container port 
(internal) - # Service type: ClusterIP, NodePort, or LoadBalancer - type: ClusterIP - -# ============================================================================= -# NATS CONFIGURATION -# ============================================================================= -# NATS messaging system configuration for internal communication -nats: - # Enable or disable NATS deployment - # Set to false if you want to use an external NATS instance - enabled: true - - # NATS configuration settings - # Note: Node selector for NATS should be set under podTemplate.merge.spec.nodeSelector - config: - # NATS clustering configuration for high availability - cluster: - # Enable NATS clustering (recommended for production) - enabled: true - # Port for cluster communication between NATS nodes - port: 6222 - # Number of NATS replicas - # IMPORTANT: Must be 2 or higher when JetStream is enabled - replicas: 3 - - # JetStream configuration for persistent messaging - jetstream: - # Enable JetStream for persistent message storage - enabled: true - - # Memory store configuration (fast but non-persistent) - memoryStore: - # Enable memory-based storage (not recommended for production) - enabled: false - # Maximum size of memory store - maxSize: 1Gi - - # File store configuration (persistent but slower) - fileStore: - # Enable file-based storage (recommended for production) - enabled: true - # Directory for storing JetStream data - dir: /data - # Persistent volume claim configuration - pvc: - # Enable PVC for persistent storage - enabled: true - # Size of the persistent volume - size: 100Gi - # Storage class for the PVC (uses global.storageClass if empty) - storageClassName: "" - - # Resource requirements and limits for NATS pods - resources: - # Minimum resources required by NATS pods - requests: - memory: "2Gi" # Memory request - adjust based on your workload - cpu: "500m" # CPU request - 500 millicores = 0.5 CPU cores - # Maximum resources NATS pods can use - limits: - memory: "4Gi" # 
Memory limit - adjust based on your workload - cpu: "1000m" # CPU limit - 1000 millicores = 1.0 CPU cores - # ============================================================================= # INGRESS CONFIGURATION WITH TLS # ============================================================================= @@ -274,101 +38,3 @@ ingress: - hosts: - "glassflow.example.com" secretName: "glassflow-tls-secret" - -# ============================================================================= -# SECURITY CONFIGURATION -# ============================================================================= -# Pod and container security context settings -podSecurityContext: {} - # fsGroup: 2000 # File system group ID - -# Container security context settings -securityContext: {} - # capabilities: - # drop: - # - ALL - # readOnlyRootFilesystem: true - # runAsNonRoot: true - # runAsUser: 1000 - -# ============================================================================= -# SERVICE ACCOUNT CONFIGURATION -# ============================================================================= -# Service account configuration for the application (UI and API) -serviceAccount: - # Create a new service account for the application (UI and API) - # Set to false if you want to use an existing service account - create: true - - # Automatically mount the service account's API credentials - # Useful for accessing the Kubernetes API from within pods - automount: true - - # Annotations to add to the service account - # Useful for IAM roles, OIDC providers, etc. 
- annotations: {} - - # Name of the service account to use - # If not set and create is true, a name is generated using the fullname template - # Example: "glassflow-etl-sa" - name: "" - -# ============================================================================= -# POD CONFIGURATION -# ============================================================================= -# Pod-level configuration settings -podAnnotations: {} # Annotations to add to all pods -podLabels: {} # Labels to add to all pods - -# Node selector for main application pods (UI and API) -# Useful for scheduling pods on specific nodes -# Example: -# nodeSelector: -# kubernetes.io/os: linux -# node-role.kubernetes.io/worker: "true" -nodeSelector: {} - -# Pod tolerations for scheduling -# Useful for running pods on nodes with taints -# Example: -# - key: "dedicated" -# operator: "Equal" -# value: "glassflow" -# effect: "NoSchedule" -tolerations: [] - -# Pod affinity rules for scheduling -# Useful for co-locating or spreading pods -# Example: -# podAffinity: -# requiredDuringSchedulingIgnoredDuringExecution: -# - labelSelector: -# matchExpressions: -# - key: app -# operator: In -# values: -# - glassflow-etl -# topologyKey: kubernetes.io/hostname -affinity: {} - -# ============================================================================= -# AUTOSCALING CONFIGURATION -# ============================================================================= -# Horizontal Pod Autoscaler (HPA) configuration -autoscaling: - # Enable or disable autoscaling - enabled: false - - # Minimum number of replicas when autoscaling - minReplicas: 1 - - # Maximum number of replicas when autoscaling - maxReplicas: 5 - - # Target CPU utilization percentage for scaling - # HPA will scale up when CPU usage exceeds this percentage - targetCPUUtilizationPercentage: 80 - - # Target memory utilization percentage for scaling (commented out) - # Uncomment and set a value to enable memory-based scaling - # 
targetMemoryUtilizationPercentage: 80 diff --git a/charts/glassflow-etl/values.ingress.yaml b/charts/glassflow-etl/values.ingress.yaml index 7cf53b9..977578b 100644 --- a/charts/glassflow-etl/values.ingress.yaml +++ b/charts/glassflow-etl/values.ingress.yaml @@ -1,238 +1,4 @@ -# Default values for glassflow-etl. -# This is a YAML-formatted file. -# Declare variables to be passed into your templates. - -# ============================================================================= -# GLOBAL SETTINGS -# ============================================================================= -# Global settings that apply across all components of the chart -global: - # Global image registry - if set, will be prepended to all image repositories - imageRegistry: "" - -# ============================================================================= -# API COMPONENT CONFIGURATION -# ============================================================================= -# Backend API service configuration for the GlassFlow ETL application -api: - # Number of API replicas for high availability - replicas: 1 - - # Container image configuration for the API component - image: - # Docker image repository for the GlassFlow ETL backend - repository: ghcr.io/glassflow/glassflow-etl-be - # Image tag/version to deploy - tag: stable - # Image pull policy: Always, IfNotPresent, or Never - pullPolicy: Always - - # Resource requirements and limits for the API pods - resources: - # Minimum resources required by the API pods - requests: - memory: "100Mi" # Memory request - adjust based on your workload - cpu: "100m" # CPU request - 250 millicores = 0.25 CPU cores - # Maximum resources the API pods can use - limits: - memory: "200Mi" # Memory limit - adjust based on your workload - cpu: "250m" # CPU limit - 500 millicores = 0.5 CPU cores - - service: - type: ClusterIP - port: 8081 - targetPort: 8081 - - # Environment variables for the API service - # Supported variables: - # - GLASSFLOW_LOG_FILE_PATH: Path to 
the log file (Default: /tmp/logs/glassflow) - # - GLASSFLOW_NATS_SERVER: NATS server address (Default: nats://{{ .Release.Name }}-nats.{{ .Release.Namespace }}.svc.cluster.local:4222) - # - GLASSFLOW_LOG_LEVEL: Log level (Default: INFO) - # Example: - # env: - # - name: GLASSFLOW_LOG_LEVEL - # value: "DEBUG" - env: {} - -# ============================================================================= -# UI COMPONENT CONFIGURATION -# ============================================================================= -# Frontend UI service configuration for the GlassFlow ETL application -ui: - # Number of UI replicas for high availability - replicas: 1 - - # Container image configuration for the UI component - image: - # Docker image repository for the GlassFlow ETL frontend - repository: ghcr.io/glassflow/glassflow-etl-fe - # Image tag/version to deploy - tag: stable - # Image pull policy: Always, IfNotPresent, or Never - pullPolicy: Always - - # Resource requirements and limits for the UI pods - resources: - # Minimum resources required by the UI pods - requests: - memory: "512Mi" # Memory request - adjust based on your workload - cpu: "100m" # CPU request - 100 millicores = 0.1 CPU cores - # Maximum resources the UI pods can use - limits: - memory: "1Gi" # Memory limit - adjust based on your workload - cpu: "200m" # CPU limit - 200 millicores = 0.2 CPU cores - - service: - type: ClusterIP - port: 8080 - targetPort: 8080 - - # Environment variables for the UI service - # Supported variables: - # - NEXT_PUBLIC_API_URL: API URL for the application - # Example: - # env: - # - name: NEXT_PUBLIC_API_URL - # value: "http://{{ .Release.Name }}-api.{{ .Release.Namespace }}.svc.cluster.local:8080" - env: {} - -# ============================================================================= -# GLASSFLOW OPERATOR CONFIGURATION -# ============================================================================= -# Kubernetes operator for managing ETL pipelines and related resources 
-glassflow-operator: - # Operator deployment configuration - controllerManager: - # Number of operator replicas for high availability - replicas: 1 - - # Operator manager configuration - manager: - # Container image configuration for the operator - image: - # Docker image repository for the GlassFlow ETL Kubernetes operator - repository: ghcr.io/glassflow/glassflow-etl-k8s-operator - # Image tag/version to deploy - tag: main - # Image pull policy: Always, IfNotPresent, or Never - pullPolicy: Always - - # Resource requirements and limits for the operator pods - resources: - # Maximum resources the operator pods can use - limits: - cpu: 500m # CPU limit - 500 millicores = 0.5 CPU cores - memory: 128Mi # Memory limit - adjust based on your workload - # Minimum resources required by the operator pods - requests: - cpu: 10m # CPU request - 10 millicores = 0.01 CPU cores - memory: 64Mi # Memory request - adjust based on your workload - - # Service account configuration for the operator - serviceAccount: - # Annotations to add to the operator service account - # Useful for IAM roles, OIDC providers, etc. 
- annotations: {} - - # Component image configurations - componentImages: - # Ingestor component docker image - ingestor: - repository: ghcr.io/glassflow/glassflow-etl-ingestor - tag: stable - - # Join component docker image - join: - repository: ghcr.io/glassflow/glassflow-etl-join - tag: stable - - # Sink component docker image - sink: - repository: ghcr.io/glassflow/glassflow-etl-sink - tag: stable - - # NATS configuration for the operator - # These settings are used when the operator needs to connect to NATS - # You can ignore this section if nats.enabled is true (default) - nats: - # NATS address for the operator to connect to - # Defaults to {{ .Release.Name }}-nats.{{ .Release.Namespace }}.svc.cluster.local if not specified - address: "" - - # NATS component address for the operator to connect to - # Defaults to the nats address if not specified - componentAddress: "" - - # Metrics service for the operator - metricsService: - # Service ports configuration - ports: - - name: https # Port name for service discovery - port: 8443 # Service port (external) - protocol: TCP # Protocol type - targetPort: 8443 # Container port (internal) - # Service type: ClusterIP, NodePort, or LoadBalancer - type: ClusterIP - -# ============================================================================= -# NATS CONFIGURATION -# ============================================================================= -# NATS messaging system configuration for internal communication -nats: - # Enable or disable NATS deployment - # Set to false if you want to use an external NATS instance - enabled: true - - # NATS configuration settings - # Note: Node selector for NATS should be set under podTemplate.merge.spec.nodeSelector - config: - # NATS clustering configuration for high availability - cluster: - # Enable NATS clustering (recommended for production) - enabled: true - # Port for cluster communication between NATS nodes - port: 6222 - # Number of NATS replicas - # IMPORTANT: Must be 2 or 
higher when JetStream is enabled - replicas: 3 - - # JetStream configuration for persistent messaging - jetstream: - # Enable JetStream for persistent message storage - enabled: true - - # Memory store configuration (fast but non-persistent) - memoryStore: - # Enable memory-based storage (not recommended for production) - enabled: false - # Maximum size of memory store - maxSize: 1Gi - - # File store configuration (persistent but slower) - fileStore: - # Enable file-based storage (recommended for production) - enabled: true - # Directory for storing JetStream data - dir: /data - # Persistent volume claim configuration - pvc: - # Enable PVC for persistent storage - enabled: true - # Size of the persistent volume - size: 100Gi - # Storage class for the PVC (uses global.storageClass if empty) - storageClassName: "" - - # Resource requirements and limits for NATS pods - resources: - # Minimum resources required by NATS pods - requests: - memory: "2Gi" # Memory request - adjust based on your workload - cpu: "500m" # CPU request - 500 millicores = 0.5 CPU cores - # Maximum resources NATS pods can use - limits: - memory: "4Gi" # Memory limit - adjust based on your workload - cpu: "1000m" # CPU limit - 1000 millicores = 1.0 CPU cores +# Example values to show how to configure ingress # ============================================================================= # INGRESS CONFIGURATION @@ -268,102 +34,4 @@ ingress: # - hosts: # - "glassflow.example.com" # secretName: "glassflow-tls-secret" - tls: [] - -# ============================================================================= -# SECURITY CONFIGURATION -# ============================================================================= -# Pod and container security context settings -podSecurityContext: {} - # fsGroup: 2000 # File system group ID - -# Container security context settings -securityContext: {} - # capabilities: - # drop: - # - ALL - # readOnlyRootFilesystem: true - # runAsNonRoot: true - # runAsUser: 1000 - -# 
============================================================================= -# SERVICE ACCOUNT CONFIGURATION -# ============================================================================= -# Service account configuration for the application (UI and API) -serviceAccount: - # Create a new service account for the application (UI and API) - # Set to false if you want to use an existing service account - create: true - - # Automatically mount the service account's API credentials - # Useful for accessing the Kubernetes API from within pods - automount: true - - # Annotations to add to the service account - # Useful for IAM roles, OIDC providers, etc. - annotations: {} - - # Name of the service account to use - # If not set and create is true, a name is generated using the fullname template - # Example: "glassflow-etl-sa" - name: "" - -# ============================================================================= -# POD CONFIGURATION -# ============================================================================= -# Pod-level configuration settings -podAnnotations: {} # Annotations to add to all pods -podLabels: {} # Labels to add to all pods - -# Node selector for main application pods (UI and API) -# Useful for scheduling pods on specific nodes -# Example: -# nodeSelector: -# kubernetes.io/os: linux -# node-role.kubernetes.io/worker: "true" -nodeSelector: {} - -# Pod tolerations for scheduling -# Useful for running pods on nodes with taints -# Example: -# - key: "dedicated" -# operator: "Equal" -# value: "glassflow" -# effect: "NoSchedule" -tolerations: [] - -# Pod affinity rules for scheduling -# Useful for co-locating or spreading pods -# Example: -# podAffinity: -# requiredDuringSchedulingIgnoredDuringExecution: -# - labelSelector: -# matchExpressions: -# - key: app -# operator: In -# values: -# - glassflow-etl -# topologyKey: kubernetes.io/hostname -affinity: {} - -# ============================================================================= -# AUTOSCALING 
CONFIGURATION -# ============================================================================= -# Horizontal Pod Autoscaler (HPA) configuration -autoscaling: - # Enable or disable autoscaling - enabled: false - - # Minimum number of replicas when autoscaling - minReplicas: 1 - - # Maximum number of replicas when autoscaling - maxReplicas: 5 - - # Target CPU utilization percentage for scaling - # HPA will scale up when CPU usage exceeds this percentage - targetCPUUtilizationPercentage: 80 - - # Target memory utilization percentage for scaling (commented out) - # Uncomment and set a value to enable memory-based scaling - # targetMemoryUtilizationPercentage: 80 + tls: [] \ No newline at end of file diff --git a/charts/glassflow-etl/values.yaml b/charts/glassflow-etl/values.yaml index 51bc23c..e8519cc 100644 --- a/charts/glassflow-etl/values.yaml +++ b/charts/glassflow-etl/values.yaml @@ -8,7 +8,14 @@ # Global settings that apply across all components of the chart global: # Global image registry - if set, will be prepended to all image repositories - imageRegistry: "" + imageRegistry: "ghcr.io/glassflow/" + observability: + metrics: + enabled: true + logs: + enabled: false + exporter: + otlp: {} # ============================================================================= # API COMPONENT CONFIGURATION @@ -17,13 +24,13 @@ global: api: # Number of API replicas for high availability replicas: 1 - + logLevel: "INFO" # Container image configuration for the API component image: # Docker image repository for the GlassFlow ETL backend - repository: ghcr.io/glassflow/glassflow-etl-be + repository: glassflow-etl-be # Image tag/version to deploy - tag: v2.1.2 + tag: v2.1.3 # Image pull policy: Always, IfNotPresent, or Never pullPolicy: IfNotPresent @@ -52,7 +59,7 @@ api: # env: # - name: GLASSFLOW_LOG_LEVEL # value: "DEBUG" - env: {} + env: [] # ============================================================================= # UI COMPONENT CONFIGURATION @@ -65,9 +72,9 @@ ui: # 
Container image configuration for the UI component image: # Docker image repository for the GlassFlow ETL frontend - repository: ghcr.io/glassflow/glassflow-etl-fe + repository: glassflow-etl-fe # Image tag/version to deploy - tag: v2.1.2 + tag: v2.1.3 # Image pull policy: Always, IfNotPresent, or Never pullPolicy: IfNotPresent @@ -94,7 +101,7 @@ ui: # env: # - name: NEXT_PUBLIC_API_URL # value: "http://{{ .Release.Name }}-api.{{ .Release.Namespace }}.svc.cluster.local:8080" - env: {} + env: [] # ============================================================================= # GLASSFLOW OPERATOR CONFIGURATION @@ -111,9 +118,9 @@ glassflow-operator: # Container image configuration for the operator image: # Docker image repository for the GlassFlow ETL Kubernetes operator - repository: ghcr.io/glassflow/glassflow-etl-k8s-operator + repository: glassflow-etl-k8s-operator # Image tag/version to deploy - tag: v0.6.0 + tag: v1.0.0 # Image pull policy: Always, IfNotPresent, or Never pullPolicy: IfNotPresent @@ -134,37 +141,49 @@ glassflow-operator: # Useful for IAM roles, OIDC providers, etc. 
annotations: {} - # Components resource definitions - componentResources: - # Ingestor component resource definitions - ingestor: - requests: {} - limits: {} - # Join component resource definitions - join: - requests: {} - limits: {} - # Sink component resource definitions - sink: - requests: {} - limits: {} - - # Component image configurations - componentImages: - # Ingestor component docker image + glassflowComponents: ingestor: - repository: ghcr.io/glassflow/glassflow-etl-ingestor - tag: v2.1.2 - - # Join component docker image + image: + repository: glassflow-etl-ingestor + tag: v2.1.3 + logLevel: "INFO" + resources: + requests: + cpu: 1000m + memory: 1Gi + limits: + cpu: 1500m + memory: 1.5Gi + # Node affinity for ingestor component (optional) + affinity: {} join: - repository: ghcr.io/glassflow/glassflow-etl-join - tag: v2.1.2 - - # Sink component docker image + image: + repository: glassflow-etl-join + tag: v2.1.3 + logLevel: "INFO" + resources: + requests: + cpu: 1000m + memory: 1Gi + limits: + cpu: 1500m + memory: 1.5Gi + # Node affinity for join component (optional) + affinity: {} sink: - repository: ghcr.io/glassflow/glassflow-etl-sink - tag: v2.1.2 + image: + repository: glassflow-etl-sink + tag: v2.1.3 + logLevel: "INFO" + resources: + requests: + cpu: 1000m + memory: 1Gi + limits: + cpu: 1500m + memory: 1.5Gi + # Node affinity for sink component (optional) + affinity: {} # NATS configuration for the operator # These settings are used when the operator needs to connect to NATS